Diffstat (limited to 'deps')
-rw-r--r--deps/v8/.clang-format4
-rw-r--r--deps/v8/AUTHORS8
-rw-r--r--deps/v8/ChangeLog528
-rw-r--r--deps/v8/DEPS4
-rw-r--r--deps/v8/LICENSE2
-rw-r--r--deps/v8/Makefile67
-rw-r--r--deps/v8/Makefile.android72
-rw-r--r--deps/v8/Makefile.nacl8
-rw-r--r--deps/v8/OWNERS4
-rw-r--r--deps/v8/PRESUBMIT.py13
-rw-r--r--deps/v8/build/all.gyp8
-rw-r--r--deps/v8/build/android.gypi24
-rw-r--r--deps/v8/build/features.gypi8
-rwxr-xr-xdeps/v8/build/gyp_v823
-rw-r--r--deps/v8/build/gyp_v8.py41
-rw-r--r--deps/v8/build/standalone.gypi49
-rw-r--r--deps/v8/build/toolchain.gypi45
-rw-r--r--deps/v8/codereview.settings7
-rw-r--r--deps/v8/include/v8-platform.h86
-rw-r--r--deps/v8/include/v8-profiler.h67
-rw-r--r--deps/v8/include/v8.h1092
-rw-r--r--deps/v8/include/v8config.h20
-rw-r--r--deps/v8/samples/lineprocessor.cc35
-rw-r--r--deps/v8/samples/process.cc93
-rw-r--r--deps/v8/samples/shell.cc60
-rw-r--r--deps/v8/src/a64/OWNERS1
-rw-r--r--deps/v8/src/a64/assembler-a64-inl.h1200
-rw-r--r--deps/v8/src/a64/assembler-a64.cc2606
-rw-r--r--deps/v8/src/a64/assembler-a64.h2085
-rw-r--r--deps/v8/src/a64/builtins-a64.cc1479
-rw-r--r--deps/v8/src/a64/code-stubs-a64.cc5809
-rw-r--r--deps/v8/src/a64/code-stubs-a64.h469
-rw-r--r--deps/v8/src/a64/codegen-a64.cc616
-rw-r--r--deps/v8/src/a64/codegen-a64.h70
-rw-r--r--deps/v8/src/a64/constants-a64.h1262
-rw-r--r--deps/v8/src/a64/cpu-a64.cc199
-rw-r--r--deps/v8/src/a64/cpu-a64.h107
-rw-r--r--deps/v8/src/a64/debug-a64.cc394
-rw-r--r--deps/v8/src/a64/debugger-a64.cc111
-rw-r--r--deps/v8/src/a64/debugger-a64.h56
-rw-r--r--deps/v8/src/a64/decoder-a64.cc726
-rw-r--r--deps/v8/src/a64/decoder-a64.h202
-rw-r--r--deps/v8/src/a64/deoptimizer-a64.cc376
-rw-r--r--deps/v8/src/a64/disasm-a64.cc1854
-rw-r--r--deps/v8/src/a64/disasm-a64.h115
-rw-r--r--deps/v8/src/a64/frames-a64.cc (renamed from deps/v8/include/v8-defaults.h)41
-rw-r--r--deps/v8/src/a64/frames-a64.h131
-rw-r--r--deps/v8/src/a64/full-codegen-a64.cc5010
-rw-r--r--deps/v8/src/a64/ic-a64.cc1413
-rw-r--r--deps/v8/src/a64/instructions-a64.cc334
-rw-r--r--deps/v8/src/a64/instructions-a64.h516
-rw-r--r--deps/v8/src/a64/instrument-a64.cc618
-rw-r--r--deps/v8/src/a64/instrument-a64.h108
-rw-r--r--deps/v8/src/a64/lithium-a64.cc2449
-rw-r--r--deps/v8/src/a64/lithium-a64.h2967
-rw-r--r--deps/v8/src/a64/lithium-codegen-a64.cc5692
-rw-r--r--deps/v8/src/a64/lithium-codegen-a64.h473
-rw-r--r--deps/v8/src/a64/lithium-gap-resolver-a64.cc326
-rw-r--r--deps/v8/src/a64/lithium-gap-resolver-a64.h90
-rw-r--r--deps/v8/src/a64/macro-assembler-a64-inl.h1647
-rw-r--r--deps/v8/src/a64/macro-assembler-a64.cc4975
-rw-r--r--deps/v8/src/a64/macro-assembler-a64.h2238
-rw-r--r--deps/v8/src/a64/regexp-macro-assembler-a64.cc1730
-rw-r--r--deps/v8/src/a64/regexp-macro-assembler-a64.h315
-rw-r--r--deps/v8/src/a64/simulator-a64.cc3414
-rw-r--r--deps/v8/src/a64/simulator-a64.h868
-rw-r--r--deps/v8/src/a64/stub-cache-a64.cc1548
-rw-r--r--deps/v8/src/a64/utils-a64.cc112
-rw-r--r--deps/v8/src/a64/utils-a64.h109
-rw-r--r--deps/v8/src/accessors.cc126
-rw-r--r--deps/v8/src/accessors.h9
-rw-r--r--deps/v8/src/allocation-site-scopes.cc32
-rw-r--r--deps/v8/src/allocation-site-scopes.h53
-rw-r--r--deps/v8/src/allocation-tracker.cc32
-rw-r--r--deps/v8/src/allocation-tracker.h9
-rw-r--r--deps/v8/src/allocation.cc41
-rw-r--r--deps/v8/src/allocation.h29
-rw-r--r--deps/v8/src/api.cc1690
-rw-r--r--deps/v8/src/api.h53
-rw-r--r--deps/v8/src/arm/OWNERS1
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h83
-rw-r--r--deps/v8/src/arm/assembler-arm.cc210
-rw-r--r--deps/v8/src/arm/assembler-arm.h51
-rw-r--r--deps/v8/src/arm/builtins-arm.cc182
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc2044
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h95
-rw-r--r--deps/v8/src/arm/codegen-arm.cc60
-rw-r--r--deps/v8/src/arm/codegen-arm.h41
-rw-r--r--deps/v8/src/arm/constants-arm.h3
-rw-r--r--deps/v8/src/arm/cpu-arm.cc9
-rw-r--r--deps/v8/src/arm/debug-arm.cc10
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc2
-rw-r--r--deps/v8/src/arm/disasm-arm.cc8
-rw-r--r--deps/v8/src/arm/frames-arm.cc15
-rw-r--r--deps/v8/src/arm/frames-arm.h16
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc953
-rw-r--r--deps/v8/src/arm/ic-arm.cc414
-rw-r--r--deps/v8/src/arm/lithium-arm.cc489
-rw-r--r--deps/v8/src/arm/lithium-arm.h429
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc1121
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h8
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc588
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h92
-rw-r--r--deps/v8/src/arm/simulator-arm.cc31
-rw-r--r--deps/v8/src/arm/simulator-arm.h6
-rw-r--r--deps/v8/src/arm/stub-cache-arm.cc1990
-rw-r--r--deps/v8/src/array-iterator.js22
-rw-r--r--deps/v8/src/array.js141
-rw-r--r--deps/v8/src/arraybuffer.js2
-rw-r--r--deps/v8/src/assembler.cc173
-rw-r--r--deps/v8/src/assembler.h57
-rw-r--r--deps/v8/src/ast.cc475
-rw-r--r--deps/v8/src/ast.h742
-rw-r--r--deps/v8/src/atomicops.h2
-rw-r--r--deps/v8/src/atomicops_internals_a64_gcc.h416
-rw-r--r--deps/v8/src/bignum-dtoa.cc3
-rw-r--r--deps/v8/src/bootstrapper.cc387
-rw-r--r--deps/v8/src/bootstrapper.h10
-rw-r--r--deps/v8/src/builtins.cc142
-rw-r--r--deps/v8/src/builtins.h171
-rw-r--r--deps/v8/src/cached-powers.cc5
-rw-r--r--deps/v8/src/char-predicates.h21
-rw-r--r--deps/v8/src/checks.cc34
-rw-r--r--deps/v8/src/checks.h20
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc511
-rw-r--r--deps/v8/src/code-stubs.cc612
-rw-r--r--deps/v8/src/code-stubs.h776
-rw-r--r--deps/v8/src/codegen.cc37
-rw-r--r--deps/v8/src/codegen.h34
-rw-r--r--deps/v8/src/collection.js50
-rw-r--r--deps/v8/src/compilation-cache.cc14
-rw-r--r--deps/v8/src/compilation-cache.h2
-rw-r--r--deps/v8/src/compiler.cc1270
-rw-r--r--deps/v8/src/compiler.h163
-rw-r--r--deps/v8/src/contexts.h22
-rw-r--r--deps/v8/src/conversions-inl.h6
-rw-r--r--deps/v8/src/conversions.cc11
-rw-r--r--deps/v8/src/conversions.h2
-rw-r--r--deps/v8/src/cpu-profiler.cc28
-rw-r--r--deps/v8/src/cpu-profiler.h6
-rw-r--r--deps/v8/src/cpu.cc57
-rw-r--r--deps/v8/src/d8-debug.cc42
-rw-r--r--deps/v8/src/d8-debug.h2
-rw-r--r--deps/v8/src/d8-posix.cc167
-rw-r--r--deps/v8/src/d8-readline.cc22
-rw-r--r--deps/v8/src/d8-windows.cc2
-rw-r--r--deps/v8/src/d8.cc350
-rw-r--r--deps/v8/src/d8.gyp7
-rw-r--r--deps/v8/src/d8.h17
-rw-r--r--deps/v8/src/date.cc1
-rw-r--r--deps/v8/src/date.js3
-rw-r--r--deps/v8/src/dateparser.h2
-rw-r--r--deps/v8/src/debug-agent.cc9
-rw-r--r--deps/v8/src/debug-agent.h1
-rw-r--r--deps/v8/src/debug.cc298
-rw-r--r--deps/v8/src/debug.h14
-rw-r--r--deps/v8/src/deoptimizer.cc1084
-rw-r--r--deps/v8/src/deoptimizer.h151
-rw-r--r--deps/v8/src/disassembler.cc10
-rw-r--r--deps/v8/src/effects.h28
-rw-r--r--deps/v8/src/elements-kind.cc35
-rw-r--r--deps/v8/src/elements-kind.h59
-rw-r--r--deps/v8/src/elements.cc248
-rw-r--r--deps/v8/src/execution.cc100
-rw-r--r--deps/v8/src/execution.h19
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc34
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.h5
-rw-r--r--deps/v8/src/extensions/free-buffer-extension.cc50
-rw-r--r--deps/v8/src/extensions/free-buffer-extension.h48
-rw-r--r--deps/v8/src/extensions/gc-extension.cc28
-rw-r--r--deps/v8/src/extensions/gc-extension.h17
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc116
-rw-r--r--deps/v8/src/extensions/statistics-extension.h5
-rw-r--r--deps/v8/src/extensions/trigger-failure-extension.cc79
-rw-r--r--deps/v8/src/extensions/trigger-failure-extension.h55
-rw-r--r--deps/v8/src/factory.cc399
-rw-r--r--deps/v8/src/factory.h142
-rw-r--r--deps/v8/src/feedback-slots.h110
-rw-r--r--deps/v8/src/flag-definitions.h126
-rw-r--r--deps/v8/src/flags.cc2
-rw-r--r--deps/v8/src/frames-inl.h2
-rw-r--r--deps/v8/src/frames.cc23
-rw-r--r--deps/v8/src/frames.h38
-rw-r--r--deps/v8/src/full-codegen.cc96
-rw-r--r--deps/v8/src/full-codegen.h65
-rw-r--r--deps/v8/src/func-name-inferrer.cc2
-rw-r--r--deps/v8/src/gdb-jit.cc13
-rw-r--r--deps/v8/src/gdb-jit.h6
-rw-r--r--deps/v8/src/global-handles.cc49
-rw-r--r--deps/v8/src/global-handles.h10
-rw-r--r--deps/v8/src/globals.h45
-rw-r--r--deps/v8/src/handles-inl.h19
-rw-r--r--deps/v8/src/handles.cc102
-rw-r--r--deps/v8/src/handles.h33
-rw-r--r--deps/v8/src/harmony-array.js12
-rw-r--r--deps/v8/src/harmony-math.js141
-rw-r--r--deps/v8/src/harmony-string.js40
-rw-r--r--deps/v8/src/heap-inl.h166
-rw-r--r--deps/v8/src/heap-profiler.cc152
-rw-r--r--deps/v8/src/heap-profiler.h45
-rw-r--r--deps/v8/src/heap-snapshot-generator-inl.h6
-rw-r--r--deps/v8/src/heap-snapshot-generator.cc509
-rw-r--r--deps/v8/src/heap-snapshot-generator.h106
-rw-r--r--deps/v8/src/heap.cc1625
-rw-r--r--deps/v8/src/heap.h514
-rw-r--r--deps/v8/src/hydrogen-check-elimination.cc718
-rw-r--r--deps/v8/src/hydrogen-check-elimination.h32
-rw-r--r--deps/v8/src/hydrogen-dce.cc2
-rw-r--r--deps/v8/src/hydrogen-environment-liveness.cc2
-rw-r--r--deps/v8/src/hydrogen-flow-engine.h39
-rw-r--r--deps/v8/src/hydrogen-gvn.cc551
-rw-r--r--deps/v8/src/hydrogen-gvn.h88
-rw-r--r--deps/v8/src/hydrogen-instructions.cc656
-rw-r--r--deps/v8/src/hydrogen-instructions.h1749
-rw-r--r--deps/v8/src/hydrogen-load-elimination.cc141
-rw-r--r--deps/v8/src/hydrogen-mark-unreachable.h2
-rw-r--r--deps/v8/src/hydrogen-minus-zero.cc8
-rw-r--r--deps/v8/src/hydrogen-representation-changes.cc7
-rw-r--r--deps/v8/src/hydrogen-sce.cc2
-rw-r--r--deps/v8/src/hydrogen-uint32-analysis.cc6
-rw-r--r--deps/v8/src/hydrogen.cc4638
-rw-r--r--deps/v8/src/hydrogen.h669
-rw-r--r--deps/v8/src/i18n.cc71
-rw-r--r--deps/v8/src/i18n.h23
-rw-r--r--deps/v8/src/i18n.js44
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h22
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc118
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h39
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc126
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc2010
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h80
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc55
-rw-r--r--deps/v8/src/ia32/codegen-ia32.h37
-rw-r--r--deps/v8/src/ia32/debug-ia32.cc12
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc2
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc53
-rw-r--r--deps/v8/src/ia32/frames-ia32.cc14
-rw-r--r--deps/v8/src/ia32/frames-ia32.h2
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc861
-rw-r--r--deps/v8/src/ia32/ic-ia32.cc440
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc1011
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h17
-rw-r--r--deps/v8/src/ia32/lithium-gap-resolver-ia32.cc2
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc484
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h450
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc317
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h76
-rw-r--r--deps/v8/src/ia32/simulator-ia32.cc1
-rw-r--r--deps/v8/src/ia32/stub-cache-ia32.cc2056
-rw-r--r--deps/v8/src/ic-inl.h54
-rw-r--r--deps/v8/src/ic.cc1683
-rw-r--r--deps/v8/src/ic.h592
-rw-r--r--deps/v8/src/incremental-marking.cc10
-rw-r--r--deps/v8/src/incremental-marking.h2
-rw-r--r--deps/v8/src/interface.cc7
-rw-r--r--deps/v8/src/isolate.cc387
-rw-r--r--deps/v8/src/isolate.h336
-rw-r--r--deps/v8/src/json.js15
-rw-r--r--deps/v8/src/jsregexp.cc28
-rw-r--r--deps/v8/src/libplatform/default-platform.cc89
-rw-r--r--deps/v8/src/libplatform/default-platform.h (renamed from deps/v8/src/defaults.cc)83
-rw-r--r--deps/v8/src/libplatform/task-queue.cc80
-rw-r--r--deps/v8/src/libplatform/task-queue.h71
-rw-r--r--deps/v8/src/libplatform/worker-thread.cc56
-rw-r--r--deps/v8/src/libplatform/worker-thread.h62
-rw-r--r--deps/v8/src/list-inl.h1
-rw-r--r--deps/v8/src/list.h15
-rw-r--r--deps/v8/src/lithium-allocator-inl.h2
-rw-r--r--deps/v8/src/lithium-allocator.cc4
-rw-r--r--deps/v8/src/lithium-codegen.cc55
-rw-r--r--deps/v8/src/lithium-codegen.h2
-rw-r--r--deps/v8/src/lithium.cc142
-rw-r--r--deps/v8/src/lithium.h29
-rw-r--r--deps/v8/src/liveedit.cc23
-rw-r--r--deps/v8/src/log-utils.h3
-rw-r--r--deps/v8/src/log.cc302
-rw-r--r--deps/v8/src/log.h13
-rw-r--r--deps/v8/src/macro-assembler.h8
-rw-r--r--deps/v8/src/macros.py18
-rw-r--r--deps/v8/src/mark-compact.cc332
-rw-r--r--deps/v8/src/mark-compact.h27
-rw-r--r--deps/v8/src/math.js168
-rw-r--r--deps/v8/src/messages.cc12
-rw-r--r--deps/v8/src/messages.h1
-rw-r--r--deps/v8/src/messages.js125
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h24
-rw-r--r--deps/v8/src/mips/assembler-mips.cc24
-rw-r--r--deps/v8/src/mips/assembler-mips.h23
-rw-r--r--deps/v8/src/mips/builtins-mips.cc194
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc2064
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h100
-rw-r--r--deps/v8/src/mips/codegen-mips.cc484
-rw-r--r--deps/v8/src/mips/codegen-mips.h41
-rw-r--r--deps/v8/src/mips/constants-mips.h12
-rw-r--r--deps/v8/src/mips/debug-mips.cc10
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc10
-rw-r--r--deps/v8/src/mips/disasm-mips.cc3
-rw-r--r--deps/v8/src/mips/frames-mips.cc15
-rw-r--r--deps/v8/src/mips/frames-mips.h8
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc950
-rw-r--r--deps/v8/src/mips/ic-mips.cc416
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc984
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.h43
-rw-r--r--deps/v8/src/mips/lithium-gap-resolver-mips.cc5
-rw-r--r--deps/v8/src/mips/lithium-mips.cc429
-rw-r--r--deps/v8/src/mips/lithium-mips.h421
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc588
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h130
-rw-r--r--deps/v8/src/mips/simulator-mips.cc32
-rw-r--r--deps/v8/src/mips/simulator-mips.h4
-rw-r--r--deps/v8/src/mips/stub-cache-mips.cc2017
-rw-r--r--deps/v8/src/mirror-debugger.js5
-rw-r--r--deps/v8/src/mksnapshot.cc9
-rw-r--r--deps/v8/src/object-observe.js147
-rw-r--r--deps/v8/src/objects-debug.cc115
-rw-r--r--deps/v8/src/objects-inl.h964
-rw-r--r--deps/v8/src/objects-printer.cc260
-rw-r--r--deps/v8/src/objects-visiting-inl.h73
-rw-r--r--deps/v8/src/objects-visiting.cc27
-rw-r--r--deps/v8/src/objects-visiting.h11
-rw-r--r--deps/v8/src/objects.cc2992
-rw-r--r--deps/v8/src/objects.h1419
-rw-r--r--deps/v8/src/optimizing-compiler-thread.cc89
-rw-r--r--deps/v8/src/optimizing-compiler-thread.h29
-rw-r--r--deps/v8/src/parser.cc1379
-rw-r--r--deps/v8/src/parser.h283
-rw-r--r--deps/v8/src/platform-cygwin.cc6
-rw-r--r--deps/v8/src/platform-freebsd.cc6
-rw-r--r--deps/v8/src/platform-linux.cc22
-rw-r--r--deps/v8/src/platform-macos.cc6
-rw-r--r--deps/v8/src/platform-openbsd.cc6
-rw-r--r--deps/v8/src/platform-posix.cc71
-rw-r--r--deps/v8/src/platform-qnx.cc401
-rw-r--r--deps/v8/src/platform-solaris.cc6
-rw-r--r--deps/v8/src/platform-win32.cc15
-rw-r--r--deps/v8/src/platform.h36
-rw-r--r--deps/v8/src/platform/condition-variable.cc4
-rw-r--r--deps/v8/src/platform/mutex.cc2
-rw-r--r--deps/v8/src/platform/semaphore.cc2
-rw-r--r--deps/v8/src/platform/socket.cc2
-rw-r--r--deps/v8/src/platform/time.cc2
-rw-r--r--deps/v8/src/platform/time.h2
-rw-r--r--deps/v8/src/preparser.cc800
-rw-r--r--deps/v8/src/preparser.h1137
-rw-r--r--deps/v8/src/prettyprinter.cc10
-rw-r--r--deps/v8/src/prettyprinter.h6
-rw-r--r--deps/v8/src/profile-generator.cc11
-rw-r--r--deps/v8/src/profile-generator.h6
-rw-r--r--deps/v8/src/promise.js308
-rw-r--r--deps/v8/src/property-details.h92
-rw-r--r--deps/v8/src/property.cc5
-rw-r--r--deps/v8/src/property.h126
-rw-r--r--deps/v8/src/proxy.js3
-rw-r--r--deps/v8/src/qnx-math.h42
-rw-r--r--deps/v8/src/regexp-macro-assembler-tracer.cc6
-rw-r--r--deps/v8/src/regexp-macro-assembler.h1
-rw-r--r--deps/v8/src/rewriter.cc11
-rw-r--r--deps/v8/src/runtime-profiler.cc228
-rw-r--r--deps/v8/src/runtime-profiler.h29
-rw-r--r--deps/v8/src/runtime.cc1389
-rw-r--r--deps/v8/src/runtime.h99
-rw-r--r--deps/v8/src/runtime.js9
-rw-r--r--deps/v8/src/safepoint-table.cc12
-rw-r--r--deps/v8/src/safepoint-table.h4
-rw-r--r--deps/v8/src/sampler.cc61
-rw-r--r--deps/v8/src/scanner-character-streams.cc2
-rw-r--r--deps/v8/src/scanner.cc17
-rw-r--r--deps/v8/src/scanner.h7
-rw-r--r--deps/v8/src/scopeinfo.cc17
-rw-r--r--deps/v8/src/scopes.cc11
-rw-r--r--deps/v8/src/serialize.cc188
-rw-r--r--deps/v8/src/serialize.h4
-rw-r--r--deps/v8/src/simulator.h2
-rw-r--r--deps/v8/src/smart-pointers.h47
-rw-r--r--deps/v8/src/spaces-inl.h21
-rw-r--r--deps/v8/src/spaces.cc266
-rw-r--r--deps/v8/src/spaces.h257
-rw-r--r--deps/v8/src/string-stream.cc5
-rw-r--r--deps/v8/src/string.js159
-rw-r--r--deps/v8/src/stub-cache.cc1175
-rw-r--r--deps/v8/src/stub-cache.h475
-rw-r--r--deps/v8/src/sweeper-thread.cc36
-rw-r--r--deps/v8/src/sweeper-thread.h7
-rw-r--r--deps/v8/src/symbol.js25
-rw-r--r--deps/v8/src/third_party/valgrind/valgrind.h44
-rw-r--r--deps/v8/src/third_party/vtune/jitprofiling.cc40
-rw-r--r--deps/v8/src/third_party/vtune/jitprofiling.h92
-rw-r--r--deps/v8/src/third_party/vtune/v8-vtune.h22
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.cc8
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.h22
-rw-r--r--deps/v8/src/token.h4
-rw-r--r--deps/v8/src/transitions-inl.h8
-rw-r--r--deps/v8/src/trig-table.h61
-rw-r--r--deps/v8/src/type-info.cc534
-rw-r--r--deps/v8/src/type-info.h305
-rw-r--r--deps/v8/src/typedarray.js325
-rw-r--r--deps/v8/src/types.cc725
-rw-r--r--deps/v8/src/types.h479
-rw-r--r--deps/v8/src/typing.cc271
-rw-r--r--deps/v8/src/typing.h20
-rw-r--r--deps/v8/src/unicode-inl.h20
-rw-r--r--deps/v8/src/unicode.cc40
-rw-r--r--deps/v8/src/unicode.h18
-rw-r--r--deps/v8/src/unique.h2
-rw-r--r--deps/v8/src/utils.h91
-rw-r--r--deps/v8/src/utils/random-number-generator.cc4
-rw-r--r--deps/v8/src/v8-counters.cc2
-rw-r--r--deps/v8/src/v8-counters.h17
-rw-r--r--deps/v8/src/v8.cc136
-rw-r--r--deps/v8/src/v8.h13
-rw-r--r--deps/v8/src/v8checks.h2
-rw-r--r--deps/v8/src/v8conversions.h50
-rw-r--r--deps/v8/src/v8globals.h36
-rw-r--r--deps/v8/src/v8natives.js251
-rw-r--r--deps/v8/src/v8threads.cc70
-rw-r--r--deps/v8/src/v8threads.h28
-rw-r--r--deps/v8/src/v8utils.h21
-rw-r--r--deps/v8/src/version.cc6
-rw-r--r--deps/v8/src/vm-state-inl.h3
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h22
-rw-r--r--deps/v8/src/x64/assembler-x64.cc324
-rw-r--r--deps/v8/src/x64/assembler-x64.h148
-rw-r--r--deps/v8/src/x64/builtins-x64.cc290
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc2482
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h85
-rw-r--r--deps/v8/src/x64/codegen-x64.cc136
-rw-r--r--deps/v8/src/x64/codegen-x64.h36
-rw-r--r--deps/v8/src/x64/debug-x64.cc30
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc40
-rw-r--r--deps/v8/src/x64/disasm-x64.cc45
-rw-r--r--deps/v8/src/x64/frames-x64.cc14
-rw-r--r--deps/v8/src/x64/frames-x64.h2
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc1163
-rw-r--r--deps/v8/src/x64/ic-x64.cc523
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc1422
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h22
-rw-r--r--deps/v8/src/x64/lithium-gap-resolver-x64.cc30
-rw-r--r--deps/v8/src/x64/lithium-x64.cc571
-rw-r--r--deps/v8/src/x64/lithium-x64.h696
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc941
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h128
-rw-r--r--deps/v8/src/x64/regexp-macro-assembler-x64.cc90
-rw-r--r--deps/v8/src/x64/simulator-x64.cc1
-rw-r--r--deps/v8/src/x64/stub-cache-x64.cc2166
-rw-r--r--deps/v8/src/zone-allocator.h80
-rw-r--r--deps/v8/src/zone-containers.h (renamed from deps/v8/src/allocation-inl.h)27
-rw-r--r--deps/v8/src/zone.cc29
-rw-r--r--deps/v8/test/benchmarks/benchmarks.status8
-rw-r--r--deps/v8/test/benchmarks/testcfg.py18
-rw-r--r--deps/v8/test/cctest/OWNERS2
-rw-r--r--deps/v8/test/cctest/cctest.cc12
-rw-r--r--deps/v8/test/cctest/cctest.gyp28
-rw-r--r--deps/v8/test/cctest/cctest.h43
-rw-r--r--deps/v8/test/cctest/cctest.status92
-rw-r--r--deps/v8/test/cctest/print-extension.cc51
-rw-r--r--deps/v8/test/cctest/print-extension.h (renamed from deps/v8/src/apiutils.h)32
-rw-r--r--deps/v8/test/cctest/profiler-extension.cc75
-rw-r--r--deps/v8/test/cctest/profiler-extension.h54
-rw-r--r--deps/v8/test/cctest/test-accessors.cc220
-rw-r--r--deps/v8/test/cctest/test-alloc.cc11
-rw-r--r--deps/v8/test/cctest/test-api.cc5296
-rw-r--r--deps/v8/test/cctest/test-assembler-a64.cc9803
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc66
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc50
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc36
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc54
-rw-r--r--deps/v8/test/cctest/test-ast.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs-a64.cc188
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm.cc1
-rw-r--r--deps/v8/test/cctest/test-code-stubs-ia32.cc5
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips.cc188
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x64.cc17
-rw-r--r--deps/v8/test/cctest/test-code-stubs.cc33
-rw-r--r--deps/v8/test/cctest/test-compiler.cc79
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc722
-rw-r--r--deps/v8/test/cctest/test-debug.cc1269
-rw-r--r--deps/v8/test/cctest/test-declarative-accessors.cc32
-rw-r--r--deps/v8/test/cctest/test-decls.cc102
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc37
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc32
-rw-r--r--deps/v8/test/cctest/test-disasm-a64.cc1761
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc35
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc25
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc30
-rw-r--r--deps/v8/test/cctest/test-fuzz-a64.cc70
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc8
-rw-r--r--deps/v8/test/cctest/test-global-object.cc2
-rw-r--r--deps/v8/test/cctest/test-hashing.cc42
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc522
-rw-r--r--deps/v8/test/cctest/test-heap.cc492
-rw-r--r--deps/v8/test/cctest/test-javascript-a64.cc266
-rw-r--r--deps/v8/test/cctest/test-js-a64-variables.cc143
-rw-r--r--deps/v8/test/cctest/test-libplatform-task-queue.cc95
-rw-r--r--deps/v8/test/cctest/test-libplatform-worker-thread.cc (renamed from deps/v8/test/mjsunit/proto-poison.js)55
-rw-r--r--deps/v8/test/cctest/test-libplatform.h120
-rw-r--r--deps/v8/test/cctest/test-lockers.cc2
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc149
-rw-r--r--deps/v8/test/cctest/test-log.cc34
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc96
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-ia32.cc139
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc42
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc319
-rw-r--r--deps/v8/test/cctest/test-mark-compact.cc79
-rw-r--r--deps/v8/test/cctest/test-mementos.cc79
-rw-r--r--deps/v8/test/cctest/test-microtask-delivery.cc137
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc247
-rw-r--r--deps/v8/test/cctest/test-parsing.cc848
-rw-r--r--deps/v8/test/cctest/test-platform.cc11
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc177
-rw-r--r--deps/v8/test/cctest/test-random.cc100
-rw-r--r--deps/v8/test/cctest/test-regexp.cc42
-rw-r--r--deps/v8/test/cctest/test-reloc-info.cc12
-rw-r--r--deps/v8/test/cctest/test-representation.cc128
-rw-r--r--deps/v8/test/cctest/test-serialize.cc10
-rw-r--r--deps/v8/test/cctest/test-spaces.cc6
-rw-r--r--deps/v8/test/cctest/test-strings.cc116
-rw-r--r--deps/v8/test/cctest/test-symbols.cc2
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc210
-rw-r--r--deps/v8/test/cctest/test-threads.cc31
-rw-r--r--deps/v8/test/cctest/test-time.cc6
-rw-r--r--deps/v8/test/cctest/test-types.cc1436
-rw-r--r--deps/v8/test/cctest/test-unbound-queue.cc1
-rw-r--r--deps/v8/test/cctest/test-unique.cc1
-rw-r--r--deps/v8/test/cctest/test-utils-a64.cc426
-rw-r--r--deps/v8/test/cctest/test-utils-a64.h233
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc23
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc23
-rw-r--r--deps/v8/test/cctest/test-weaktypedarrays.cc35
-rw-r--r--deps/v8/test/cctest/trace-extension.cc142
-rw-r--r--deps/v8/test/cctest/trace-extension.h56
-rw-r--r--deps/v8/test/intl/collator/sr-sort.js1
-rw-r--r--deps/v8/test/intl/intl.status3
-rw-r--r--deps/v8/test/intl/overrides/caching.js1
-rw-r--r--deps/v8/test/intl/string/normalization.js145
-rw-r--r--deps/v8/test/message/instanceof.js28
-rw-r--r--deps/v8/test/message/instanceof.out5
-rw-r--r--deps/v8/test/message/isvar.js2
-rw-r--r--deps/v8/test/message/message.status3
-rw-r--r--deps/v8/test/message/paren_in_arg_string.js2
-rw-r--r--deps/v8/test/message/replacement-marker-as-argument.js2
-rw-r--r--deps/v8/test/message/single-function-literal.js2
-rw-r--r--deps/v8/test/message/testcfg.py2
-rw-r--r--deps/v8/test/mjsunit/allocation-folding.js13
-rw-r--r--deps/v8/test/mjsunit/allocation-site-info.js13
-rw-r--r--deps/v8/test/mjsunit/arguments-apply-deopt.js77
-rw-r--r--deps/v8/test/mjsunit/arguments-load-across-eval.js2
-rw-r--r--deps/v8/test/mjsunit/arguments-read-and-assignment.js2
-rw-r--r--deps/v8/test/mjsunit/arguments.js2
-rw-r--r--deps/v8/test/mjsunit/array-constructor-feedback.js52
-rw-r--r--deps/v8/test/mjsunit/array-feedback.js163
-rw-r--r--deps/v8/test/mjsunit/array-iteration.js1
-rw-r--r--deps/v8/test/mjsunit/array-literal-feedback.js10
-rw-r--r--deps/v8/test/mjsunit/array-natives-elements.js1
-rw-r--r--deps/v8/test/mjsunit/array-non-smi-length.js1
-rw-r--r--deps/v8/test/mjsunit/array-pop.js17
-rw-r--r--deps/v8/test/mjsunit/array-push.js33
-rw-r--r--deps/v8/test/mjsunit/array-tostring.js2
-rw-r--r--deps/v8/test/mjsunit/bool-concat.js39
-rw-r--r--deps/v8/test/mjsunit/bugs/bug-1344252.js1
-rw-r--r--deps/v8/test/mjsunit/bugs/bug-proto.js1
-rw-r--r--deps/v8/test/mjsunit/closures.js1
-rw-r--r--deps/v8/test/mjsunit/compare-character.js1
-rw-r--r--deps/v8/test/mjsunit/compare-nan.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/alloc-number-debug.js44
-rw-r--r--deps/v8/test/mjsunit/compiler/compare-map-elim.js51
-rw-r--r--deps/v8/test/mjsunit/compiler/compare-map-elim2.js130
-rw-r--r--deps/v8/test/mjsunit/compiler/compare-objeq-elim.js85
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-proto-change.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/control-flow-1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-loops-neg.js100
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-loops.js87
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js187
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis.js95
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-throw.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/lazy-const-lookup.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-params.js71
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination.js52
-rw-r--r--deps/v8/test/mjsunit/compiler/math-floor-global.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/math-floor-local.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/minus-zero.js58
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-closures.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-with-args.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1394.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-3260426.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-arguments.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-rep-change.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-toint32.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/rotate.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/smi-stores-opt.js49
-rw-r--r--deps/v8/test/mjsunit/compiler/to-fast-properties.js43
-rw-r--r--deps/v8/test/mjsunit/concurrent-initial-prototype-change.js2
-rw-r--r--deps/v8/test/mjsunit/const-declaration.js1
-rw-r--r--deps/v8/test/mjsunit/constant-folding-2.js24
-rw-r--r--deps/v8/test/mjsunit/context-calls-maintained.js116
-rw-r--r--deps/v8/test/mjsunit/contextual-calls.js103
-rw-r--r--deps/v8/test/mjsunit/cyclic-array-to-string.js1
-rw-r--r--deps/v8/test/mjsunit/d8-performance-now.js5
-rw-r--r--deps/v8/test/mjsunit/debug-breakpoints.js1
-rw-r--r--deps/v8/test/mjsunit/debug-constructor.js2
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-const.js1
-rw-r--r--deps/v8/test/mjsunit/debug-function-scopes.js1
-rw-r--r--deps/v8/test/mjsunit/debug-ignore-breakpoints.js1
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-3.js2
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-check-stack.js1
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-compile-error.js2
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-diff.js1
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-utils.js2
-rw-r--r--deps/v8/test/mjsunit/debug-mirror-cache.js1
-rw-r--r--deps/v8/test/mjsunit/debug-setbreakpoint.js1
-rw-r--r--deps/v8/test/mjsunit/debug-step-4-in-frame.js16
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-positions.js2
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-scope-part1.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-with-fp-regs.js90
-rw-r--r--deps/v8/test/mjsunit/div-mod.js6
-rw-r--r--deps/v8/test/mjsunit/div-mul-minus-one.js2
-rw-r--r--deps/v8/test/mjsunit/elements-kind.js123
-rw-r--r--deps/v8/test/mjsunit/elements-transition-and-store.js2
-rw-r--r--deps/v8/test/mjsunit/elements-transition-hoisting.js15
-rw-r--r--deps/v8/test/mjsunit/elements-transition.js40
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-9.js1
-rw-r--r--deps/v8/test/mjsunit/enumeration-order.js4
-rw-r--r--deps/v8/test/mjsunit/error-accessors.js1
-rw-r--r--deps/v8/test/mjsunit/error-tostring-omit.js63
-rw-r--r--deps/v8/test/mjsunit/eval-stack-trace.js1
-rw-r--r--deps/v8/test/mjsunit/extra-arguments.js2
-rw-r--r--deps/v8/test/mjsunit/fast-array-length.js1
-rw-r--r--deps/v8/test/mjsunit/fast-literal.js7
-rw-r--r--deps/v8/test/mjsunit/fast-prototype.js3
-rw-r--r--deps/v8/test/mjsunit/fun-name.js1
-rw-r--r--deps/v8/test/mjsunit/function-arguments-duplicate.js36
-rw-r--r--deps/v8/test/mjsunit/function.js1
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives-part1.js12
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives-part2.js12
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives-part3.js17
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives-part4.js12
-rw-r--r--deps/v8/test/mjsunit/get-prototype-of.js1
-rw-r--r--deps/v8/test/mjsunit/getter-in-value-prototype.js1
-rw-r--r--deps/v8/test/mjsunit/getters-on-elements.js221
-rw-r--r--deps/v8/test/mjsunit/global-load-from-eval-in-with.js1
-rw-r--r--deps/v8/test/mjsunit/global-load-from-nested-eval.js5
-rw-r--r--deps/v8/test/mjsunit/harmony/array-find.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/array-findindex.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/collections.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/dataview-accessors.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-iteration.js81
-rw-r--r--deps/v8/test/mjsunit/harmony/iteration-semantics.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/math-clz32.js28
-rw-r--r--deps/v8/test/mjsunit/harmony/math-fround.js99
-rw-r--r--deps/v8/test/mjsunit/harmony/math-hyperbolic.js138
-rw-r--r--deps/v8/test/mjsunit/harmony/math-hypot.js94
-rw-r--r--deps/v8/test/mjsunit/harmony/math-log2-log10.js47
-rw-r--r--deps/v8/test/mjsunit/harmony/microtask-delivery.js168
-rw-r--r--deps/v8/test/mjsunit/harmony/object-observe.js711
-rw-r--r--deps/v8/test/mjsunit/harmony/private.js324
-rw-r--r--deps/v8/test/mjsunit/harmony/promises.js791
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-function.js34
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/string-endswith.js282
-rw-r--r--deps/v8/test/mjsunit/harmony/string-repeat.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/string-startswith.js274
-rw-r--r--deps/v8/test/mjsunit/harmony/symbols.js56
-rw-r--r--deps/v8/test/mjsunit/json2.js11
-rw-r--r--deps/v8/test/mjsunit/keyed-array-call.js56
-rw-r--r--deps/v8/test/mjsunit/large-object-literal.js1
-rw-r--r--deps/v8/test/mjsunit/limit-locals.js4
-rw-r--r--deps/v8/test/mjsunit/load-callback-from-value-classic.js38
-rw-r--r--deps/v8/test/mjsunit/local-load-from-eval.js1
-rw-r--r--deps/v8/test/mjsunit/math-floor-part1.js9
-rw-r--r--deps/v8/test/mjsunit/math-pow.js2
-rw-r--r--deps/v8/test/mjsunit/math-round.js11
-rw-r--r--deps/v8/test/mjsunit/math-sqrt.js1
-rw-r--r--deps/v8/test/mjsunit/mirror-boolean.js2
-rw-r--r--deps/v8/test/mjsunit/mirror-undefined.js2
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js11
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status162
-rw-r--r--deps/v8/test/mjsunit/mul-exhaustive-part6.js2
-rw-r--r--deps/v8/test/mjsunit/nans.js4
-rw-r--r--deps/v8/test/mjsunit/never-optimize.js2
-rw-r--r--deps/v8/test/mjsunit/new-string-add.js197
-rw-r--r--deps/v8/test/mjsunit/new.js1
-rw-r--r--deps/v8/test/mjsunit/newline-in-string.js2
-rw-r--r--deps/v8/test/mjsunit/number-is.js70
-rw-r--r--deps/v8/test/mjsunit/number-tostring-func.js3
-rw-r--r--deps/v8/test/mjsunit/number-tostring-small.js1
-rw-r--r--deps/v8/test/mjsunit/number-tostring.js3
-rw-r--r--deps/v8/test/mjsunit/object-freeze.js23
-rw-r--r--deps/v8/test/mjsunit/object-literal-conversions.js1
-rw-r--r--deps/v8/test/mjsunit/object-literal-gc.js1
-rw-r--r--deps/v8/test/mjsunit/object-seal.js75
-rw-r--r--deps/v8/test/mjsunit/opt-elements-kind.js20
-rw-r--r--deps/v8/test/mjsunit/osr-elements-kind.js19
-rw-r--r--deps/v8/test/mjsunit/packed-elements.js1
-rw-r--r--deps/v8/test/mjsunit/parse-int-float.js9
-rw-r--r--deps/v8/test/mjsunit/property-object-key.js1
-rw-r--r--deps/v8/test/mjsunit/proto-accessor.js81
-rw-r--r--deps/v8/test/mjsunit/prototype.js2
-rw-r--r--deps/v8/test/mjsunit/recursive-store-opt.js41
-rw-r--r--deps/v8/test/mjsunit/regexp-indexof.js2
-rw-r--r--deps/v8/test/mjsunit/regexp-multiline-stack-trace.js116
-rw-r--r--deps/v8/test/mjsunit/regexp-results-cache.js1
-rw-r--r--deps/v8/test/mjsunit/regress-3135.js53
-rw-r--r--deps/v8/test/mjsunit/regress-330046.js61
-rw-r--r--deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js65
-rw-r--r--deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js41
-rw-r--r--deps/v8/test/mjsunit/regress/clear-keyed-call.js40
-rw-r--r--deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js47
-rw-r--r--deps/v8/test/mjsunit/regress/d8-readbuffer.js40
-rw-r--r--deps/v8/test/mjsunit/regress/internalized-string-not-equal.js40
-rw-r--r--deps/v8/test/mjsunit/regress/number-named-call-deopt.js41
-rw-r--r--deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1017.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1039610.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-105.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1066899.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1092.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1099.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1112.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1114040.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1117.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1178598.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1181.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1246.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1254366.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-131994.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-137.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1546.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1591.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1647.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-165637.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-166379.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1748.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1757.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1853.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-186.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1919169.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-193.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-20070207.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2027.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2119.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2170.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2172.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2250.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2284.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2285.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2286.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2289.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-231.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2318.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2374.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-237617.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2419.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2438.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2444.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-246.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2570.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2596.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2624.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2671-1.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2671.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2690.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-270142.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2711.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2758.js (renamed from deps/v8/test/mjsunit/bugs/bug-2758.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2790.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-298269.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2984.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2988.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2989.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-299979.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3010.js65
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3025.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3026.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3027.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3029.js45
-rwxr-xr-xdeps/v8/test/mjsunit/regress/regress-3032.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3039.js41
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3138.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3158.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3159.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-317.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-320532.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-323845.js47
-rw-r--r--deps/v8/test/mjsunit/regress/regress-324028.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-325676.js69
-rw-r--r--deps/v8/test/mjsunit/regress/regress-331416.js52
-rw-r--r--deps/v8/test/mjsunit/regress/regress-336820.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-340125.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-45469.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-483.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-490.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-588599.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-619.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-634-debug.js41
-rw-r--r--deps/v8/test/mjsunit/regress/regress-670147.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-674753.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-675.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-678525.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-734862.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-74.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-753.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-806473.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-85177.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-892742.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-925537.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-94873.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-990205.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-995.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-998565.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-add-minus-zero.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-array-pop-deopt.js41
-rw-r--r--deps/v8/test/mjsunit/regress/regress-array-pop-nonconfigurable.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-binop-nosse2.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-builtin-array-op.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-clobbered-fp-regs.js54
-rw-r--r--deps/v8/test/mjsunit/regress/regress-context-osr.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-convert-hole2.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-158185.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-178790.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-18639.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-222893.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-233737.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-244461.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-245480.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-259300.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-306220.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-315252.js61
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-3184.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-320922.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-323942.js57
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-325225.js46
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-329709.js41
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-336148.js56
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-340064.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-frame-details-null-receiver.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-is-contextual.js41
-rw-r--r--deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-keyed-access-string-length.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-lookup-transition.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-map-invalidation-1.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-param-local-type.js58
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-transcendental.js1
-rw-r--r--deps/v8/test/mjsunit/regress/setvalueof-deopt.js42
-rw-r--r--deps/v8/test/mjsunit/regress/string-set-char-deopt.js85
-rw-r--r--deps/v8/test/mjsunit/set-prototype-of.js170
-rw-r--r--deps/v8/test/mjsunit/setters-on-elements.js199
-rw-r--r--deps/v8/test/mjsunit/shift-for-integer-div.js13
-rwxr-xr-xdeps/v8/test/mjsunit/simple-constructor.js1
-rw-r--r--deps/v8/test/mjsunit/sin-cos.js139
-rw-r--r--deps/v8/test/mjsunit/smi-mul-const.js87
-rw-r--r--deps/v8/test/mjsunit/sparse-array.js2
-rw-r--r--deps/v8/test/mjsunit/stack-traces-2.js2
-rw-r--r--deps/v8/test/mjsunit/stack-traces-custom-lazy.js1
-rw-r--r--deps/v8/test/mjsunit/store-dictionary.js1
-rw-r--r--deps/v8/test/mjsunit/str-to-num.js2
-rw-r--r--deps/v8/test/mjsunit/strict-equals.js4
-rw-r--r--deps/v8/test/mjsunit/string-natives.js11
-rw-r--r--deps/v8/test/mjsunit/string-replace-gc.js1
-rw-r--r--deps/v8/test/mjsunit/string-search.js1
-rwxr-xr-xdeps/v8/test/mjsunit/string-slices.js13
-rw-r--r--deps/v8/test/mjsunit/string-split.js19
-rw-r--r--deps/v8/test/mjsunit/switch-opt.js221
-rw-r--r--deps/v8/test/mjsunit/third_party/array-isarray.js48
-rw-r--r--deps/v8/test/mjsunit/third_party/array-splice-webkit.js62
-rw-r--r--deps/v8/test/mjsunit/third_party/string-trim.js107
-rw-r--r--deps/v8/test/mjsunit/to-precision.js1
-rw-r--r--deps/v8/test/mjsunit/tobool.js2
-rw-r--r--deps/v8/test/mjsunit/tools/profile.js1
-rw-r--r--deps/v8/test/mjsunit/touint32.js2
-rw-r--r--deps/v8/test/mjsunit/transcendentals.js1
-rw-r--r--deps/v8/test/mjsunit/transition-elements-kind.js2
-rw-r--r--deps/v8/test/mjsunit/try-finally-continue.js2
-rw-r--r--deps/v8/test/mjsunit/unicode-string-to-number.js1
-rw-r--r--deps/v8/test/mjsunit/unicode-test.js2
-rw-r--r--deps/v8/test/mjsunit/value-wrapper-accessor.js99
-rw-r--r--deps/v8/test/mjsunit/value-wrapper.js1
-rw-r--r--deps/v8/test/mjsunit/verify-assert-false.js30
-rw-r--r--deps/v8/test/mjsunit/verify-check-false.js30
-rw-r--r--deps/v8/test/mjsunit/whitespaces.js115
-rw-r--r--deps/v8/test/mjsunit/with-value.js2
-rw-r--r--deps/v8/test/mozilla/mozilla.status81
-rw-r--r--deps/v8/test/preparser/non-use-strict-hex-escape.js2
-rw-r--r--deps/v8/test/preparser/non-use-strict-octal-escape.js2
-rw-r--r--deps/v8/test/preparser/non-use-strict-uhex-escape.js2
-rw-r--r--deps/v8/test/preparser/nonstrict-arguments.js1
-rw-r--r--deps/v8/test/preparser/nonstrict-eval.js1
-rw-r--r--deps/v8/test/preparser/nonstrict-with.js3
-rw-r--r--deps/v8/test/preparser/preparser.status4
-rw-r--r--deps/v8/test/preparser/strict-const.js2
-rw-r--r--deps/v8/test/preparser/strict-octal-indirect-regexp.js2
-rw-r--r--deps/v8/test/preparser/strict-octal-number.js2
-rw-r--r--deps/v8/test/preparser/strict-octal-regexp.js2
-rw-r--r--deps/v8/test/preparser/strict-octal-string.js2
-rw-r--r--deps/v8/test/preparser/strict-octal-use-strict-after.js2
-rw-r--r--deps/v8/test/preparser/strict-octal-use-strict-before.js2
-rw-r--r--deps/v8/test/preparser/strict-with.js2
-rw-r--r--deps/v8/test/test262/test262.status11
-rw-r--r--deps/v8/test/webkit/Object-create-expected.txt4
-rw-r--r--deps/v8/test/webkit/arguments-bad-index.js1
-rw-r--r--deps/v8/test/webkit/array-splice-expected.txt53
-rw-r--r--deps/v8/test/webkit/array-splice.js61
-rw-r--r--deps/v8/test/webkit/concat-while-having-a-bad-time.js2
-rw-r--r--deps/v8/test/webkit/dfg-abs-backwards-propagation.js1
-rw-r--r--deps/v8/test/webkit/dfg-add-not-number.js1
-rw-r--r--deps/v8/test/webkit/dfg-arguments-alias-escape.js1
-rw-r--r--deps/v8/test/webkit/dfg-arguments-alias-one-block-overwrite.js1
-rw-r--r--deps/v8/test/webkit/dfg-arguments-alias.js1
-rw-r--r--deps/v8/test/webkit/dfg-arguments-mixed-alias.js1
-rw-r--r--deps/v8/test/webkit/dfg-arguments-out-of-bounds.js1
-rw-r--r--deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-predicted-but-not-proven-int.js1
-rw-r--r--deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-tower-of-large-numbers.js1
-rw-r--r--deps/v8/test/webkit/dfg-array-dead.js1
-rw-r--r--deps/v8/test/webkit/dfg-array-length-dead.js1
-rw-r--r--deps/v8/test/webkit/dfg-branch-logical-not-peephole-around-osr-exit.js1
-rw-r--r--deps/v8/test/webkit/dfg-branch-not-fail.js1
-rw-r--r--deps/v8/test/webkit/dfg-call-function-hit-watchpoint.js1
-rw-r--r--deps/v8/test/webkit/dfg-call-method-hit-watchpoint.js1
-rw-r--r--deps/v8/test/webkit/dfg-cfa-prove-put-by-id-simple-when-storing-to-specialized-function.js1
-rw-r--r--deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null-and-decrement.js2
-rw-r--r--deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null.js1
-rw-r--r--deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-typeof.js1
-rw-r--r--deps/v8/test/webkit/dfg-cfg-simplify-phantom-get-local-on-same-block-set-local.js1
-rw-r--r--deps/v8/test/webkit/dfg-check-structure-elimination-for-non-cell-expected.txt2
-rw-r--r--deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js1
-rw-r--r--deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-proven-final-object.js1
-rw-r--r--deps/v8/test/webkit/dfg-constant-fold-first-local-read-after-block-merge.js1
-rw-r--r--deps/v8/test/webkit/dfg-constant-fold-logical-not-branch.js2
-rw-r--r--deps/v8/test/webkit/dfg-constant-fold-misprediction.js2
-rw-r--r--deps/v8/test/webkit/dfg-cse-cfa-discrepancy.js1
-rw-r--r--deps/v8/test/webkit/dfg-cse-dead-get-scoped-var.js1
-rw-r--r--deps/v8/test/webkit/dfg-dead-min-one-arg.js1
-rw-r--r--deps/v8/test/webkit/dfg-dead-min-two-args.js1
-rw-r--r--deps/v8/test/webkit/dfg-dead-speculation.js1
-rw-r--r--deps/v8/test/webkit/dfg-dead-variable-on-exit.js1
-rw-r--r--deps/v8/test/webkit/dfg-double-use-of-post-simplification-double-prediction.js1
-rw-r--r--deps/v8/test/webkit/dfg-double-vote-fuzz.js2
-rw-r--r--deps/v8/test/webkit/dfg-ensure-non-array-array-storage-on-window.js1
-rw-r--r--deps/v8/test/webkit/dfg-get-by-val-clobber.js1
-rw-r--r--deps/v8/test/webkit/dfg-getter-throw.js2
-rw-r--r--deps/v8/test/webkit/dfg-getter.js2
-rw-r--r--deps/v8/test/webkit/dfg-holy-put-by-val-interferes-with-get-array-length.js2
-rw-r--r--deps/v8/test/webkit/dfg-inline-arguments-osr-exit-and-capture.js1
-rw-r--r--deps/v8/test/webkit/dfg-inline-arguments-out-of-bounds.js1
-rw-r--r--deps/v8/test/webkit/dfg-inline-arguments-use-directly-from-inlined-code.js1
-rw-r--r--deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places-broken.js1
-rw-r--r--deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places.js1
-rw-r--r--deps/v8/test/webkit/dfg-inline-arguments-use-from-getter.js1
-rw-r--r--deps/v8/test/webkit/dfg-inline-function-dot-caller.js1
-rw-r--r--deps/v8/test/webkit/dfg-inline-unused-this-method-check.js1
-rw-r--r--deps/v8/test/webkit/dfg-inline-unused-this.js1
-rw-r--r--deps/v8/test/webkit/dfg-int-overflow-in-loop.js1
-rw-r--r--deps/v8/test/webkit/dfg-int-overflow-large-constants-in-a-line.js2
-rw-r--r--deps/v8/test/webkit/dfg-integer-optimization.js1
-rw-r--r--deps/v8/test/webkit/dfg-intrinsic-side-effect-assignment-osr-exit.js1
-rw-r--r--deps/v8/test/webkit/dfg-intrinsic-unused-this-method-check.js1
-rw-r--r--deps/v8/test/webkit/dfg-intrinsic-unused-this.js1
-rw-r--r--deps/v8/test/webkit/dfg-max-backwards-propagation.js1
-rw-r--r--deps/v8/test/webkit/dfg-min-backwards-propagation.js1
-rw-r--r--deps/v8/test/webkit/dfg-mispredict-variable-but-prove-int.js1
-rw-r--r--deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-bitor.js3
-rw-r--r--deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-detect-overflow.js2
-rw-r--r--deps/v8/test/webkit/dfg-mul-big-integers.js2
-rw-r--r--deps/v8/test/webkit/dfg-multi-basic-block-structure-clobber.js1
-rw-r--r--deps/v8/test/webkit/dfg-multiply.js1
-rw-r--r--deps/v8/test/webkit/dfg-negative-array-index.js1
-rw-r--r--deps/v8/test/webkit/dfg-patchable-get-by-id-after-watchpoint.js1
-rw-r--r--deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js1
-rw-r--r--deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-proven-final-object.js1
-rw-r--r--deps/v8/test/webkit/dfg-phantom-base.js1
-rw-r--r--deps/v8/test/webkit/dfg-phantom-get-local.js1
-rw-r--r--deps/v8/test/webkit/dfg-proto-access-inline-osr-exit.js1
-rw-r--r--deps/v8/test/webkit/dfg-proven-sqrt-backwards-propagation.js1
-rw-r--r--deps/v8/test/webkit/dfg-put-by-id-allocate-storage-polymorphic.js2
-rw-r--r--deps/v8/test/webkit/dfg-put-by-id-allocate-storage.js2
-rw-r--r--deps/v8/test/webkit/dfg-put-by-id-prototype-check.js1
-rw-r--r--deps/v8/test/webkit/dfg-put-by-id-reallocate-storage-polymorphic.js2
-rw-r--r--deps/v8/test/webkit/dfg-put-by-id-reallocate-storage.js2
-rw-r--r--deps/v8/test/webkit/dfg-put-by-val-setter-then-get-by-val.js1
-rw-r--r--deps/v8/test/webkit/dfg-put-scoped-var-backward-flow.js1
-rw-r--r--deps/v8/test/webkit/dfg-sqrt-backwards-propagation.js1
-rw-r--r--deps/v8/test/webkit/dfg-store-unexpected-value-into-argument-and-osr-exit.js1
-rw-r--r--deps/v8/test/webkit/dfg-string-stricteq.js1
-rw-r--r--deps/v8/test/webkit/dfg-tear-off-arguments-not-activation.js1
-rw-r--r--deps/v8/test/webkit/dfg-tear-off-function-dot-arguments.js1
-rw-r--r--deps/v8/test/webkit/dfg-to-string-on-cell.js1
-rw-r--r--deps/v8/test/webkit/dfg-to-string-on-value.js1
-rw-r--r--deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-check-structure.js1
-rw-r--r--deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-dictionary-string-prototype.js1
-rw-r--r--deps/v8/test/webkit/dfg-to-string-toString-becomes-bad.js1
-rw-r--r--deps/v8/test/webkit/dfg-to-string-toString-in-string.js1
-rw-r--r--deps/v8/test/webkit/dfg-to-string-valueOf-in-string.js1
-rw-r--r--deps/v8/test/webkit/dfg-uint32-to-number-in-middle-of-copy-propagation.js1
-rw-r--r--deps/v8/test/webkit/dfg-uint32-to-number-on-captured-variable.js1
-rw-r--r--deps/v8/test/webkit/dfg-uint32-to-number-skip-then-exit.js1
-rw-r--r--deps/v8/test/webkit/dfg-uint32-to-number.js1
-rw-r--r--deps/v8/test/webkit/dfg-uint32array-overflow-constant.js1
-rw-r--r--deps/v8/test/webkit/dfg-weak-js-constant-silent-fill.js1
-rw-r--r--deps/v8/test/webkit/exception-for-nonobject-expected.txt2
-rw-r--r--deps/v8/test/webkit/fast/js/JSON-parse-reviver.js2
-rw-r--r--deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt6
-rw-r--r--deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js4
-rw-r--r--deps/v8/test/webkit/fast/js/array-bad-time-expected.txt103
-rw-r--r--deps/v8/test/webkit/fast/js/array-bad-time.js2
-rw-r--r--deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt2
-rw-r--r--deps/v8/test/webkit/fast/js/array-slow-put-expected.txt203
-rw-r--r--deps/v8/test/webkit/fast/js/array-slow-put.js2
-rw-r--r--deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt86
-rw-r--r--deps/v8/test/webkit/fast/js/date-toisostring-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/js/deep-recursion-test.js2
-rw-r--r--deps/v8/test/webkit/fast/js/function-decompilation-operators.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/Array.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/Boolean.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/Date-setYear.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/Error.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/GlobalObject.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/Number.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/Object.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/Prototype.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/RegExp.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/arguments-scope.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/assignments.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/cast.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/comment-1.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/comment-2.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/completion.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/conditional.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/constructor_length.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/crash-1.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/crash-2.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/delete.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/empty.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/encode_decode_uri.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/eval.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/evil-n.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/exception_propagation.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/exceptions.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/func-decl.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto-expected.txt1
-rw-r--r--deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto.js3
-rw-r--r--deps/v8/test/webkit/fast/js/kde/iteration.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/j-comment-3.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/j-comment-4.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/literals.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/lval-exceptions.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/math.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/md5-1.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/md5-2.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/object_prototype.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/object_prototype_tostring.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/operators.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/parse.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/prototype_length.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/prototype_proto.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/scope.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/statements.js2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/var_decl_init.js2
-rw-r--r--deps/v8/test/webkit/fast/js/object-bad-time-expected.txt103
-rw-r--r--deps/v8/test/webkit/fast/js/object-bad-time.js2
-rw-r--r--deps/v8/test/webkit/fast/js/object-slow-put-expected.txt203
-rw-r--r--deps/v8/test/webkit/fast/js/object-slow-put.js2
-rw-r--r--deps/v8/test/webkit/fast/js/string-anchor.js2
-rw-r--r--deps/v8/test/webkit/fast/js/string-fontcolor.js2
-rw-r--r--deps/v8/test/webkit/fast/js/string-fontsize.js2
-rw-r--r--deps/v8/test/webkit/fast/js/string-link.js2
-rw-r--r--deps/v8/test/webkit/fast/js/toString-number.js2
-rw-r--r--deps/v8/test/webkit/fast/js/toString-overrides-expected.txt5
-rw-r--r--deps/v8/test/webkit/fast/regex/assertion.js1
-rw-r--r--deps/v8/test/webkit/fast/regex/constructor.js1
-rw-r--r--deps/v8/test/webkit/fast/regex/dotstar.js1
-rw-r--r--deps/v8/test/webkit/fast/regex/parentheses.js1
-rw-r--r--deps/v8/test/webkit/function-dot-apply-replace-base.js3
-rw-r--r--deps/v8/test/webkit/get-by-pname-non-final-object.js1
-rw-r--r--deps/v8/test/webkit/get-by-pname-that-looks-like-a-patchable-get-by-val.js2
-rw-r--r--deps/v8/test/webkit/get-by-pname.js1
-rw-r--r--deps/v8/test/webkit/indexed-setter-on-global-object.js1
-rw-r--r--deps/v8/test/webkit/instance-of-immediates-expected.txt8
-rw-r--r--deps/v8/test/webkit/new-array-double-with-holes.js1
-rw-r--r--deps/v8/test/webkit/regexp-in-and-foreach-handling.js1
-rw-r--r--deps/v8/test/webkit/regexp-zero-length-alternatives.js1
-rw-r--r--deps/v8/test/webkit/resolve-arguments-from-scope.js1
-rw-r--r--deps/v8/test/webkit/sort-with-side-effecting-comparisons.js2
-rw-r--r--deps/v8/test/webkit/stack-unwinding.js1
-rw-r--r--deps/v8/test/webkit/string-replacement-outofmemory-expected.txt23
-rw-r--r--deps/v8/test/webkit/string-replacement-outofmemory.js41
-rw-r--r--deps/v8/test/webkit/string-trim-expected.txt44
-rw-r--r--deps/v8/test/webkit/string-trim.js1
-rw-r--r--deps/v8/test/webkit/throw-from-finally.js1
-rw-r--r--deps/v8/test/webkit/try-catch-try-try-catch-try-finally-return-catch-finally.js1
-rw-r--r--deps/v8/test/webkit/try-try-return-finally-finally.js1
-rw-r--r--deps/v8/test/webkit/var-declarations-zero-width.js2
-rw-r--r--deps/v8/test/webkit/webkit.status18
-rwxr-xr-xdeps/v8/tools/bash-completion.sh4
-rw-r--r--deps/v8/tools/blink_tests/TestExpectations3
-rw-r--r--deps/v8/tools/consarray.js1
-rwxr-xr-xdeps/v8/tools/cross_build_gcc.sh67
-rwxr-xr-xdeps/v8/tools/draw_instruction_graph.sh130
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py2
-rw-r--r--deps/v8/tools/generate-trig-table.py83
-rwxr-xr-xdeps/v8/tools/grokdump.py13
-rw-r--r--deps/v8/tools/gyp/v8.gyp180
-rwxr-xr-x[-rw-r--r--]deps/v8/tools/js2c.py98
-rw-r--r--deps/v8/tools/lexer-shell.cc267
-rw-r--r--deps/v8/tools/lexer-shell.gyp57
-rwxr-xr-xdeps/v8/tools/merge-to-branch.sh97
-rwxr-xr-xdeps/v8/tools/presubmit.py26
-rwxr-xr-xdeps/v8/tools/push-to-trunk/auto_roll.py219
-rw-r--r--deps/v8/tools/push-to-trunk/common_includes.py519
-rwxr-xr-xdeps/v8/tools/push-to-trunk/merge_to_branch.py406
-rwxr-xr-xdeps/v8/tools/push-to-trunk/push_to_trunk.py628
-rw-r--r--deps/v8/tools/push-to-trunk/test_scripts.py991
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py12
-rwxr-xr-xdeps/v8/tools/run-tests.py135
-rw-r--r--deps/v8/tools/testrunner/local/commands.py75
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py1
-rw-r--r--deps/v8/tools/testrunner/local/progress.py1
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py12
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py23
-rw-r--r--deps/v8/tools/testrunner/local/utils.py7
-rw-r--r--deps/v8/tools/tickprocessor.js1
-rw-r--r--deps/v8/tools/v8heapconst.py321
1130 files changed, 136349 insertions, 54455 deletions
diff --git a/deps/v8/.clang-format b/deps/v8/.clang-format
new file mode 100644
index 0000000000..d9bbf504a6
--- /dev/null
+++ b/deps/v8/.clang-format
@@ -0,0 +1,4 @@
+# Defines the Google C++ style for automatic reformatting.
+# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
+BasedOnStyle: Google
+MaxEmptyLinesToKeep: 2
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 46e3a14bc1..4ef2bcca33 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -11,12 +11,16 @@ Igalia, S.L.
Joyent, Inc.
Bloomberg Finance L.P.
NVIDIA Corporation
+BlackBerry Limited
+Opera Software ASA
Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
+Alexandre Rames <alexandre.rames@arm.com>
Alexandre Vassalotti <avassalotti@gmail.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
+Baptiste Afsa <baptiste.afsa@arm.com>
Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
@@ -29,6 +33,7 @@ Fedor Indutny <fedor@indutny.com>
Filipe David Manana <fdmanana@gmail.com>
Haitao Feng <haitao.feng@intel.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
+Jacob Bramley <jacob.bramley@arm.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
@@ -53,10 +58,11 @@ Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
-Sandro Santilli <strk@keybit.net>
+Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
+Vincent Belliard <vincent.belliard@arm.com>
Vlad Burlik <vladbph@gmail.com>
Xi Qian <xi.qian@intel.com>
Yuqiang Xian <yuqiang.xian@intel.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 97895d3693..443f8d5ba5 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,531 @@
+2014-02-19: Version 3.24.40
+
+ A64: Let the MacroAssembler resolve branches to distant targets (issue
+ 3148).
+
+ Fixed and improved code for integral division. Fixed and extended tests
+ (issue 3151).
+
+ MIPS: Fix assignment of function name constant (issue 3138).
+
+ Fix assignment of function name constant (issue 3138).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-14: Version 3.24.39
+
+ Introduce --job-based-sweeping flag and use individual jobs for sweeping
+ if set (issue 3104).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-13: Version 3.24.38
+
+ Merge experimental/a64 to bleeding_edge (issue 3113).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-12: Version 3.24.37
+
+ Fix spec violations in JSON.stringify wrt replacer array (issue 3135).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-11: Version 3.24.36
+
+ Fix inconsistencies wrt whitespaces (issue 3109).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-10: Version 3.24.35
+
+ Fix inconsistencies wrt whitespaces (issue 3109).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-07: Version 3.24.34
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-06: Version 3.24.33
+
+ Allow externalizing strings in old pointer space (Chromium issue
+ 268686).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-05: Version 3.24.32
+
+ Add Box object to heap profiler.
+
+ Check the offset argument of TypedArray.set for fitting into Smi
+ (Chromium issue 340125).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-04: Version 3.24.31
+
+ Fix short-circuiting logical and/or in HOptimizedGraphBuilder (Chromium
+ issue 336148).
+
+ Elements field of newly allocated JSArray could be left uninitialized in
+ some cases (fast literal case) (Chromium issue 340124).
+
+ Re-enable escape analysis.
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-03: Version 3.24.30
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-01: Version 3.24.29
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-31: Version 3.24.28
+
+ Don't crash in Array.join() if the resulting string exceeds the max
+ string length (Chromium issue 336820).
+
+ Implements ES6 String.prototype.normalize method (issue 2943).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-30: Version 3.24.27
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-29: Version 3.24.26
+
+        ES6: Map and Set need to normalize minus zero (issue 3069).
+
+ Make `String.prototype.{starts,ends}With` throw when passing a regular
+ expression (issue 3070).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-28: Version 3.24.25
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-27: Version 3.24.24
+
+ MIPS: Reduce the stack requirements of GetNoCodeAgeSequence (issue
+ 3111).
+
+ Delete v8_shell target now that chrome uses d8 (Chromium issue 331585).
+
+ ARM: Reduce the stack requirements of GetNoCodeAgeSequence (issue 3111).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-24: Version 3.24.23
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-23: Version 3.24.22
+
+ Fix compilation on x64 architectures (issue 3110).
+
+ Ensure we don't overwrite transitions in SetPropertyIgnoreAttributes
+ (Chromium issue 326155).
+
+ ES6: Implement Object.setPrototypeOf (issue 2675).
+
+ Fixed floor-of-div optimization (Chromium issue 334708).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-22: Version 3.24.21
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-21: Version 3.24.20
+
+ ES6: Implement Object.setPrototypeOf (issue 2675).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-20: Version 3.24.19
+
+ Introduce a setting to control the toolset for which d8 is compiled
+ (issue 1775).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-17: Version 3.24.18
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-16: Version 3.24.17
+
+ Make cells pointing to JSObjects weak in optimized code (issue 2073).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-15: Version 3.24.16
+
+ Annotate mapped memory regions for LeakSanitizer (Chromium issue
+ 328552).
+
+ Turn Runtime_MigrateInstance into Runtime_TryMigrateInstance (Chromium
+ issue 315252).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-14: Version 3.24.15
+
+ Introduce an API mirroring the gc extension.
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-10: Version 3.24.14
+
+ ES6: Add Object.getOwnPropertySymbols (issue 3049).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-09: Version 3.24.13
+
+ Add Isolate parameter to HandleScope::NumberOfHandles (Chromium issue
+ 324225).
+
+ Removed v8::AssertNoGCScope.
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-08: Version 3.24.12
+
+ Correctly handle instances without elements in polymorphic keyed
+ load/store (Chromium issue 331416).
+
+ Fix selection of popular pages in store buffer (Chromium issue 331444).
+
+ Prepare removal of ObjectTemplate::New without Isolate parameter
+ (Chromium issue 324225).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-07: Version 3.24.11
+
+ Remove generated makefiles on linux when running gyp_v8
+ (Chromium issue 331475)
+
+ Fix building d8 with readline support due to API changes
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-03: Version 3.24.10
+
+ Reland r18383: More API cleanup (Chromium issue 324225).
+
+ MIPS: Fix loading of global object in LWrapReceiver (Chromium issue
+ 318420).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-01-02: Version 3.24.9
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-30: Version 3.24.8
+
+ ARM: fix loading of global object in LWrapReceiver (Chromium issue
+ 318420).
+
+ Fix a race between concurrent recompilation and OSR (Chromium issue
+ 330046).
+
+ Turn off concurrent sweeping (issue 3071).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-23: Version 3.24.7
+
+ Fix small spec violation in String.prototype.split (issue 3026).
+
+ Correctly resolve forcibly context allocated parameters in debug-
+ evaluate (Chromium issue 325676).
+
+ Introduce Function::GetBoundFunction.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-20: Version 3.24.6
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-19: Version 3.24.5
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-18: Version 3.24.4
+
+ Removed all stuff marked as V8_DEPRECATED.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-17: Version 3.24.3
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-17: Version 3.24.2
+
+ Flush instruction cache for deserialized code objects.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-13: Version 3.24.1
+
+ Fix polymorphic inlined calls with migrating prototypes.
+
+ Fixed global object leak caused by overwriting the global receiver (the
+ global proxy) in the global object with the global object itself
+ (Chromium issue 324812).
+
+        Initialize Date parse cache with SMI instead of double to work around
+ sharing mutable heap numbers in snapshot (Chromium issue 280531).
+
+ Switch armv7 setting to arm_version==7 in v8 gyp files (Chromium issue
+ 234135).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-09: Version 3.24.0
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-04: Version 3.23.18
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-03: Version 3.23.17
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-02: Version 3.23.16
+
+ Array builtins need to be prevented from changing frozen objects, and
+ changing structure on sealed objects (Chromium issue 299979).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-29: Version 3.23.15
+
+ Fix context register allocation in LTransitionElementsKind
+ (Chromium issue 324306).
+
+ Fix bug in inlining Function.apply (Chromium issue 323942).
+
+ Ensure that length is Smi in TypedArrayFromArrayLike constructor
+ (Chromium issue 324028).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-28: Version 3.23.14
+
+ Shorten autogenerated error message (issue 3019).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-27: Version 3.23.13
+
+ Increase precision for base conversion for large integers (issue 3025).
+
+ Flatten cons string for single character substrings (Chromium issue
+ 323041).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-26: Version 3.23.12
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-25: Version 3.23.11
+
+ Deprecate old versions of Isolate::SetData and GetData.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-22: Version 3.23.10
+
+ Remove preemption thread and API.
+ (issue 3004)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-21: Version 3.23.9
+
+ API: Change AdjustAmountOfExternalAllocatedMemory calls to use int64_t
+ instead of intptr_t.
+
+ Remove deprecated v8::SetResourceConstraints without Isolate parameter.
+
+ Remove deprecated v8-defaults.h and defaults.cc.
+ (Chromium issue 312241)
+
+ Make it possible to add more than one piece of embedder data to
+ isolates.
+ (Chromium issue 317398)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-20: Version 3.23.8
+
+        Fixed crashes exposed through fuzzing.
+ (Chromium issue 320948)
+
+ Deprecated v8::External::New without Isolate parameter.
+
+ Made number of available threads isolate-dependent and exposed it to
+ ResourceConstraints.
+ (issue 2991)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-19: Version 3.23.7
+
+ Bugfix: dependent code field in AllocationSite was keeping code objects
+ alive even after context death.
+ (Chromium issue 320532)
+
+        Fixed data view accessors to throw exceptions on offsets bigger than
+ size_t.
+ (issue 3013)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-18: Version 3.23.6
+
+ Limit size of dehoistable array indices.
+ (Chromium issues 319835, 319860)
+
+ Limit the size for typed arrays to MaxSmi.
+ (Chromium issue 319722)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-15: Version 3.23.5
+
+ Fixed missing type feedback check for Generic*String addition.
+ (Chromium issue 318671)
+
+ Fixed duplicate check in DependentCode::Insert.
+ (Chromium issue 318454)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-14: Version 3.23.4
+
+ Fixed overflow in TypedArray initialization function.
+ (Chromium issue 319120)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-13: Version 3.23.3
+
+ Fixed compilation with GCC 4.8.
+ (issue 2767, 2149)
+
+ Added explicit Isolate parameter to External::New.
+ (Chromium issue 266838)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-12: Version 3.23.2
+
+ Fixed --extra-code flag for snapshot creation.
+ (issue 2994)
+
+ Fixed error message wording when instanceof throws.
+ (Chromium issue 82797, issue 1593)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-08: Version 3.23.1
+
+ Made HCapturedObjects non-deletable for DCE. (issue 2987)
+
+        Use a fixed random seed by default. (issue 1880, 2885)
+
+ Fixed y-umlaut to uppercase. (issue 2984)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-06: Version 3.23.0
+
+ Fixed loading message from an Error object. (Chromium issue 306220)
+
+ Made Object.freeze/seal/preventExtensions observable. (issue 2975, 2941)
+
+ Made snapshots reproducible. (issue 2885)
+
+ Added missing negative dictionary lookup to NonexistentHandlerFrontend.
+ (issue 2980)
+
+ Performance and stability improvements on all platforms.
+
+
2013-10-31: Version 3.22.24
Fixed uint32-to-smi conversion in Lithium.
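Several of the ChangeLog entries above change the embedder-facing API rather than V8 internals: AdjustAmountOfExternalAllocatedMemory now takes an int64_t delta (3.23.9), and isolates can hold more than one piece of embedder data, addressed by slot (3.23.9, with the old single-slot Isolate::SetData/GetData deprecated in 3.23.11). A minimal sketch of embedder code adapting to both changes follows; the function name, slot index, and buffer bookkeeping are illustrative assumptions, not code from this commit.

    // Sketch: adapting embedder code to the 3.23.x API changes listed above.
    // Assumes a v8::Isolate* obtained elsewhere; slot 0 is an arbitrary choice.
    #include "v8.h"

    void TrackExternalBuffer(v8::Isolate* isolate, size_t buffer_size,
                             void* embedder_state) {
      // The delta parameter is now a signed 64-bit value instead of intptr_t.
      isolate->AdjustAmountOfExternalAllocatedMemory(
          static_cast<int64_t>(buffer_size));

      // Embedder data is now addressed by slot rather than a single pointer.
      const uint32_t kEmbedderSlot = 0;  // illustrative slot index
      isolate->SetData(kEmbedderSlot, embedder_state);
    }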
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index b91ae4e766..66d21eb364 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -5,10 +5,10 @@
deps = {
# Remember to keep the revision in sync with the Makefile.
"v8/build/gyp":
- "http://gyp.googlecode.com/svn/trunk@1685",
+ "http://gyp.googlecode.com/svn/trunk@1831",
"v8/third_party/icu":
- "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@214189",
+ "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@239289",
}
deps_os = {
diff --git a/deps/v8/LICENSE b/deps/v8/LICENSE
index 2e516bab62..2f5bce8369 100644
--- a/deps/v8/LICENSE
+++ b/deps/v8/LICENSE
@@ -26,7 +26,7 @@ are:
These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below.
-Copyright 2006-2012, the V8 project authors. All rights reserved.
+Copyright 2014, the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index bbec44076e..9491412155 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -94,7 +94,7 @@ ifeq ($(vtunejit), on)
endif
# optdebug=on
ifeq ($(optdebug), on)
- GYPFLAGS += -Dv8_optimized_debug=1
+ GYPFLAGS += -Dv8_optimized_debug=2
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
@@ -104,6 +104,10 @@ endif
ifeq ($(unalignedaccess), on)
GYPFLAGS += -Dv8_can_use_unaligned_accesses=true
endif
+# randomseed=12345, disable random seed via randomseed=0
+ifdef randomseed
+ GYPFLAGS += -Dv8_random_seed=$(randomseed)
+endif
# soname_version=1.2.3
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)
@@ -134,12 +138,17 @@ ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
endif
# arm specific flags.
-# armv7=false/true
+# arm_version=<number | "default">
+ifneq ($(strip $(arm_version)),)
+ GYPFLAGS += -Darm_version=$(arm_version)
+else
+# Deprecated (use arm_version instead): armv7=false/true
ifeq ($(armv7), false)
- GYPFLAGS += -Darmv7=0
+ GYPFLAGS += -Darm_version=6
else
ifeq ($(armv7), true)
- GYPFLAGS += -Darmv7=1
+ GYPFLAGS += -Darm_version=7
+endif
endif
endif
# vfp2=off. Deprecated, use armfpu=
@@ -214,10 +223,11 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm mipsel
+ARCHES = ia32 x64 arm a64 mipsel
DEFAULT_ARCHES = ia32 x64 arm
-MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm android_mipsel
+MODES = release debug optdebug
+DEFAULT_MODES = release debug
+ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
@@ -243,6 +253,7 @@ NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new native \
+ qc quickcheck \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
@@ -251,7 +262,7 @@ ENVFILE = $(OUTDIR)/environment
must-set-NACL_SDK_ROOT
# Target definitions. "all" is the default.
-all: $(MODES)
+all: $(DEFAULT_MODES)
# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
# having been created before.
@@ -267,14 +278,15 @@ mips mips.release mips.debug:
.SECONDEXPANSION:
$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
-$(ARCHES): $(addprefix $$@.,$(MODES))
+$(ARCHES): $(addprefix $$@.,$(DEFAULT_MODES))
# Defines how to build a particular target (e.g. ia32.release).
-$(BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
- @$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
+$(BUILDS): $(OUTDIR)/Makefile.$$@
+ @$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CXX="$(CXX)" LINK="$(LINK)" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
- python -c "print raw_input().capitalize()") \
+ python -c "print \
+ raw_input().replace('opt', '').capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
native: $(OUTDIR)/Makefile.native
@@ -346,32 +358,47 @@ native.check: native
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
+FASTTESTMODES = ia32.release,x64.release,ia32.optdebug,x64.optdebug,arm.optdebug
+FASTCOMPILEMODES = $(FASTTESTMODES),a64.optdebug
+
+COMMA = ,
+EMPTY =
+SPACE = $(EMPTY) $(EMPTY)
+quickcheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
+ tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ --arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
+qc: quickcheck
+
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
- rm -f $(OUTDIR)/Makefile.$(basename $@)
+ rm -f $(OUTDIR)/Makefile.$(basename $@)*
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
- find $(OUTDIR) -regex '.*\(host\|target\).$(basename $@)\.mk' -delete
+ rm -rf $(OUTDIR)/$(basename $@).optdebug
+ find $(OUTDIR) -regex '.*\(host\|target\)\.$(basename $@).*\.mk' -delete
native.clean:
rm -f $(OUTDIR)/Makefile.native
rm -rf $(OUTDIR)/native
- find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
+ find $(OUTDIR) -regex '.*\(host\|target\)\.native\.mk' -delete
clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean
# GYP file generation targets.
-OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
+OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS))
$(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
+ PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
- -Dv8_target_arch=$(subst .,,$(suffix $@)) \
- -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
+ -Dv8_target_arch=$(subst .,,$(suffix $(basename $@))) \
+ -Dv8_optimized_debug=$(if $(findstring optdebug,$@),2,0) \
+ -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
+ PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
@@ -417,7 +444,7 @@ grokdump: ia32.release
# Remember to keep these in sync with the DEPS file.
dependencies:
svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
- --revision 1685
+ --revision 1831
svn checkout --force \
https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \
- third_party/icu --revision 214189
+ third_party/icu --revision 239289
diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android
index 2d45d3bb12..ea2c6e284b 100644
--- a/deps/v8/Makefile.android
+++ b/deps/v8/Makefile.android
@@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@@ -48,25 +48,41 @@ endif
ifeq ($(ARCH), android_arm)
DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm
- DEFINES += arm_neon=0 armv7=1
- TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
+ DEFINES += arm_neon=0 arm_version=7
+ TOOLCHAIN_ARCH = arm-linux-androideabi
+ TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+ TOOLCHAIN_VER = 4.6
else
- ifeq ($(ARCH), android_mipsel)
- DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_arch=mips
- DEFINES += mips_arch_variant=mips32r2
- TOOLCHAIN_ARCH = mipsel-linux-android-4.6
+ ifeq ($(ARCH), android_a64)
+ DEFINES = target_arch=a64 v8_target_arch=a64 android_target_arch=arm64
+ TOOLCHAIN_ARCH = aarch64-linux-android
+ TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+ TOOLCHAIN_VER = 4.8
else
- ifeq ($(ARCH), android_ia32)
- DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
- TOOLCHAIN_ARCH = x86-4.6
+ ifeq ($(ARCH), android_mipsel)
+ DEFINES = target_arch=mipsel v8_target_arch=mipsel
+ DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
+ TOOLCHAIN_ARCH = mipsel-linux-android
+ TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+ TOOLCHAIN_VER = 4.6
+
else
- $(error Target architecture "${ARCH}" is not supported)
+ ifeq ($(ARCH), android_ia32)
+ DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
+ TOOLCHAIN_ARCH = x86
+ TOOLCHAIN_PREFIX = i686-linux-android
+ TOOLCHAIN_VER = 4.6
+ else
+ $(error Target architecture "${ARCH}" is not supported)
+ endif
endif
endif
endif
-TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
+TOOLCHAIN_PATH = \
+ ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}-${TOOLCHAIN_VER}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
+
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
@@ -77,26 +93,26 @@ endif
DEFINES += host_os=${HOST_OS}
.SECONDEXPANSION:
-$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
- @$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
- CXX="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
- AR="$(ANDROID_TOOLCHAIN)/bin/*-ar" \
- RANLIB="$(ANDROID_TOOLCHAIN)/bin/*-ranlib" \
- CC="$(ANDROID_TOOLCHAIN)/bin/*-gcc" \
- LD="$(ANDROID_TOOLCHAIN)/bin/*-ld" \
- LINK="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
- BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
- python -c "print raw_input().capitalize()") \
- builddir="$(shell pwd)/$(OUTDIR)/$@"
+$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
+ @$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
+ CXX="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
+ AR="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ar" \
+ RANLIB="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ranlib" \
+ CC="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-gcc" \
+ LD="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ld" \
+ LINK="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
+ BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
+ python -c "print raw_input().capitalize()") \
+ builddir="$(shell pwd)/$(OUTDIR)/$@"
# Android GYP file generation targets.
-ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_ARCHES))
+ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
- @GYP_GENERATORS=make-android \
+ GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
- CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
- CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
+ CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
+ CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
- -S.${ARCH} ${GYPFLAGS}
+ -S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}
diff --git a/deps/v8/Makefile.nacl b/deps/v8/Makefile.nacl
index 2c79ef113e..fc3eb28ae0 100644
--- a/deps/v8/Makefile.nacl
+++ b/deps/v8/Makefile.nacl
@@ -77,11 +77,11 @@ GYPENV += host_os=${HOST_OS}
# ICU doesn't support NaCl.
GYPENV += v8_enable_i18n_support=0
-NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_ARCHES))
+NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_BUILDS))
.SECONDEXPANSION:
# For some reason the $$(basename $$@) expansion didn't work here...
$(NACL_BUILDS): $(NACL_MAKEFILES)
- @$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
+ @$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CXX=${NACL_CXX} \
LINK=${NACL_LINK} \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
@@ -90,12 +90,12 @@ $(NACL_BUILDS): $(NACL_MAKEFILES)
# NACL GYP file generation targets.
$(NACL_MAKEFILES):
- @GYP_GENERATORS=make \
+ GYP_GENERATORS=make \
GYP_DEFINES="${GYPENV}" \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
- -S.$(subst .,,$(suffix $@)) $(GYPFLAGS) \
+ -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) \
-Dwno_array_bounds=-Wno-array-bounds
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 450e9b217c..186fc10dbb 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -1,10 +1,14 @@
bmeurer@chromium.org
danno@chromium.org
+dcarney@chromium.org
dslomov@chromium.org
hpayer@chromium.org
ishell@chromium.org
+jarin@chromium.org
jkummerow@chromium.org
+jochen@chromium.org
machenbach@chromium.org
+marja@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
rossberg@chromium.org
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 75e16e3bd7..fe15157dde 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -69,15 +69,28 @@ def _SkipTreeCheck(input_api, output_api):
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
+def _CheckChangeLogFlag(input_api, output_api):
+ """Checks usage of LOG= flag in the commit message."""
+ results = []
+ if input_api.change.BUG and not 'LOG' in input_api.change.tags:
+ results.append(output_api.PresubmitError(
+ 'An issue reference (BUG=) requires a change log flag (LOG=). '
+ 'Use LOG=Y for including this commit message in the change log. '
+ 'Use LOG=N or leave blank otherwise.'))
+ return results
+
+
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
+ results.extend(_CheckChangeLogFlag(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
+ results.extend(_CheckChangeLogFlag(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
if not _SkipTreeCheck(input_api, output_api):
diff --git a/deps/v8/build/all.gyp b/deps/v8/build/all.gyp
index ad71fb0e45..5fbd8c28e7 100644
--- a/deps/v8/build/all.gyp
+++ b/deps/v8/build/all.gyp
@@ -12,7 +12,13 @@
'../src/d8.gyp:d8',
'../test/cctest/cctest.gyp:*',
],
+ 'conditions': [
+ ['component!="shared_library"', {
+ 'dependencies': [
+ '../tools/lexer-shell.gyp:lexer-shell',
+ ],
+ }],
+ ]
}
]
}
-
diff --git a/deps/v8/build/android.gypi b/deps/v8/build/android.gypi
index ca097255bb..2f32be0e81 100644
--- a/deps/v8/build/android.gypi
+++ b/deps/v8/build/android.gypi
@@ -146,7 +146,7 @@
'-Wl,--icf=safe',
],
}],
- ['target_arch=="arm" and armv7==1', {
+ ['target_arch=="arm" and arm_version==7', {
'cflags': [
'-march=armv7-a',
'-mtune=cortex-a8',
@@ -164,12 +164,12 @@
'-I<(android_stlport_include)',
],
'conditions': [
- ['target_arch=="arm" and armv7==1', {
+ ['target_arch=="arm" and arm_version==7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
- ['target_arch=="arm" and armv7==0', {
+ ['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
@@ -184,6 +184,11 @@
'-L<(android_stlport_libs)/x86',
],
}],
+ ['target_arch=="a64"', {
+ 'ldflags': [
+ '-L<(android_stlport_libs)/arm64',
+ ],
+ }],
],
}],
['target_arch=="ia32"', {
@@ -208,10 +213,19 @@
],
'target_conditions': [
['_type=="executable"', {
+ 'conditions': [
+ ['target_arch=="a64"', {
+ 'ldflags': [
+ '-Wl,-dynamic-linker,/system/bin/linker64',
+ ],
+ }, {
+ 'ldflags': [
+ '-Wl,-dynamic-linker,/system/bin/linker',
+ ],
+ }]
+ ],
'ldflags': [
'-Bdynamic',
- '-Wl,-dynamic-linker,/system/bin/linker',
- '-Wl,--gc-sections',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
index 7863b1c43a..85b8a38465 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/build/features.gypi
@@ -58,6 +58,9 @@
# Enable compiler warnings when using V8_DEPRECATED apis.
'v8_deprecation_warnings%': 0,
+
+ # Use the v8 provided v8::Platform implementation.
+ 'v8_use_default_platform%': 1,
},
'target_defaults': {
'conditions': [
@@ -85,6 +88,9 @@
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
+ ['v8_use_default_platform==1', {
+ 'defines': ['V8_USE_DEFAULT_PLATFORM',],
+ }],
['v8_compress_startup_data=="bz2"', {
'defines': [
'COMPRESS_STARTUP_DATA_BZ2',
@@ -109,7 +115,7 @@
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
- 'v8_enable_handle_zapping%': 0,
+ 'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_extra_checks==1', {
diff --git a/deps/v8/build/gyp_v8 b/deps/v8/build/gyp_v8
index 92e6503925..f2a60d1b2c 100755
--- a/deps/v8/build/gyp_v8
+++ b/deps/v8/build/gyp_v8
@@ -36,13 +36,8 @@ import platform
import shlex
import sys
-script_dir = os.path.dirname(__file__)
-v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
-
-if __name__ == '__main__':
- os.chdir(v8_root)
- script_dir = os.path.dirname(__file__)
- v8_root = '.'
+script_dir = os.path.dirname(os.path.realpath(__file__))
+v8_root = os.path.abspath(os.path.join(script_dir, os.pardir))
sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
import gyp
@@ -142,10 +137,7 @@ if __name__ == '__main__':
# path separators even on Windows due to the use of shlex.split().
args.extend(shlex.split(gyp_file))
else:
- # Note that this must not start with "./" or things break.
- # So we rely on having done os.chdir(v8_root) above and use the
- # relative path.
- args.append(os.path.join('build', 'all.gyp'))
+ args.append(os.path.join(script_dir, 'all.gyp'))
args.extend(['-I' + i for i in additional_include_files(args)])
@@ -153,7 +145,7 @@ if __name__ == '__main__':
args.append('--no-circular-check')
# Set the GYP DEPTH variable to the root of the V8 project.
- args.append('--depth=' + v8_root)
+ args.append('--depth=' + os.path.relpath(v8_root))
# If V8_GYP_SYNTAX_CHECK is set to 1, it will invoke gyp with --check
# to enforce syntax checking.
@@ -167,5 +159,12 @@ if __name__ == '__main__':
# Generate for the architectures supported on the given platform.
gyp_args = list(args)
if platform.system() == 'Linux':
+    # Workaround for crbug.com/331475.
+ for f in glob.glob(os.path.join(v8_root, 'out', 'Makefile.*')):
+ os.unlink(f)
+ # --generator-output defines where the Makefile goes.
gyp_args.append('--generator-output=out')
+ # -Goutput_dir defines where the build output goes, relative to the
+ # Makefile. Set it to . so that the build output doesn't end up in out/out.
+ gyp_args.append('-Goutput_dir=.')
run_gyp(gyp_args)
diff --git a/deps/v8/build/gyp_v8.py b/deps/v8/build/gyp_v8.py
new file mode 100644
index 0000000000..462ee674ac
--- /dev/null
+++ b/deps/v8/build/gyp_v8.py
@@ -0,0 +1,41 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This file is (possibly, depending on python version) imported by
+# gyp_v8 when GYP_PARALLEL=1 and it creates sub-processes through the
+# multiprocessing library.
+
+# Importing in Python 2.6 (fixed in 2.7) on Windows doesn't search for imports
+# that don't end in .py (and aren't directories with an __init__.py). This
+# wrapper makes "import gyp_v8" work with those old versions and makes it
+# possible to execute gyp_v8.py directly on Windows where the extension is
+# useful.
+
+import os
+
+path = os.path.abspath(os.path.split(__file__)[0])
+execfile(os.path.join(path, 'gyp_v8'))
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index 4cb5e00bcc..cae63fe7ac 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -37,6 +37,7 @@
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
+ 'v8_deprecation_warnings': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
@@ -44,14 +45,18 @@
'variables': {
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
- OS=="netbsd" or OS=="mac"', {
+ OS=="netbsd" or OS=="mac" or OS=="qnx"', {
# This handles the Unix platforms we generally deal with.
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
# to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;\
- s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")',
+ s/x86_64/x64/;\
+ s/amd64/x64/;\
+ s/aarch64/a64/;\
+ s/arm.*/arm/;\
+ s/mips.*/mipsel/")',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
@@ -96,9 +101,10 @@
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
+ (v8_target_arch=="a64" and host_arch!="a64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
- (OS=="android")', {
+ (OS=="android" or OS=="qnx")', {
'want_separate_host_toolset': 1,
}, {
'want_separate_host_toolset': 0,
@@ -116,7 +122,7 @@
}],
],
# Default ARM variable settings.
- 'armv7%': 'default',
+ 'arm_version%': 'default',
'arm_neon%': 0,
'arm_fpu%': 'vfpv3',
'arm_float_abi%': 'default',
@@ -185,6 +191,32 @@
}],
# 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"
# or OS=="netbsd"'
+ ['OS=="qnx"', {
+ 'target_defaults': {
+ 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
+ '-fno-exceptions' ],
+ 'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti' ],
+ 'conditions': [
+ [ 'visibility=="hidden"', {
+ 'cflags': [ '-fvisibility=hidden' ],
+ }],
+ [ 'component=="shared_library"', {
+ 'cflags': [ '-fPIC' ],
+ }],
+ ],
+ 'target_conditions': [
+ [ '_toolset=="host" and host_os=="linux"', {
+ 'cflags': [ '-pthread' ],
+ 'ldflags': [ '-pthread' ],
+ 'libraries': [ '-lrt' ],
+ }],
+ [ '_toolset=="target"', {
+ 'cflags': [ '-Wno-psabi' ],
+ 'libraries': [ '-lbacktrace', '-lsocket', '-lm' ],
+ }],
+ ],
+ },
+ }], # OS=="qnx"
['OS=="win"', {
'target_defaults': {
'defines': [
@@ -272,8 +304,6 @@
'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES',
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
- 'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES', # -Werror
- 'GCC_VERSION': 'com.apple.compilers.llvmgcc42',
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
'GCC_WARN_NON_VIRTUAL_DESTRUCTOR': 'YES', # -Wnon-virtual-dtor
# MACOSX_DEPLOYMENT_TARGET maps to -mmacosx-version-min
@@ -291,6 +321,13 @@
'-Wno-unused-parameter',
],
},
+ 'conditions': [
+ ['werror==""', {
+ 'xcode_settings': {'GCC_TREAT_WARNINGS_AS_ERRORS': 'NO'},
+ }, {
+ 'xcode_settings': {'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES'},
+ }],
+ ],
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']},
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index de41fe0d00..c2a3300733 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -70,6 +70,11 @@
# it's handled in build/standalone.gypi.
'want_separate_host_toolset%': 1,
+ # Toolset the d8 binary should be compiled for. Possible values are 'host'
+ # and 'target'. If you want to run v8 tests, it needs to be set to 'target'.
+ # The setting is ignored if want_separate_host_toolset is 0.
+ 'v8_toolset_for_d8%': 'target',
+
'host_os%': '<(OS)',
'werror%': '-Werror',
# For a shared library build, results in "libv8-<(soname_version).so".
@@ -92,10 +97,10 @@
'conditions': [
['armcompiler=="yes"', {
'conditions': [
- [ 'armv7==1', {
+ [ 'arm_version==7', {
'cflags': ['-march=armv7-a',],
}],
- [ 'armv7==1 or armv7=="default"', {
+ [ 'arm_version==7 or arm_version=="default"', {
'conditions': [
[ 'arm_neon==1', {
'cflags': ['-mfpu=neon',],
@@ -127,7 +132,7 @@
}, {
# armcompiler=="no"
'conditions': [
- [ 'armv7==1 or armv7=="default"', {
+ [ 'arm_version==7 or arm_version=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
@@ -180,10 +185,10 @@
'conditions': [
['armcompiler=="yes"', {
'conditions': [
- [ 'armv7==1', {
+ [ 'arm_version==7', {
'cflags': ['-march=armv7-a',],
}],
- [ 'armv7==1 or armv7=="default"', {
+ [ 'arm_version==7 or arm_version=="default"', {
'conditions': [
[ 'arm_neon==1', {
'cflags': ['-mfpu=neon',],
@@ -215,7 +220,7 @@
}, {
# armcompiler=="no"
'conditions': [
- [ 'armv7==1 or armv7=="default"', {
+ [ 'arm_version==7 or arm_version=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
@@ -263,6 +268,11 @@
}], # _toolset=="target"
],
}], # v8_target_arch=="arm"
+ ['v8_target_arch=="a64"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_A64',
+ ],
+ }],
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
@@ -357,7 +367,7 @@
},
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
- or OS=="netbsd"', {
+ or OS=="netbsd" or OS=="qnx"', {
'conditions': [
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing' ],
@@ -368,7 +378,7 @@
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
- or OS=="netbsd" or OS=="mac" or OS=="android") and \
+ or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="mipsel")', {
# Check whether the host compiler and target compiler support the
@@ -376,7 +386,7 @@
'target_conditions': [
['_toolset=="host"', {
'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+ 'm32flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
@@ -386,11 +396,11 @@
}],
['_toolset=="target"', {
'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+ 'm32flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
'clang%': 0,
},
'conditions': [
- ['(OS!="android" or clang==1) and \
+ ['((OS!="android" and OS!="qnx") or clang==1) and \
nacl_target_arch!="nacl_x64"', {
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
@@ -402,20 +412,21 @@
}],
],
}],
- ['(OS=="linux") and (v8_target_arch=="x64")', {
+ ['(OS=="linux" or OS=="android") and \
+ (v8_target_arch=="x64" or v8_target_arch=="a64")', {
# Check whether the host compiler and target compiler support the
# '-m64' option and set it if so.
'target_conditions': [
['_toolset=="host"', {
'variables': {
- 'm64flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1) && echo "-m64" || true)',
+ 'm64flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
}],
['_toolset=="target"', {
'variables': {
- 'm64flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1) && echo "-m64" || true)',
+ 'm64flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
@@ -504,10 +515,12 @@
},
},
'conditions': [
- ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
+ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
+ OS=="qnx"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
- '<(wno_array_bounds)' ],
+ '<(wno_array_bounds)',
+ ],
'conditions': [
['v8_optimized_debug==0', {
'cflags!': [
diff --git a/deps/v8/codereview.settings b/deps/v8/codereview.settings
new file mode 100644
index 0000000000..3f642f13bd
--- /dev/null
+++ b/deps/v8/codereview.settings
@@ -0,0 +1,7 @@
+CODE_REVIEW_SERVER: https://codereview.chromium.org
+CC_LIST: v8-dev@googlegroups.com
+VIEW_VC: https://code.google.com/p/v8/source/detail?r=
+STATUS: http://v8-status.appspot.com/status
+TRY_ON_UPLOAD: False
+TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8
+TRYSERVER_ROOT: v8
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
new file mode 100644
index 0000000000..75fddd59a8
--- /dev/null
+++ b/deps/v8/include/v8-platform.h
@@ -0,0 +1,86 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8_PLATFORM_H_
+#define V8_V8_PLATFORM_H_
+
+#include "v8.h"
+
+namespace v8 {
+
+/**
+ * A Task represents a unit of work.
+ */
+class Task {
+ public:
+ virtual ~Task() {}
+
+ virtual void Run() = 0;
+};
+
+/**
+ * V8 Platform abstraction layer.
+ *
+ * The embedder has to provide an implementation of this interface before
+ * initializing the rest of V8.
+ */
+class Platform {
+ public:
+ /**
+ * This enum is used to indicate whether a task is potentially long running,
+ * or causes a long wait. The embedder might want to use this hint to decide
+ * whether to execute the task on a dedicated thread.
+ */
+ enum ExpectedRuntime {
+ kShortRunningTask,
+ kLongRunningTask
+ };
+
+ /**
+ * Schedules a task to be invoked on a background thread. |expected_runtime|
+ * indicates that the task will run a long time. The Platform implementation
+ * takes ownership of |task|. There is no guarantee about order of execution
+ * of tasks wrt order of scheduling, nor is there a guarantee about the
+ * thread the task will be run on.
+ */
+ virtual void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) = 0;
+
+ /**
+ * Schedules a task to be invoked on a foreground thread wrt a specific
+   * |isolate|. Tasks posted for the same isolate should be executed in order of
+ * scheduling. The definition of "foreground" is opaque to V8.
+ */
+ virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
+
+ protected:
+ virtual ~Platform() {}
+};
+
+} // namespace v8
+
+#endif // V8_V8_PLATFORM_H_
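The new v8-platform.h header above requires the embedder to supply a v8::Platform implementation before initializing the rest of V8 (unless the v8_use_default_platform gyp flag added in build/features.gypi is used, in which case V8 supplies its own). A minimal sketch of such an implementation follows; the class name, the use of std::thread, and the inline foreground execution are assumptions made for illustration, not part of this header.

    // Illustrative embedder-side v8::Platform, assuming a C++11 toolchain.
    #include <thread>
    #include "v8-platform.h"

    class TrivialPlatform : public v8::Platform {
     public:
      virtual void CallOnBackgroundThread(v8::Task* task,
                                          ExpectedRuntime expected_runtime) {
        // The platform takes ownership of |task|; run it on a detached thread.
        std::thread worker([task]() {
          task->Run();
          delete task;
        });
        worker.detach();
      }

      virtual void CallOnForegroundThread(v8::Isolate* isolate, v8::Task* task) {
        // A real embedder would post this to the isolate's message loop; running
        // it inline here still preserves per-isolate posting order.
        task->Run();
        delete task;
      }
    };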
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 0882d64527..59c2d5dacd 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -96,9 +96,6 @@ class V8_EXPORT CpuProfileNode {
*/
class V8_EXPORT CpuProfile {
public:
- /** Returns CPU profile UID (assigned by the profiler.) */
- unsigned GetUid() const;
-
/** Returns CPU profile title. */
Handle<String> GetTitle() const;
@@ -132,10 +129,6 @@ class V8_EXPORT CpuProfile {
/**
* Deletes the profile and removes it from CpuProfiler's list.
* All pointers to nodes previously returned become invalid.
- * Profiles with the same uid but obtained using different
- * security token are not deleted, but become inaccessible
- * using FindProfile method. It is embedder's responsibility
- * to call Delete on these profiles.
*/
void Delete();
};
@@ -155,15 +148,6 @@ class V8_EXPORT CpuProfiler {
void SetSamplingInterval(int us);
/**
- * Returns the number of profiles collected (doesn't include
- * profiles that are being collected at the moment of call.)
- */
- int GetProfileCount();
-
- /** Returns a profile by index. */
- const CpuProfile* GetCpuProfile(int index);
-
- /**
* Starts collecting CPU profile. Title may be an empty string. It
* is allowed to have several profiles being collected at
* once. Attempts to start collecting several profiles with the same
@@ -183,13 +167,6 @@ class V8_EXPORT CpuProfiler {
const CpuProfile* StopCpuProfiling(Handle<String> title);
/**
- * Deletes all existing profiles, also cancelling all profiling
- * activity. All previously returned pointers to profiles and their
- * contents become invalid after this call.
- */
- void DeleteAllCpuProfiles();
-
- /**
* Tells the profiler whether the embedder is idle.
*/
void SetIdle(bool is_idle);
@@ -280,19 +257,17 @@ class V8_EXPORT HeapGraphNode {
SnapshotObjectId GetId() const;
/** Returns node's own size, in bytes. */
- int GetSelfSize() const;
+ V8_DEPRECATED("Use GetShallowSize instead",
+ int GetSelfSize() const);
+
+ /** Returns node's own size, in bytes. */
+ size_t GetShallowSize() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
/** Retrieves a child by index. */
const HeapGraphEdge* GetChild(int index) const;
-
- /**
- * Finds and returns a value from the heap corresponding to this node,
- * if the value is still reachable.
- */
- Handle<Value> GetHeapValue() const;
};
@@ -393,6 +368,19 @@ class V8_EXPORT HeapProfiler {
SnapshotObjectId GetObjectId(Handle<Value> value);
/**
+   * Returns the heap object with the given SnapshotObjectId if the object is
+   * alive, otherwise an empty handle is returned.
+ */
+ Handle<Value> FindObjectById(SnapshotObjectId id);
+
+ /**
+ * Clears internal map from SnapshotObjectId to heap object. The new objects
+ * will not be added into it unless a heap snapshot is taken or heap object
+ * tracking is kicked off.
+ */
+ void ClearObjectIds();
+
+ /**
* A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
* it in case heap profiler cannot find id for the object passed as
* parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
@@ -425,8 +413,12 @@ class V8_EXPORT HeapProfiler {
* Starts tracking of heap objects population statistics. After calling
* this method, all heap objects relocations done by the garbage collector
* are being registered.
+ *
+ * |track_allocations| parameter controls whether stack trace of each
+ * allocation in the heap will be recorded and reported as part of
+ * HeapSnapshot.
*/
- void StartTrackingHeapObjects();
+ void StartTrackingHeapObjects(bool track_allocations = false);
/**
* Adds a new time interval entry to the aggregated statistics array. The
@@ -475,19 +467,6 @@ class V8_EXPORT HeapProfiler {
*/
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
- /**
- * Starts recording JS allocations immediately as they arrive and tracking of
- * heap objects population statistics.
- */
- void StartRecordingHeapAllocations();
-
- /**
- * Stops recording JS allocations and tracking of heap objects population
- * statistics, cleans all collected heap objects population statistics data.
- */
- void StopRecordingHeapAllocations();
-
-
private:
HeapProfiler();
~HeapProfiler();
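The v8-profiler.h diff above removes the uid-based CpuProfile lookup and the StartRecordingHeapAllocations/StopRecordingHeapAllocations pair, folding allocation tracking into StartTrackingHeapObjects and adding FindObjectById/ClearObjectIds on HeapProfiler. A short usage sketch of the new HeapProfiler surface is shown below; the function name and the placeholder snapshot id are illustrative only.

    // Sketch of the revised HeapProfiler API introduced in this diff.
    #include "v8-profiler.h"

    void TrackHeapObjects(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();

      // New optional flag: also record an allocation stack trace per object,
      // to be reported as part of subsequent heap snapshots.
      profiler->StartTrackingHeapObjects(true);

      // ... run the workload, take a snapshot, read an id from a graph node ...

      // FindObjectById replaces HeapGraphNode::GetHeapValue: it maps a snapshot
      // object id back to the live object, if it is still reachable.
      v8::SnapshotObjectId id = 1;  // placeholder; normally taken from a snapshot
      v8::Handle<v8::Value> value = profiler->FindObjectById(id);
      if (!value.IsEmpty()) {
        // ... inspect |value| ...
      }

      // Drop the internal id-to-object map once it is no longer needed.
      profiler->ClearObjectIds();
    }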
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 44a74ed5fe..dd8f2685bc 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -105,6 +105,7 @@ class NumberObject;
class Object;
class ObjectOperationDescriptor;
class ObjectTemplate;
+class Platform;
class Primitive;
class RawOperationDescriptor;
class Signature;
@@ -114,6 +115,7 @@ class String;
class StringObject;
class Symbol;
class SymbolObject;
+class Private;
class Uint32;
class Utils;
class Value;
@@ -121,8 +123,10 @@ template <class T> class Handle;
template <class T> class Local;
template <class T> class Eternal;
template<class T> class NonCopyablePersistentTraits;
+template<class T> class PersistentBase;
template<class T,
class M = NonCopyablePersistentTraits<T> > class Persistent;
+template<class T> class UniquePersistent;
template<class T, class P> class WeakCallbackObject;
class FunctionTemplate;
class ObjectTemplate;
@@ -255,17 +259,17 @@ template <class T> class Handle {
* The handles' references are not checked.
*/
template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(**this);
- internal::Object** b = reinterpret_cast<internal::Object**>(*that);
+ internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
}
template <class S> V8_INLINE bool operator==(
- const Persistent<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(**this);
- internal::Object** b = reinterpret_cast<internal::Object**>(*that);
+ const PersistentBase<S>& that) const {
+ internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
@@ -302,7 +306,8 @@ template <class T> class Handle {
V8_INLINE static Handle<T> New(Isolate* isolate, Handle<T> that) {
return New(isolate, that.val_);
}
- V8_INLINE static Handle<T> New(Isolate* isolate, const Persistent<T>& that) {
+ V8_INLINE static Handle<T> New(Isolate* isolate,
+ const PersistentBase<T>& that) {
return New(isolate, that.val_);
}
@@ -318,6 +323,8 @@ template <class T> class Handle {
private:
friend class Utils;
template<class F, class M> friend class Persistent;
+ template<class F> friend class PersistentBase;
+ template<class F> friend class Handle;
template<class F> friend class Local;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
@@ -328,6 +335,8 @@ template <class T> class Handle {
friend Handle<Boolean> False(Isolate* isolate);
friend class Context;
friend class HandleScope;
+ friend class Object;
+ friend class Private;
V8_INLINE static Handle<T> New(Isolate* isolate, T* that);
@@ -379,9 +388,8 @@ template <class T> class Local : public Handle<T> {
* the original handle is destroyed/disposed.
*/
V8_INLINE static Local<T> New(Isolate* isolate, Handle<T> that);
- template<class M>
V8_INLINE static Local<T> New(Isolate* isolate,
- const Persistent<T, M>& that);
+ const PersistentBase<T>& that);
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
@@ -392,8 +400,10 @@ template <class T> class Local : public Handle<T> {
private:
friend class Utils;
template<class F> friend class Eternal;
+ template<class F> friend class PersistentBase;
template<class F, class M> friend class Persistent;
template<class F> friend class Handle;
+ template<class F> friend class Local;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
friend class String;
@@ -445,15 +455,134 @@ class WeakCallbackData {
};
-// TODO(dcarney): Remove this class.
-template<typename T,
- typename P,
- typename M = NonCopyablePersistentTraits<T> >
-class WeakReferenceCallbacks {
+/**
+ * An object reference that is independent of any handle scope. Where
+ * a Local handle only lives as long as the HandleScope in which it was
+ * allocated, a PersistentBase handle remains valid until it is explicitly
+ * disposed.
+ *
+ * A persistent handle contains a reference to a storage cell within
+ * the v8 engine which holds an object value and which is updated by
+ * the garbage collector whenever the object is moved. A new storage
+ * cell can be created using the constructor or PersistentBase::Reset and
+ * existing handles can be disposed using PersistentBase::Reset.
+ *
+ */
+template <class T> class PersistentBase {
public:
- typedef void (*Revivable)(Isolate* isolate,
- Persistent<T, M>* object,
- P* parameter);
+ /**
+ * If non-empty, destroy the underlying storage cell
+ * IsEmpty() will return true after this call.
+ */
+ V8_INLINE void Reset();
+ /**
+ * If non-empty, destroy the underlying storage cell
+   * and create a new one with the contents of other if other is non-empty
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Handle<S>& other);
+
+ /**
+ * If non-empty, destroy the underlying storage cell
+   * and create a new one with the contents of other if other is non-empty
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const PersistentBase<S>& other);
+
+ V8_INLINE bool IsEmpty() const { return val_ == 0; }
+
+ template <class S>
+ V8_INLINE bool operator==(const PersistentBase<S>& that) const {
+ internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
+ if (a == 0) return b == 0;
+ if (b == 0) return false;
+ return *a == *b;
+ }
+
+ template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
+ internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
+ if (a == 0) return b == 0;
+ if (b == 0) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const PersistentBase<S>& that) const {
+ return !operator==(that);
+ }
+
+ template <class S> V8_INLINE bool operator!=(const Handle<S>& that) const {
+ return !operator==(that);
+ }
+
+ template<typename P>
+ V8_INLINE void SetWeak(
+ P* parameter,
+ typename WeakCallbackData<T, P>::Callback callback);
+
+ template<typename S, typename P>
+ V8_INLINE void SetWeak(
+ P* parameter,
+ typename WeakCallbackData<S, P>::Callback callback);
+
+ V8_INLINE void ClearWeak();
+
+ /**
+ * Marks the reference to this object independent. Garbage collector is free
+ * to ignore any object groups containing this object. Weak callback for an
+ * independent handle should not assume that it will be preceded by a global
+ * GC prologue callback or followed by a global GC epilogue callback.
+ */
+ V8_INLINE void MarkIndependent();
+
+ /**
+ * Marks the reference to this object partially dependent. Partially dependent
+ * handles only depend on other partially dependent handles and these
+ * dependencies are provided through object groups. It provides a way to build
+ * smaller object groups for young objects that represent only a subset of all
+ * external dependencies. This mark is automatically cleared after each
+ * garbage collection.
+ */
+ V8_INLINE void MarkPartiallyDependent();
+
+ V8_INLINE bool IsIndependent() const;
+
+ /** Checks if the handle holds the only reference to an object. */
+ V8_INLINE bool IsNearDeath() const;
+
+ /** Returns true if the handle's reference is weak. */
+ V8_INLINE bool IsWeak() const;
+
+ /**
+ * Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
+ * description in v8-profiler.h for details.
+ */
+ V8_INLINE void SetWrapperClassId(uint16_t class_id);
+
+ /**
+ * Returns the class ID previously assigned to this handle or 0 if no class ID
+ * was previously assigned.
+ */
+ V8_INLINE uint16_t WrapperClassId() const;
+
+ private:
+ friend class Isolate;
+ friend class Utils;
+ template<class F> friend class Handle;
+ template<class F> friend class Local;
+ template<class F1, class F2> friend class Persistent;
+ template<class F> friend class UniquePersistent;
+ template<class F> friend class PersistentBase;
+ template<class F> friend class ReturnValue;
+
+ explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
+ PersistentBase(PersistentBase& other); // NOLINT
+ void operator=(PersistentBase&);
+ V8_INLINE static T* New(Isolate* isolate, T* that);
+
+ T* val_;
};
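A minimal usage sketch of the PersistentBase/Persistent API declared in the hunk above, assuming an already-initialized v8::Isolate* and an entered context; it also assumes the HandleScope(Isolate*) constructor and Object::New(Isolate*) from elsewhere in this header revision:

    #include <v8.h>

    void KeepObjectAlive(v8::Isolate* isolate, v8::Persistent<v8::Object>* out) {
      v8::HandleScope handle_scope(isolate);
      v8::Local<v8::Object> obj = v8::Object::New(isolate);
      // Reset(isolate, handle) creates a new storage cell pointing at |obj|.
      out->Reset(isolate, obj);
    }

    void DropIt(v8::Persistent<v8::Object>* handle) {
      // Reset() with no arguments destroys the cell; IsEmpty() is true afterwards.
      handle->Reset();
    }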
@@ -497,33 +626,26 @@ struct CopyablePersistentTraits {
/**
- * An object reference that is independent of any handle scope. Where
- * a Local handle only lives as long as the HandleScope in which it was
- * allocated, a Persistent handle remains valid until it is explicitly
- * disposed.
- *
- * A persistent handle contains a reference to a storage cell within
- * the v8 engine which holds an object value and which is updated by
- * the garbage collector whenever the object is moved. A new storage
- * cell can be created using the constructor or Persistent::Reset and
- * existing handles can be disposed using Persistent::Reset.
+ * A PersistentBase which allows copy and assignment.
*
 * Copy, assignment and destructor behavior is controlled by the traits
* class M.
+ *
+ * Note: Persistent class hierarchy is subject to future changes.
*/
-template <class T, class M> class Persistent {
+template <class T, class M> class Persistent : public PersistentBase<T> {
public:
/**
* A Persistent with no storage cell.
*/
- V8_INLINE Persistent() : val_(0) { }
+ V8_INLINE Persistent() : PersistentBase<T>(0) { }
/**
* Construct a Persistent from a Handle.
* When the Handle is non-empty, a new storage cell is created
* pointing to the same object, and no flags are set.
*/
template <class S> V8_INLINE Persistent(Isolate* isolate, Handle<S> that)
- : val_(New(isolate, *that)) {
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
TYPE_CHECK(T, S);
}
/**
@@ -533,7 +655,7 @@ template <class T, class M> class Persistent {
*/
template <class S, class M2>
V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
- : val_(New(isolate, *that)) {
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
TYPE_CHECK(T, S);
}
/**
@@ -542,11 +664,11 @@ template <class T, class M> class Persistent {
* traits class is called, allowing the setting of flags based on the
* copied Persistent.
*/
- V8_INLINE Persistent(const Persistent& that) : val_(0) {
+ V8_INLINE Persistent(const Persistent& that) : PersistentBase<T>(0) {
Copy(that);
}
template <class S, class M2>
- V8_INLINE Persistent(const Persistent<S, M2>& that) : val_(0) {
+ V8_INLINE Persistent(const Persistent<S, M2>& that) : PersistentBase<T>(0) {
Copy(that);
}
V8_INLINE Persistent& operator=(const Persistent& that) { // NOLINT
@@ -564,32 +686,9 @@ template <class T, class M> class Persistent {
* can result in a memory leak, it is recommended to always set this flag.
*/
V8_INLINE ~Persistent() {
- if (M::kResetInDestructor) Reset();
+ if (M::kResetInDestructor) this->Reset();
}
- /**
- * If non-empty, destroy the underlying storage cell
- * IsEmpty() will return true after this call.
- */
- V8_INLINE void Reset();
- /**
- * If non-empty, destroy the underlying storage cell
- * and create a new one with the contents of other if other is non empty
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const Handle<S>& other);
- /**
- * If non-empty, destroy the underlying storage cell
- * and create a new one with the contents of other if other is non empty
- */
- template <class S, class M2>
- V8_INLINE void Reset(Isolate* isolate, const Persistent<S, M2>& other);
-
- V8_DEPRECATED("Use Reset instead",
- V8_INLINE void Dispose()) { Reset(); }
-
- V8_INLINE bool IsEmpty() const { return val_ == 0; }
-
// TODO(dcarney): this is pretty useless, fix or remove
template <class S>
V8_INLINE static Persistent<T>& Cast(Persistent<S>& that) { // NOLINT
@@ -606,110 +705,17 @@ template <class T, class M> class Persistent {
return Persistent<S>::Cast(*this);
}
- template <class S, class M2>
- V8_INLINE bool operator==(const Persistent<S, M2>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(**this);
- internal::Object** b = reinterpret_cast<internal::Object**>(*that);
- if (a == 0) return b == 0;
- if (b == 0) return false;
- return *a == *b;
- }
-
- template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(**this);
- internal::Object** b = reinterpret_cast<internal::Object**>(*that);
- if (a == 0) return b == 0;
- if (b == 0) return false;
- return *a == *b;
- }
-
- template <class S, class M2>
- V8_INLINE bool operator!=(const Persistent<S, M2>& that) const {
- return !operator==(that);
- }
-
- template <class S> V8_INLINE bool operator!=(const Handle<S>& that) const {
- return !operator==(that);
- }
-
- template<typename P>
- V8_INLINE void SetWeak(
- P* parameter,
- typename WeakCallbackData<T, P>::Callback callback);
-
- template<typename S, typename P>
- V8_INLINE void SetWeak(
- P* parameter,
- typename WeakCallbackData<S, P>::Callback callback);
-
- template<typename S, typename P>
- V8_DEPRECATED(
- "Use SetWeak instead",
- V8_INLINE void MakeWeak(
- P* parameter,
- typename WeakReferenceCallbacks<S, P>::Revivable callback));
-
- template<typename P>
- V8_DEPRECATED(
- "Use SetWeak instead",
- V8_INLINE void MakeWeak(
- P* parameter,
- typename WeakReferenceCallbacks<T, P>::Revivable callback));
-
- V8_INLINE void ClearWeak();
-
- /**
- * Marks the reference to this object independent. Garbage collector is free
- * to ignore any object groups containing this object. Weak callback for an
- * independent handle should not assume that it will be preceded by a global
- * GC prologue callback or followed by a global GC epilogue callback.
- */
- V8_INLINE void MarkIndependent();
-
- /**
- * Marks the reference to this object partially dependent. Partially dependent
- * handles only depend on other partially dependent handles and these
- * dependencies are provided through object groups. It provides a way to build
- * smaller object groups for young objects that represent only a subset of all
- * external dependencies. This mark is automatically cleared after each
- * garbage collection.
- */
- V8_INLINE void MarkPartiallyDependent();
-
- V8_INLINE bool IsIndependent() const;
-
- /** Checks if the handle holds the only reference to an object. */
- V8_INLINE bool IsNearDeath() const;
-
- /** Returns true if the handle's reference is weak. */
- V8_INLINE bool IsWeak() const;
-
- /**
- * Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
- * description in v8-profiler.h for details.
- */
- V8_INLINE void SetWrapperClassId(uint16_t class_id);
-
- /**
- * Returns the class ID previously assigned to this handle or 0 if no class ID
- * was previously assigned.
- */
- V8_INLINE uint16_t WrapperClassId() const;
-
- V8_DEPRECATED("This will be removed",
- V8_INLINE T* ClearAndLeak());
-
- V8_DEPRECATED("This will be removed",
- V8_INLINE void Clear()) { val_ = 0; }
+ // This will be removed.
+ V8_INLINE T* ClearAndLeak();
// TODO(dcarney): remove
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
private:
#endif
- template <class S> V8_INLINE Persistent(S* that) : val_(that) { }
+ template <class S> V8_INLINE Persistent(S* that) : PersistentBase<T>(that) { }
- V8_INLINE T* operator*() const { return val_; }
+ V8_INLINE T* operator*() const { return this->val_; }
private:
friend class Isolate;
@@ -719,13 +725,81 @@ template <class T, class M> class Persistent {
template<class F1, class F2> friend class Persistent;
template<class F> friend class ReturnValue;
- V8_INLINE static T* New(Isolate* isolate, T* that);
template<class S, class M2>
V8_INLINE void Copy(const Persistent<S, M2>& that);
+};
- T* val_;
+
+/**
+ * A PersistentBase which has move semantics.
+ *
+ * Note: Persistent class hierarchy is subject to future changes.
+ */
+template<class T>
+class UniquePersistent : public PersistentBase<T> {
+ struct RValue {
+ V8_INLINE explicit RValue(UniquePersistent* obj) : object(obj) {}
+ UniquePersistent* object;
+ };
+
+ public:
+ /**
+ * A UniquePersistent with no storage cell.
+ */
+ V8_INLINE UniquePersistent() : PersistentBase<T>(0) { }
+ /**
+ * Construct a UniquePersistent from a Handle.
+ * When the Handle is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE UniquePersistent(Isolate* isolate, Handle<S> that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ TYPE_CHECK(T, S);
+ }
+ /**
+ * Construct a UniquePersistent from a PersistentBase.
+ * When the Persistent is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE UniquePersistent(Isolate* isolate, const PersistentBase<S>& that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
+ TYPE_CHECK(T, S);
+ }
+ /**
+ * Move constructor.
+ */
+ V8_INLINE UniquePersistent(RValue rvalue)
+ : PersistentBase<T>(rvalue.object->val_) {
+ rvalue.object->val_ = 0;
+ }
+ V8_INLINE ~UniquePersistent() { this->Reset(); }
+ /**
+ * Move via assignment.
+ */
+ template<class S>
+ V8_INLINE UniquePersistent& operator=(UniquePersistent<S> rhs) {
+ TYPE_CHECK(T, S);
+ this->val_ = rhs.val_;
+ rhs.val_ = 0;
+ return *this;
+ }
+ /**
+ * Cast operator for moves.
+ */
+ V8_INLINE operator RValue() { return RValue(this); }
+ /**
+ * Pass allows returning uniques from functions, etc.
+ */
+ V8_INLINE UniquePersistent Pass() { return UniquePersistent(RValue(this)); }
+
+ private:
+ UniquePersistent(UniquePersistent&);
+ void operator=(UniquePersistent&);
};
+
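A sketch of the C++03-style move emulation added above (the RValue wrapper plays the same role as std::auto_ptr's auto_ptr_ref); isolate and obj are assumed to exist and the snippet is not part of the diff itself:

    #include <v8.h>

    v8::UniquePersistent<v8::Object> MakeOwner(v8::Isolate* isolate,
                                               v8::Handle<v8::Object> obj) {
      v8::UniquePersistent<v8::Object> owner(isolate, obj);
      // Pass() hands the storage cell to the returned value; |owner| is left empty.
      return owner.Pass();
    }

    // At the call site the cell is moved, never copied; it is released in
    // ~UniquePersistent or by an explicit Reset().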
/**
* A stack-allocated class that governs a number of local handles.
* After a handle scope has been created, all local handles will be
@@ -746,28 +820,28 @@ class V8_EXPORT HandleScope {
~HandleScope();
- template <class T>
- V8_DEPRECATED("Use EscapableHandleScope::Escape instead",
- Local<T> Close(Handle<T> value));
-
/**
* Counts the number of allocated handles.
*/
- static int NumberOfHandles();
+ static int NumberOfHandles(Isolate* isolate);
+
+ V8_INLINE Isolate* GetIsolate() const {
+ return reinterpret_cast<Isolate*>(isolate_);
+ }
+
+ protected:
+ V8_INLINE HandleScope() {}
+
+ void Initialize(Isolate* isolate);
- private:
- /**
- * Creates a new handle with the given value.
- */
static internal::Object** CreateHandle(internal::Isolate* isolate,
internal::Object* value);
- // Uses HeapObject to obtain the current Isolate.
+
+ private:
+ // Uses heap_object to obtain the current Isolate.
static internal::Object** CreateHandle(internal::HeapObject* heap_object,
internal::Object* value);
- V8_INLINE HandleScope() {}
- void Initialize(Isolate* isolate);
-
// Make it hard to create heap-allocated or illegal handle scopes by
// disallowing certain operations.
HandleScope(const HandleScope&);
@@ -775,35 +849,15 @@ class V8_EXPORT HandleScope {
void* operator new(size_t size);
void operator delete(void*, size_t);
- // This Data class is accessible internally as HandleScopeData through a
- // typedef in the ImplementationUtilities class.
- class V8_EXPORT Data {
- public:
- internal::Object** next;
- internal::Object** limit;
- int level;
- V8_INLINE void Initialize() {
- next = limit = NULL;
- level = 0;
- }
- };
-
- void Leave();
-
internal::Isolate* isolate_;
internal::Object** prev_next_;
internal::Object** prev_limit_;
- // TODO(dcarney): remove this field
- // Allow for the active closing of HandleScopes which allows to pass a handle
- // from the HandleScope being closed to the next top most HandleScope.
- bool is_closed_;
- internal::Object** RawClose(internal::Object** value);
-
- friend class ImplementationUtilities;
- friend class EscapableHandleScope;
- template<class F> friend class Handle;
+ // Local::New uses CreateHandle with an Isolate* parameter.
template<class F> friend class Local;
+
+ // Object::GetInternalField and Context::GetEmbedderData use CreateHandle with
+ // a HeapObject* in their shortcuts.
friend class Object;
friend class Context;
};
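With the HandleScope changes above, NumberOfHandles is counted per isolate and the scope exposes its isolate via GetIsolate(). A small sketch, assuming an initialized isolate:

    #include <v8.h>

    int CountHandles(v8::Isolate* isolate) {
      v8::HandleScope handle_scope(isolate);
      // GetIsolate() returns the isolate this scope was created for.
      v8::Isolate* same = handle_scope.GetIsolate();
      // NumberOfHandles now takes the isolate instead of being process-global.
      return v8::HandleScope::NumberOfHandles(same);
    }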
@@ -883,16 +937,6 @@ class V8_EXPORT ScriptData { // NOLINT
/**
* Pre-compiles the specified script (context-independent).
*
- * \param input Pointer to UTF-8 script source code.
- * \param length Length of UTF-8 script source code.
- */
- static ScriptData* PreCompile(Isolate* isolate,
- const char* input,
- int length);
-
- /**
- * Pre-compiles the specified script (context-independent).
- *
* NOTE: Pre-compilation using this method cannot happen on another thread
* without using Lockers.
*
@@ -1039,11 +1083,6 @@ class V8_EXPORT Script {
Local<Value> Run();
/**
- * Returns the script id value.
- */
- V8_DEPRECATED("Use GetId instead", Local<Value> Id());
-
- /**
* Returns the script id.
*/
int GetId();
@@ -1133,7 +1172,7 @@ class V8_EXPORT Message {
bool IsSharedCrossOrigin() const;
// TODO(1245381): Print to a string instead of on a FILE.
- static void PrintCurrentStackTrace(FILE* out);
+ static void PrintCurrentStackTrace(Isolate* isolate, FILE* out);
static const int kNoLineNumberInfo = 0;
static const int kNoColumnInfo = 0;
@@ -1188,6 +1227,7 @@ class V8_EXPORT StackTrace {
* StackFrame.
*/
static Local<StackTrace> CurrentStackTrace(
+ Isolate* isolate,
int frame_limit,
StackTraceOptions options = kOverview);
};
@@ -1520,7 +1560,7 @@ class V8_EXPORT Primitive : public Value { };
class V8_EXPORT Boolean : public Primitive {
public:
bool Value() const;
- V8_INLINE static Handle<Boolean> New(bool value);
+ V8_INLINE static Handle<Boolean> New(Isolate* isolate, bool value);
};
@@ -1588,7 +1628,11 @@ class V8_EXPORT String : public Primitive {
NO_OPTIONS = 0,
HINT_MANY_WRITES_EXPECTED = 1,
NO_NULL_TERMINATION = 2,
- PRESERVE_ASCII_NULL = 4
+ PRESERVE_ASCII_NULL = 4,
+ // Used by WriteUtf8 to replace orphan surrogate code units with the
+ // unicode replacement character. Needs to be set to guarantee valid UTF-8
+ // output.
+ REPLACE_INVALID_UTF8 = 8
};
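A hedged sketch of the new REPLACE_INVALID_UTF8 flag; it assumes the pre-existing String::WriteUtf8(char* buffer, int length, int* nchars_ref, int options) overload, which this diff does not change:

    #include <v8.h>

    // Writes |str| as UTF-8, replacing orphan surrogate code units with the
    // Unicode replacement character so the output is guaranteed valid UTF-8.
    int WriteValidUtf8(v8::Handle<v8::String> str, char* buf, int capacity) {
      return str->WriteUtf8(buf, capacity, NULL,
                            v8::String::NO_NULL_TERMINATION |
                            v8::String::REPLACE_INVALID_UTF8);
    }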
// 16-bit character codes.
@@ -1610,7 +1654,6 @@ class V8_EXPORT String : public Primitive {
/**
* A zero length string.
*/
- static v8::Local<v8::String> Empty();
V8_INLINE static v8::Local<v8::String> Empty(Isolate* isolate);
/**
@@ -1726,30 +1769,6 @@ class V8_EXPORT String : public Primitive {
V8_INLINE static String* Cast(v8::Value* obj);
- /**
- * Allocates a new string from either UTF-8 encoded or ASCII data.
- * The second parameter 'length' gives the buffer length. If omitted,
- * the function calls 'strlen' to determine the buffer length.
- */
- V8_DEPRECATED(
- "Use NewFromOneByte instead",
- V8_INLINE static Local<String> New(const char* data, int length = -1));
-
- /** Allocates a new string from 16-bit character codes.*/
- V8_DEPRECATED(
- "Use NewFromTwoByte instead",
- V8_INLINE static Local<String> New(
- const uint16_t* data, int length = -1));
-
- /**
- * Creates an internalized string (historically called a "symbol",
- * not to be confused with ES6 symbols). Returns one if it exists already.
- */
- V8_DEPRECATED(
- "Use NewFromUtf8 instead",
- V8_INLINE static Local<String> NewSymbol(
- const char* data, int length = -1));
-
enum NewStringType {
kNormalString, kInternalizedString, kUndetectableString
};
@@ -1788,7 +1807,8 @@ class V8_EXPORT String : public Primitive {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static Local<String> NewExternal(ExternalStringResource* resource);
+ static Local<String> NewExternal(Isolate* isolate,
+ ExternalStringResource* resource);
/**
* Associate an external string resource with this string by transforming it
@@ -1809,7 +1829,8 @@ class V8_EXPORT String : public Primitive {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static Local<String> NewExternal(ExternalAsciiStringResource* resource);
+ static Local<String> NewExternal(Isolate* isolate,
+ ExternalAsciiStringResource* resource);
/**
* Associate an external string resource with this string by transforming it
@@ -1827,18 +1848,6 @@ class V8_EXPORT String : public Primitive {
*/
bool CanMakeExternal();
- /** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
- V8_DEPRECATED(
- "Use NewFromUtf8 instead",
- V8_INLINE static Local<String> NewUndetectable(const char* data,
- int length = -1));
-
- /** Creates an undetectable string from the supplied 16-bit character codes.*/
- V8_DEPRECATED(
- "Use NewFromTwoByte instead",
- V8_INLINE static Local<String> NewUndetectable(const uint16_t* data,
- int length = -1));
-
/**
* Converts an object to a UTF-8-encoded character array. Useful if
* you want to print the object. If conversion to a string fails
@@ -1863,30 +1872,6 @@ class V8_EXPORT String : public Primitive {
};
/**
- * Converts an object to an ASCII string.
- * Useful if you want to print the object.
- * If conversion to a string fails (eg. due to an exception in the toString()
- * method of the object) then the length() method returns 0 and the * operator
- * returns NULL.
- */
- class V8_EXPORT AsciiValue {
- public:
- V8_DEPRECATED("Use Utf8Value instead",
- explicit AsciiValue(Handle<v8::Value> obj));
- ~AsciiValue();
- char* operator*() { return str_; }
- const char* operator*() const { return str_; }
- int length() const { return length_; }
- private:
- char* str_;
- int length_;
-
- // Disallow copying and assigning.
- AsciiValue(const AsciiValue&);
- void operator=(const AsciiValue&);
- };
-
- /**
* Converts an object to a two-byte string.
* If conversion to a string fails (eg. due to an exception in the toString()
* method of the object) then the length() method returns 0 and the * operator
@@ -1926,11 +1911,9 @@ class V8_EXPORT Symbol : public Primitive {
// Returns the print name string of the symbol, or undefined if none.
Local<Value> Name() const;
- // Create a symbol without a print name.
- static Local<Symbol> New(Isolate* isolate);
-
- // Create a symbol with a print name.
- static Local<Symbol> New(Isolate *isolate, const char* data, int length = -1);
+ // Create a symbol. If data is not NULL, it will be used as a print name.
+ static Local<Symbol> New(
+ Isolate *isolate, const char* data = NULL, int length = -1);
V8_INLINE static Symbol* Cast(v8::Value* obj);
private:
@@ -1940,12 +1923,30 @@ class V8_EXPORT Symbol : public Primitive {
/**
+ * A private symbol
+ *
+ * This is an experimental feature. Use at your own risk.
+ */
+class V8_EXPORT Private : public Data {
+ public:
+ // Returns the print name string of the private symbol, or undefined if none.
+ Local<Value> Name() const;
+
+ // Create a private symbol. If data is not NULL, it will be the print name.
+ static Local<Private> New(
+ Isolate *isolate, const char* data = NULL, int length = -1);
+
+ private:
+ Private();
+};
+
+
+/**
* A JavaScript number value (ECMA-262, 4.3.20)
*/
class V8_EXPORT Number : public Primitive {
public:
double Value() const;
- static Local<Number> New(double value);
static Local<Number> New(Isolate* isolate, double value);
V8_INLINE static Number* Cast(v8::Value* obj);
private:
@@ -1959,10 +1960,8 @@ class V8_EXPORT Number : public Primitive {
*/
class V8_EXPORT Integer : public Number {
public:
- static Local<Integer> New(int32_t value);
- static Local<Integer> NewFromUnsigned(uint32_t value);
- static Local<Integer> New(int32_t value, Isolate*);
- static Local<Integer> NewFromUnsigned(uint32_t value, Isolate*);
+ static Local<Integer> New(Isolate* isolate, int32_t value);
+ static Local<Integer> NewFromUnsigned(Isolate* isolate, uint32_t value);
int64_t Value() const;
V8_INLINE static Integer* Cast(v8::Value* obj);
private:
@@ -2001,15 +2000,26 @@ enum PropertyAttribute {
};
enum ExternalArrayType {
- kExternalByteArray = 1,
- kExternalUnsignedByteArray,
- kExternalShortArray,
- kExternalUnsignedShortArray,
- kExternalIntArray,
- kExternalUnsignedIntArray,
- kExternalFloatArray,
- kExternalDoubleArray,
- kExternalPixelArray
+ kExternalInt8Array = 1,
+ kExternalUint8Array,
+ kExternalInt16Array,
+ kExternalUint16Array,
+ kExternalInt32Array,
+ kExternalUint32Array,
+ kExternalFloat32Array,
+ kExternalFloat64Array,
+ kExternalUint8ClampedArray,
+
+ // Legacy constant names
+ kExternalByteArray = kExternalInt8Array,
+ kExternalUnsignedByteArray = kExternalUint8Array,
+ kExternalShortArray = kExternalInt16Array,
+ kExternalUnsignedShortArray = kExternalUint16Array,
+ kExternalIntArray = kExternalInt32Array,
+ kExternalUnsignedIntArray = kExternalUint32Array,
+ kExternalFloatArray = kExternalFloat32Array,
+ kExternalDoubleArray = kExternalFloat64Array,
+ kExternalPixelArray = kExternalUint8ClampedArray
};
/**
@@ -2109,6 +2119,17 @@ class V8_EXPORT Object : public Value {
AccessControl settings = DEFAULT);
/**
+ * Functionality for private properties.
+ * This is an experimental feature, use at your own risk.
+ * Note: Private properties are inherited. Do not rely on this, since it may
+ * change.
+ */
+ bool HasPrivate(Handle<Private> key);
+ bool SetPrivate(Handle<Private> key, Handle<Value> value);
+ bool DeletePrivate(Handle<Private> key);
+ Local<Value> GetPrivate(Handle<Private> key);
+
+ /**
* Returns an array containing the names of the enumerable properties
* of this object, including properties from prototype objects. The
* array returned by this method contains the same values as would
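A short sketch of the experimental private-property API added in this hunk, assuming an isolate, an entered context, and a v8::Local<v8::Object> named obj; the key name "embedder:tag" is illustrative only:

    #include <v8.h>

    void TagObject(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
      // Private symbols never collide with script-visible property names.
      v8::Local<v8::Private> key = v8::Private::New(isolate, "embedder:tag");
      obj->SetPrivate(key, v8::Integer::New(isolate, 42));
      if (obj->HasPrivate(key)) {
        v8::Local<v8::Value> tag = obj->GetPrivate(key);
        (void)tag;  // use the stored value
      }
    }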
@@ -2304,7 +2325,8 @@ class V8_EXPORT Object : public Value {
*/
Local<Value> CallAsConstructor(int argc, Handle<Value> argv[]);
- static Local<Object> New();
+ static Local<Object> New(Isolate* isolate);
+
V8_INLINE static Object* Cast(Value* obj);
private:
@@ -2332,7 +2354,7 @@ class V8_EXPORT Array : public Object {
* Creates a JavaScript array with the given length. If the length
* is negative the returned array will have length 0.
*/
- static Local<Array> New(int length = 0);
+ static Local<Array> New(Isolate* isolate, int length = 0);
V8_INLINE static Array* Cast(Value* obj);
private:
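These hunks follow the pattern applied throughout the header: factory functions that used to rely on Isolate::GetCurrent() now take the isolate explicitly. A composite sketch, assuming an isolate and an entered context:

    #include <v8.h>

    void MakeValues(v8::Isolate* isolate) {
      v8::HandleScope handle_scope(isolate);
      v8::Local<v8::Object>   obj = v8::Object::New(isolate);
      v8::Local<v8::Array>    arr = v8::Array::New(isolate, 8);
      v8::Local<v8::Integer>  num = v8::Integer::New(isolate, 42);
      v8::Handle<v8::Boolean> yes = v8::Boolean::New(isolate, true);
      (void)obj; (void)arr; (void)num; (void)yes;
    }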
@@ -2501,14 +2523,15 @@ class V8_EXPORT Function : public Object {
bool IsBuiltin() const;
/**
- * Returns scriptId object.
+ * Returns scriptId.
*/
- V8_DEPRECATED("Use ScriptId instead", Handle<Value> GetScriptId()) const;
+ int ScriptId() const;
/**
- * Returns scriptId.
+ * Returns the original function if this function is bound, else returns
+ * v8::Undefined.
*/
- int ScriptId() const;
+ Local<Value> GetBoundFunction() const;
ScriptOrigin GetScriptOrigin() const;
V8_INLINE static Function* Cast(Value* obj);
@@ -2595,7 +2618,7 @@ class V8_EXPORT ArrayBuffer : public Object {
* will be deallocated when it is garbage-collected,
* unless the object is externalized.
*/
- static Local<ArrayBuffer> New(size_t byte_length);
+ static Local<ArrayBuffer> New(Isolate* isolate, size_t byte_length);
/**
* Create a new ArrayBuffer over an existing memory block.
@@ -2603,7 +2626,8 @@ class V8_EXPORT ArrayBuffer : public Object {
* The memory block will not be reclaimed when a created ArrayBuffer
* is garbage-collected.
*/
- static Local<ArrayBuffer> New(void* data, size_t byte_length);
+ static Local<ArrayBuffer> New(Isolate* isolate, void* data,
+ size_t byte_length);
/**
   * Returns true if ArrayBuffer is externalized, that is, does not
@@ -2863,11 +2887,7 @@ class V8_EXPORT DataView : public ArrayBufferView {
*/
class V8_EXPORT Date : public Object {
public:
- static Local<Value> New(double time);
-
- V8_DEPRECATED(
- "Use ValueOf instead",
- double NumberValue()) const { return ValueOf(); }
+ static Local<Value> New(Isolate* isolate, double time);
/**
* A specialization of Value::NumberValue that is more efficient
@@ -2889,7 +2909,7 @@ class V8_EXPORT Date : public Object {
* This API should not be called more than needed as it will
* negatively impact the performance of date operations.
*/
- static void DateTimeConfigurationChangeNotification();
+ static void DateTimeConfigurationChangeNotification(Isolate* isolate);
private:
static void CheckCast(v8::Value* obj);
@@ -2901,15 +2921,8 @@ class V8_EXPORT Date : public Object {
*/
class V8_EXPORT NumberObject : public Object {
public:
- static Local<Value> New(double value);
-
- V8_DEPRECATED(
- "Use ValueOf instead",
- double NumberValue()) const { return ValueOf(); }
+ static Local<Value> New(Isolate* isolate, double value);
- /**
- * Returns the Number held by the object.
- */
double ValueOf() const;
V8_INLINE static NumberObject* Cast(v8::Value* obj);
@@ -2926,13 +2939,6 @@ class V8_EXPORT BooleanObject : public Object {
public:
static Local<Value> New(bool value);
- V8_DEPRECATED(
- "Use ValueOf instead",
- bool BooleanValue()) const { return ValueOf(); }
-
- /**
- * Returns the Boolean held by the object.
- */
bool ValueOf() const;
V8_INLINE static BooleanObject* Cast(v8::Value* obj);
@@ -2949,13 +2955,6 @@ class V8_EXPORT StringObject : public Object {
public:
static Local<Value> New(Handle<String> value);
- V8_DEPRECATED(
- "Use ValueOf instead",
- Local<String> StringValue()) const { return ValueOf(); }
-
- /**
- * Returns the String held by the object.
- */
Local<String> ValueOf() const;
V8_INLINE static StringObject* Cast(v8::Value* obj);
@@ -2974,13 +2973,6 @@ class V8_EXPORT SymbolObject : public Object {
public:
static Local<Value> New(Isolate* isolate, Handle<Symbol> value);
- V8_DEPRECATED(
- "Use ValueOf instead",
- Local<Symbol> SymbolValue()) const { return ValueOf(); }
-
- /**
- * Returns the Symbol held by the object.
- */
Local<Symbol> ValueOf() const;
V8_INLINE static SymbolObject* Cast(v8::Value* obj);
@@ -3042,7 +3034,7 @@ class V8_EXPORT RegExp : public Object {
*/
class V8_EXPORT External : public Value {
public:
- static Local<External> New(void* value);
+ static Local<External> New(Isolate* isolate, void* value);
V8_INLINE static External* Cast(Value* obj);
void* Value() const;
private:
@@ -3061,7 +3053,7 @@ class V8_EXPORT Template : public Data {
/** Adds a property to each instance created by this template.*/
void Set(Handle<String> name, Handle<Data> value,
PropertyAttribute attributes = None);
- V8_INLINE void Set(const char* name, Handle<Data> value);
+ V8_INLINE void Set(Isolate* isolate, const char* name, Handle<Data> value);
void SetAccessorProperty(
Local<String> name,
@@ -3344,6 +3336,7 @@ class V8_EXPORT FunctionTemplate : public Template {
public:
/** Creates a function template.*/
static Local<FunctionTemplate> New(
+ Isolate* isolate,
FunctionCallback callback = 0,
Handle<Value> data = Handle<Value>(),
Handle<Signature> signature = Handle<Signature>(),
@@ -3430,6 +3423,8 @@ class V8_EXPORT FunctionTemplate : public Template {
class V8_EXPORT ObjectTemplate : public Template {
public:
/** Creates an ObjectTemplate. */
+ static Local<ObjectTemplate> New(Isolate* isolate);
+ // Will be deprecated soon.
static Local<ObjectTemplate> New();
/** Creates a new instance of this template.*/
@@ -3571,7 +3566,8 @@ class V8_EXPORT ObjectTemplate : public Template {
private:
ObjectTemplate();
- static Local<ObjectTemplate> New(Handle<FunctionTemplate> constructor);
+ static Local<ObjectTemplate> New(internal::Isolate* isolate,
+ Handle<FunctionTemplate> constructor);
friend class FunctionTemplate;
};
@@ -3582,10 +3578,12 @@ class V8_EXPORT ObjectTemplate : public Template {
*/
class V8_EXPORT Signature : public Data {
public:
- static Local<Signature> New(Handle<FunctionTemplate> receiver =
+ static Local<Signature> New(Isolate* isolate,
+ Handle<FunctionTemplate> receiver =
Handle<FunctionTemplate>(),
int argc = 0,
Handle<FunctionTemplate> argv[] = 0);
+
private:
Signature();
};
@@ -3597,8 +3595,10 @@ class V8_EXPORT Signature : public Data {
*/
class V8_EXPORT AccessorSignature : public Data {
public:
- static Local<AccessorSignature> New(Handle<FunctionTemplate> receiver =
+ static Local<AccessorSignature> New(Isolate* isolate,
+ Handle<FunctionTemplate> receiver =
Handle<FunctionTemplate>());
+
private:
AccessorSignature();
};
@@ -3702,8 +3702,8 @@ class V8_EXPORT Extension { // NOLINT
const char** deps = 0,
int source_length = -1);
virtual ~Extension() { }
- virtual v8::Handle<v8::FunctionTemplate>
- GetNativeFunction(v8::Handle<v8::String> name) {
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Handle<v8::String> name) {
return v8::Handle<v8::FunctionTemplate>();
}
@@ -3733,25 +3733,8 @@ class V8_EXPORT Extension { // NOLINT
void V8_EXPORT RegisterExtension(Extension* extension);
-/**
- * Ignore
- */
-class V8_EXPORT DeclareExtension {
- public:
- V8_INLINE DeclareExtension(Extension* extension) {
- RegisterExtension(extension);
- }
-};
-
-
// --- Statics ---
-
-Handle<Primitive> V8_EXPORT Undefined();
-Handle<Primitive> V8_EXPORT Null();
-Handle<Boolean> V8_EXPORT True();
-Handle<Boolean> V8_EXPORT False();
-
V8_INLINE Handle<Primitive> Undefined(Isolate* isolate);
V8_INLINE Handle<Primitive> Null(Isolate* isolate);
V8_INLINE Handle<Boolean> True(Isolate* isolate);
@@ -3770,28 +3753,48 @@ V8_INLINE Handle<Boolean> False(Isolate* isolate);
class V8_EXPORT ResourceConstraints {
public:
ResourceConstraints();
+
+ /**
+ * Configures the constraints with reasonable default values based on the
+ * capabilities of the current device the VM is running on.
+ *
+ * \param physical_memory The total amount of physical memory on the current
+ * device, in bytes.
+ * \param number_of_processors The number of CPUs available on the current
+ * device.
+ */
+ void ConfigureDefaults(uint64_t physical_memory,
+ uint32_t number_of_processors);
+
int max_young_space_size() const { return max_young_space_size_; }
void set_max_young_space_size(int value) { max_young_space_size_ = value; }
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
- int max_executable_size() { return max_executable_size_; }
+ int max_executable_size() const { return max_executable_size_; }
void set_max_executable_size(int value) { max_executable_size_ = value; }
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+ int max_available_threads() const { return max_available_threads_; }
+ // Set the number of threads available to V8, assuming at least 1.
+ void set_max_available_threads(int value) {
+ max_available_threads_ = value;
+ }
private:
int max_young_space_size_;
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
+ int max_available_threads_;
};
/**
- * Sets the given ResourceConstraints on the current isolate.
+ * Sets the given ResourceConstraints on the given Isolate.
*/
-bool V8_EXPORT SetResourceConstraints(ResourceConstraints* constraints);
+bool V8_EXPORT SetResourceConstraints(Isolate* isolate,
+ ResourceConstraints* constraints);
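A sketch of how an embedder might use the new ConfigureDefaults entry point together with the per-isolate SetResourceConstraints; physical_memory and number_of_processors are assumed to come from the embedder's platform layer:

    #include <v8.h>

    void ApplyConstraints(v8::Isolate* isolate,
                          uint64_t physical_memory,
                          uint32_t number_of_processors) {
      v8::ResourceConstraints constraints;
      // Picks reasonable heap limits for the current device.
      constraints.ConfigureDefaults(physical_memory, number_of_processors);
      constraints.set_max_available_threads(
          static_cast<int>(number_of_processors));
      // SetResourceConstraints now applies to an explicit isolate.
      v8::SetResourceConstraints(isolate, &constraints);
    }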
// --- Exceptions ---
@@ -3803,10 +3806,6 @@ typedef void (*FatalErrorCallback)(const char* location, const char* message);
typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> error);
-V8_DEPRECATED(
- "Use Isolate::ThrowException instead",
- Handle<Value> V8_EXPORT ThrowException(Handle<Value> exception));
-
/**
* Create new error objects by calling the corresponding error object
* constructor with the message.
@@ -3890,12 +3889,15 @@ enum GCType {
enum GCCallbackFlags {
kNoGCCallbackFlags = 0,
kGCCallbackFlagCompacted = 1 << 0,
- kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1
+ kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1,
+ kGCCallbackFlagForced = 1 << 2
};
typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
+typedef void (*InterruptCallback)(Isolate* isolate, void* data);
+
/**
* Collection of V8 heap information.
@@ -3958,6 +3960,15 @@ class V8_EXPORT Isolate {
};
/**
+ * Types of garbage collections that can be requested via
+ * RequestGarbageCollectionForTesting.
+ */
+ enum GarbageCollectionType {
+ kFullGarbageCollection,
+ kMinorGarbageCollection
+ };
+
+ /**
* Creates a new isolate. Does not change the currently entered
* isolate.
*
@@ -4000,15 +4011,22 @@ class V8_EXPORT Isolate {
void Dispose();
/**
- * Associate embedder-specific data with the isolate
+ * Associate embedder-specific data with the isolate. |slot| has to be
+ * between 0 and GetNumberOfDataSlots() - 1.
*/
- V8_INLINE void SetData(void* data);
+ V8_INLINE void SetData(uint32_t slot, void* data);
/**
* Retrieve embedder-specific data from the isolate.
- * Returns NULL if SetData has never been called.
+ * Returns NULL if SetData has never been called for the given |slot|.
+ */
+ V8_INLINE void* GetData(uint32_t slot);
+
+ /**
+ * Returns the maximum number of available embedder data slots. Valid slots
+ * are in the range of 0 - GetNumberOfDataSlots() - 1.
*/
- V8_INLINE void* GetData();
+ V8_INLINE static uint32_t GetNumberOfDataSlots();
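A sketch of the slot-based embedder data added above, assuming a hypothetical embedder-defined per-isolate Environment struct:

    #include <v8.h>

    struct Environment;  // embedder-defined per-isolate state (hypothetical)

    static const uint32_t kEnvironmentSlot = 0;  // must be < GetNumberOfDataSlots()

    void AttachEnvironment(v8::Isolate* isolate, Environment* env) {
      isolate->SetData(kEnvironmentSlot, env);
    }

    Environment* GetEnvironment(v8::Isolate* isolate) {
      return static_cast<Environment*>(isolate->GetData(kEnvironmentSlot));
    }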
/**
* Get statistics about the heap memory usage.
@@ -4028,7 +4046,7 @@ class V8_EXPORT Isolate {
* kept alive by JavaScript objects.
* \returns the adjusted value.
*/
- intptr_t AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes);
+ int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
/**
* Returns heap profiler for this isolate. Will return NULL until the isolate
@@ -4145,6 +4163,34 @@ class V8_EXPORT Isolate {
*/
void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+ /**
+ * Request V8 to interrupt long running JavaScript code and invoke
+ * the given |callback| passing the given |data| to it. After |callback|
+ * returns control will be returned to the JavaScript code.
+ * At any given moment V8 can remember only a single callback for the very
+ * last interrupt request.
+ * Can be called from another thread without acquiring a |Locker|.
+ * Registered |callback| must not reenter interrupted Isolate.
+ */
+ void RequestInterrupt(InterruptCallback callback, void* data);
+
+ /**
+ * Clear interrupt request created by |RequestInterrupt|.
+ * Can be called from another thread without acquiring a |Locker|.
+ */
+ void ClearInterrupt();
+
+ /**
+ * Request garbage collection in this Isolate. It is only valid to call this
+ * function if --expose_gc was specified.
+ *
+ * This should only be used for testing purposes and not to enforce a garbage
+ * collection schedule. It has strong negative impact on the garbage
+ * collection performance. Use IdleNotification() or LowMemoryNotification()
+ * instead to influence the garbage collection schedule.
+ */
+ void RequestGarbageCollectionForTesting(GarbageCollectionType type);
+
private:
Isolate();
Isolate(const Isolate&);
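A sketch of the new interruption hook, assuming a worker thread that wants long-running script on the JavaScript thread to observe a flag; per the comment above, the callback runs on the JavaScript thread and must not re-enter the interrupted isolate:

    #include <v8.h>

    static void OnInterrupt(v8::Isolate* isolate, void* data) {
      // Invoked by V8 on the JS thread at the next interrupt check.
      *static_cast<bool*>(data) = true;
    }

    void SignalStop(v8::Isolate* isolate, bool* stop_flag) {
      // Safe to call from another thread without acquiring a Locker.
      isolate->RequestInterrupt(OnInterrupt, stop_flag);
    }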
@@ -4342,24 +4388,6 @@ class V8_EXPORT PersistentHandleVisitor { // NOLINT
/**
- * Asserts that no action is performed that could cause a handle's value
- * to be modified. Useful when otherwise unsafe handle operations need to
- * be performed.
- */
-class V8_EXPORT AssertNoGCScope {
-#ifndef DEBUG
- // TODO(yangguo): remove isolate argument.
- V8_INLINE AssertNoGCScope(Isolate* isolate) {}
-#else
- AssertNoGCScope(Isolate* isolate);
- ~AssertNoGCScope();
- private:
- void* disallow_heap_allocation_;
-#endif
-};
-
-
-/**
* Container class for static utility functions.
*/
class V8_EXPORT V8 {
@@ -4551,6 +4579,22 @@ class V8_EXPORT V8 {
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
/**
+ * Experimental: Runs the Microtask Work Queue until empty
+ */
+ static void RunMicrotasks(Isolate* isolate);
+
+ /**
+ * Experimental: Enqueues the callback to the Microtask Work Queue
+ */
+ static void EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask);
+
+ /**
+ * Experimental: Controls whether the Microtask Work Queue is automatically
+ * run when the script call depth decrements to zero.
+ */
+ static void SetAutorunMicrotasks(Isolate *source, bool autorun);
+
+ /**
* Initializes from snapshot if possible. Otherwise, attempts to
* initialize from scratch. This function is called implicitly if
* you use the API without calling it first.
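A sketch of the experimental microtask controls declared above, assuming |task| is a v8::Handle<v8::Function> built by the embedder:

    #include <v8.h>

    void DrainMicrotasks(v8::Isolate* isolate, v8::Handle<v8::Function> task) {
      // Take manual control instead of auto-running when script depth hits zero.
      v8::V8::SetAutorunMicrotasks(isolate, false);
      v8::V8::EnqueueMicrotask(isolate, task);
      v8::V8::RunMicrotasks(isolate);  // drain the queue explicitly
    }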
@@ -4609,11 +4653,6 @@ class V8_EXPORT V8 {
static void SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler);
- V8_DEPRECATED(
- "Use Isolate::AdjustAmountOfExternalAllocatedMemory instead",
- static intptr_t AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes));
-
/**
* Forcefully terminate the current thread of JavaScript execution
* in the given isolate. If no isolate is provided, the default
@@ -4724,6 +4763,18 @@ class V8_EXPORT V8 {
*/
static bool InitializeICU();
+ /**
+ * Sets the v8::Platform to use. This should be invoked before V8 is
+ * initialized.
+ */
+ static void InitializePlatform(Platform* platform);
+
+ /**
+ * Clears all references to the v8::Platform. This should be invoked after
+ * V8 was disposed.
+ */
+ static void ShutdownPlatform();
+
private:
V8();
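A sketch of the start-up/tear-down order implied by the comments above; CreateEmbedderPlatform() is a hypothetical factory for a v8::Platform implementation (see the v8-platform.h header added in this change), and V8::Initialize()/Dispose() are the pre-existing entry points:

    #include <v8.h>

    extern v8::Platform* CreateEmbedderPlatform();  // hypothetical factory

    void RunV8() {
      v8::Platform* platform = CreateEmbedderPlatform();
      v8::V8::InitializePlatform(platform);  // before V8 is initialized
      v8::V8::Initialize();
      // ... create isolates, run scripts ...
      v8::V8::Dispose();
      v8::V8::ShutdownPlatform();            // after V8 was disposed
      delete platform;
    }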
@@ -4731,12 +4782,10 @@ class V8_EXPORT V8 {
internal::Object** handle);
static internal::Object** CopyPersistent(internal::Object** handle);
static void DisposeGlobal(internal::Object** global_handle);
- typedef WeakReferenceCallbacks<Value, void>::Revivable RevivableCallback;
typedef WeakCallbackData<Value, void>::Callback WeakCallback;
static void MakeWeak(internal::Object** global_handle,
void* data,
- WeakCallback weak_callback,
- RevivableCallback weak_reference_callback);
+ WeakCallback weak_callback);
static void ClearWeak(internal::Object** global_handle);
static void Eternalize(Isolate* isolate,
Value* handle,
@@ -4746,6 +4795,7 @@ class V8_EXPORT V8 {
template <class T> friend class Handle;
template <class T> friend class Local;
template <class T> friend class Eternal;
+ template <class T> friend class PersistentBase;
template <class T, class M> friend class Persistent;
friend class Context;
};
@@ -4885,15 +4935,19 @@ class V8_EXPORT TryCatch {
/**
- * Ignore
+ * A container for extension names.
*/
class V8_EXPORT ExtensionConfiguration {
public:
+ ExtensionConfiguration() : name_count_(0), names_(NULL) { }
ExtensionConfiguration(int name_count, const char* names[])
: name_count_(name_count), names_(names) { }
+
+ const char** begin() const { return &names_[0]; }
+ const char** end() const { return &names_[name_count_]; }
+
private:
- friend class ImplementationUtilities;
- int name_count_;
+ const int name_count_;
const char** names_;
};
@@ -4905,20 +4959,16 @@ class V8_EXPORT ExtensionConfiguration {
class V8_EXPORT Context {
public:
/**
- * Returns the global proxy object or global object itself for
- * detached contexts.
+ * Returns the global proxy object.
*
- * Global proxy object is a thin wrapper whose prototype points to
- * actual context's global object with the properties like Object, etc.
- * This is done that way for security reasons (for more details see
+ * Global proxy object is a thin wrapper whose prototype points to actual
+ * context's global object with the properties like Object, etc. This is done
+ * that way for security reasons (for more details see
* https://wiki.mozilla.org/Gecko:SplitWindow).
*
* Please note that changes to global proxy object prototype most probably
- * would break VM---v8 expects only global object as a prototype of
- * global proxy object.
- *
- * If DetachGlobal() has been invoked, Global() would return actual global
- * object until global is reattached with ReattachGlobal().
+ * would break VM---v8 expects only global object as a prototype of global
+ * proxy object.
*/
Local<Object> Global();
@@ -4929,18 +4979,6 @@ class V8_EXPORT Context {
void DetachGlobal();
/**
- * Reattaches a global object to a context. This can be used to
- * restore the connection between a global object and a context
- * after DetachGlobal has been called.
- *
- * \param global_object The global object to reattach to the
- * context. For this to work, the global object must be the global
- * object that was associated with this context before a call to
- * DetachGlobal.
- */
- void ReattachGlobal(Handle<Object> global_object);
-
- /**
* Creates a new context and returns a handle to the newly allocated
* context.
*
@@ -4964,15 +5002,6 @@ class V8_EXPORT Context {
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
Handle<Value> global_object = Handle<Value>());
- V8_DEPRECATED("Use Isolate::GetEnteredContext instead",
- static Local<Context> GetEntered());
-
- V8_DEPRECATED("Use Isolate::GetCurrentContext instead",
- static Local<Context> GetCurrent());
-
- V8_DEPRECATED("Use Isolate::GetCallingContext instead",
- static Local<Context> GetCalling());
-
/**
* Sets the security token for the context. To access an object in
* another context, the security tokens must match.
@@ -5002,9 +5031,6 @@ class V8_EXPORT Context {
/** Returns true if the context has experienced an out of memory situation. */
bool HasOutOfMemoryException();
- V8_DEPRECATED("Use Isolate::InContext instead",
- static bool InContext());
-
/** Returns an isolate associated with a current context. */
v8::Isolate* GetIsolate();
@@ -5074,12 +5100,6 @@ class V8_EXPORT Context {
explicit V8_INLINE Scope(Handle<Context> context) : context_(context) {
context_->Enter();
}
- V8_DEPRECATED(
- "Use Handle version instead",
- V8_INLINE Scope(Isolate* isolate, Persistent<Context>& context)) // NOLINT
- : context_(Handle<Context>::New(isolate, context)) {
- context_->Enter();
- }
V8_INLINE ~Scope() { context_->Exit(); }
private:
@@ -5198,20 +5218,6 @@ class V8_EXPORT Locker {
~Locker();
/**
- * Start preemption.
- *
- * When preemption is started, a timer is fired every n milliseconds
- * that will switch between multiple threads that are in contention
- * for the V8 lock.
- */
- static void StartPreemption(Isolate* isolate, int every_n_ms);
-
- /**
- * Stop preemption.
- */
- static void StopPreemption(Isolate* isolate);
-
- /**
* Returns whether or not the locker for a given isolate, is locked by the
* current thread.
*/
@@ -5402,13 +5408,13 @@ class Internals {
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kExternalAsciiRepresentationTag = 0x06;
- static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize;
- static const int kIsolateRootsOffset = 3 * kApiPointerSize;
+ static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
+ static const int kIsolateRootsOffset = 5 * kApiPointerSize;
static const int kUndefinedValueRootIndex = 5;
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 132;
+ static const int kEmptyStringRootIndex = 141;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5419,7 +5425,7 @@ class Internals {
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
- static const int kJSObjectType = 0xb2;
+ static const int kJSObjectType = 0xbb;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -5427,6 +5433,8 @@ class Internals {
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
+ static const uint32_t kNumIsolateDataSlots = 4;
+
V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
@@ -5490,15 +5498,17 @@ class Internals {
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
- V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, void* data) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset;
+ V8_INLINE static void SetEmbedderData(v8::Isolate *isolate,
+ uint32_t slot,
+ void *data) {
+ uint8_t *addr = reinterpret_cast<uint8_t *>(isolate) +
+ kIsolateEmbedderDataOffset + slot * kApiPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
- V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate) {
+ V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate, uint32_t slot) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset;
+ kIsolateEmbedderDataOffset + slot * kApiPointerSize;
return *reinterpret_cast<void**>(addr);
}
@@ -5548,8 +5558,7 @@ Local<T> Local<T>::New(Isolate* isolate, Handle<T> that) {
}
template <class T>
-template <class M>
-Local<T> Local<T>::New(Isolate* isolate, const Persistent<T, M>& that) {
+Local<T> Local<T>::New(Isolate* isolate, const PersistentBase<T>& that) {
return New(isolate, that.val_);
}
@@ -5587,8 +5596,8 @@ Local<T> Eternal<T>::Get(Isolate* isolate) {
}
-template <class T, class M>
-T* Persistent<T, M>::New(Isolate* isolate, T* that) {
+template <class T>
+T* PersistentBase<T>::New(Isolate* isolate, T* that) {
if (that == NULL) return NULL;
internal::Object** p = reinterpret_cast<internal::Object**>(that);
return reinterpret_cast<T*>(
@@ -5601,7 +5610,7 @@ template <class T, class M>
template <class S, class M2>
void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
TYPE_CHECK(T, S);
- Reset();
+ this->Reset();
if (that.IsEmpty()) return;
internal::Object** p = reinterpret_cast<internal::Object**>(that.val_);
this->val_ = reinterpret_cast<T*>(V8::CopyPersistent(p));
@@ -5609,8 +5618,8 @@ void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
}
-template <class T, class M>
-bool Persistent<T, M>::IsIndependent() const {
+template <class T>
+bool PersistentBase<T>::IsIndependent() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
return I::GetNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
@@ -5618,8 +5627,8 @@ bool Persistent<T, M>::IsIndependent() const {
}
-template <class T, class M>
-bool Persistent<T, M>::IsNearDeath() const {
+template <class T>
+bool PersistentBase<T>::IsNearDeath() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
uint8_t node_state =
@@ -5629,8 +5638,8 @@ bool Persistent<T, M>::IsNearDeath() const {
}
-template <class T, class M>
-bool Persistent<T, M>::IsWeak() const {
+template <class T>
+bool PersistentBase<T>::IsWeak() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
return I::GetNodeState(reinterpret_cast<internal::Object**>(this->val_)) ==
@@ -5638,17 +5647,17 @@ bool Persistent<T, M>::IsWeak() const {
}
-template <class T, class M>
-void Persistent<T, M>::Reset() {
+template <class T>
+void PersistentBase<T>::Reset() {
if (this->IsEmpty()) return;
V8::DisposeGlobal(reinterpret_cast<internal::Object**>(this->val_));
val_ = 0;
}
-template <class T, class M>
+template <class T>
template <class S>
-void Persistent<T, M>::Reset(Isolate* isolate, const Handle<S>& other) {
+void PersistentBase<T>::Reset(Isolate* isolate, const Handle<S>& other) {
TYPE_CHECK(T, S);
Reset();
if (other.IsEmpty()) return;
@@ -5656,10 +5665,10 @@ void Persistent<T, M>::Reset(Isolate* isolate, const Handle<S>& other) {
}
-template <class T, class M>
-template <class S, class M2>
-void Persistent<T, M>::Reset(Isolate* isolate,
- const Persistent<S, M2>& other) {
+template <class T>
+template <class S>
+void PersistentBase<T>::Reset(Isolate* isolate,
+ const PersistentBase<S>& other) {
TYPE_CHECK(T, S);
Reset();
if (other.IsEmpty()) return;
@@ -5667,60 +5676,36 @@ void Persistent<T, M>::Reset(Isolate* isolate,
}
-template <class T, class M>
+template <class T>
template <typename S, typename P>
-void Persistent<T, M>::SetWeak(
+void PersistentBase<T>::SetWeak(
P* parameter,
typename WeakCallbackData<S, P>::Callback callback) {
TYPE_CHECK(S, T);
typedef typename WeakCallbackData<Value, void>::Callback Callback;
V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_),
parameter,
- reinterpret_cast<Callback>(callback),
- NULL);
+ reinterpret_cast<Callback>(callback));
}
-template <class T, class M>
+template <class T>
template <typename P>
-void Persistent<T, M>::SetWeak(
+void PersistentBase<T>::SetWeak(
P* parameter,
typename WeakCallbackData<T, P>::Callback callback) {
SetWeak<T, P>(parameter, callback);
}
-template <class T, class M>
-template <typename S, typename P>
-void Persistent<T, M>::MakeWeak(
- P* parameters,
- typename WeakReferenceCallbacks<S, P>::Revivable callback) {
- TYPE_CHECK(S, T);
- typedef typename WeakReferenceCallbacks<Value, void>::Revivable Revivable;
- V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_),
- parameters,
- NULL,
- reinterpret_cast<Revivable>(callback));
-}
-
-
-template <class T, class M>
-template <typename P>
-void Persistent<T, M>::MakeWeak(
- P* parameters,
- typename WeakReferenceCallbacks<T, P>::Revivable callback) {
- MakeWeak<T, P>(parameters, callback);
-}
-
-
-template <class T, class M>
-void Persistent<T, M>::ClearWeak() {
+template <class T>
+void PersistentBase<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_));
}
-template <class T, class M>
-void Persistent<T, M>::MarkIndependent() {
+template <class T>
+void PersistentBase<T>::MarkIndependent() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
@@ -5729,8 +5714,8 @@ void Persistent<T, M>::MarkIndependent() {
}
-template <class T, class M>
-void Persistent<T, M>::MarkPartiallyDependent() {
+template <class T>
+void PersistentBase<T>::MarkPartiallyDependent() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
@@ -5742,14 +5727,14 @@ void Persistent<T, M>::MarkPartiallyDependent() {
template <class T, class M>
T* Persistent<T, M>::ClearAndLeak() {
T* old;
- old = val_;
- val_ = NULL;
+ old = this->val_;
+ this->val_ = NULL;
return old;
}
-template <class T, class M>
-void Persistent<T, M>::SetWrapperClassId(uint16_t class_id) {
+template <class T>
+void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
if (this->IsEmpty()) return;
internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
@@ -5758,8 +5743,8 @@ void Persistent<T, M>::SetWrapperClassId(uint16_t class_id) {
}
-template <class T, class M>
-uint16_t Persistent<T, M>::WrapperClassId() const {
+template <class T>
+uint16_t PersistentBase<T>::WrapperClassId() const {
typedef internal::Internals I;
if (this->IsEmpty()) return 0;
internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
@@ -5807,20 +5792,19 @@ void ReturnValue<T>::Set(int32_t i) {
*value_ = I::IntToSmi(i);
return;
}
- Set(Integer::New(i, GetIsolate()));
+ Set(Integer::New(GetIsolate(), i));
}
template<typename T>
void ReturnValue<T>::Set(uint32_t i) {
TYPE_CHECK(T, Integer);
- typedef internal::Internals I;
// Can't simply use INT32_MAX here for whatever reason.
bool fits_into_int32_t = (i & (1U << 31)) == 0;
if (V8_LIKELY(fits_into_int32_t)) {
Set(static_cast<int32_t>(i));
return;
}
- Set(Integer::NewFromUnsigned(i, GetIsolate()));
+ Set(Integer::NewFromUnsigned(GetIsolate(), i));
}
template<typename T>
@@ -5938,13 +5922,6 @@ int FunctionCallbackInfo<T>::Length() const {
}
-template <class T>
-Local<T> HandleScope::Close(Handle<T> value) {
- internal::Object** before = reinterpret_cast<internal::Object**>(*value);
- internal::Object** after = RawClose(before);
- return Local<T>(reinterpret_cast<T*>(after));
-}
-
Handle<Value> ScriptOrigin::ResourceName() const {
return resource_name_;
}
@@ -5964,14 +5941,13 @@ Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const {
}
-Handle<Boolean> Boolean::New(bool value) {
- Isolate* isolate = Isolate::GetCurrent();
+Handle<Boolean> Boolean::New(Isolate* isolate, bool value) {
return value ? True(isolate) : False(isolate);
}
-void Template::Set(const char* name, v8::Handle<Data> value) {
- Set(v8::String::New(name), value);
+void Template::Set(Isolate* isolate, const char* name, v8::Handle<Data> value) {
+ Set(v8::String::NewFromUtf8(isolate, name), value);
}
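
The hunk above is part of the broader API migration in this upgrade: factory functions now take the target Isolate explicitly instead of looking it up via Isolate::GetCurrent(). A minimal sketch of the post-migration calling convention (the function and property names are illustrative, not from the diff):

    void ConfigureTemplate(v8::Isolate* isolate,
                           v8::Handle<v8::ObjectTemplate> tmpl) {
      v8::HandleScope scope(isolate);
      // Template::Set and the value factories all take the isolate first.
      tmpl->Set(isolate, "enabled", v8::Boolean::New(isolate, true));
      tmpl->Set(v8::String::NewFromUtf8(isolate, "greeting"),
                v8::String::NewFromUtf8(isolate, "hello"));
    }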
@@ -6027,32 +6003,6 @@ Local<String> String::Empty(Isolate* isolate) {
}
-Local<String> String::New(const char* data, int length) {
- return NewFromUtf8(Isolate::GetCurrent(), data, kNormalString, length);
-}
-
-
-Local<String> String::New(const uint16_t* data, int length) {
- return NewFromTwoByte(Isolate::GetCurrent(), data, kNormalString, length);
-}
-
-
-Local<String> String::NewSymbol(const char* data, int length) {
- return NewFromUtf8(Isolate::GetCurrent(), data, kInternalizedString, length);
-}
-
-
-Local<String> String::NewUndetectable(const char* data, int length) {
- return NewFromUtf8(Isolate::GetCurrent(), data, kUndetectableString, length);
-}
-
-
-Local<String> String::NewUndetectable(const uint16_t* data, int length) {
- return NewFromTwoByte(
- Isolate::GetCurrent(), data, kUndetectableString, length);
-}
-
-
String::ExternalStringResource* String::GetExternalStringResource() const {
typedef internal::Object O;
typedef internal::Internals I;
@@ -6423,15 +6373,21 @@ Handle<Boolean> False(Isolate* isolate) {
}
-void Isolate::SetData(void* data) {
+void Isolate::SetData(uint32_t slot, void* data) {
+ typedef internal::Internals I;
+ I::SetEmbedderData(this, slot, data);
+}
+
+
+void* Isolate::GetData(uint32_t slot) {
typedef internal::Internals I;
- I::SetEmbedderData(this, data);
+ return I::GetEmbedderData(this, slot);
}
-void* Isolate::GetData() {
+uint32_t Isolate::GetNumberOfDataSlots() {
typedef internal::Internals I;
- return I::GetEmbedderData(this);
+ return I::kNumIsolateDataSlots;
}
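
The single embedder-data pointer is replaced here by indexed slots. A hedged usage sketch of the new API (the EmbedderState struct and the choice of slot 0 are assumptions for illustration):

    struct EmbedderState { int request_count; };

    void AttachState(v8::Isolate* isolate, EmbedderState* state) {
      // Valid slot indices are 0 .. Isolate::GetNumberOfDataSlots() - 1.
      isolate->SetData(0, state);
    }

    EmbedderState* GetState(v8::Isolate* isolate) {
      return static_cast<EmbedderState*>(isolate->GetData(0));
    }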
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 834f9c50ec..0d99e56586 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -88,6 +88,7 @@
// V8_OS_NETBSD - NetBSD
// V8_OS_OPENBSD - OpenBSD
// V8_OS_POSIX - POSIX compatible (mostly everything except Windows)
+// V8_OS_QNX - QNX Neutrino
// V8_OS_SOLARIS - Sun Solaris and OpenSolaris
// V8_OS_WIN - Microsoft Windows
@@ -127,6 +128,9 @@
# define V8_OS_BSD 1
# define V8_OS_OPENBSD 1
# define V8_OS_POSIX 1
+#elif defined(__QNXNTO__)
+# define V8_OS_POSIX 1
+# define V8_OS_QNX 1
#elif defined(_WIN32)
# define V8_OS_WIN 1
#endif
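
The new V8_OS_QNX define sits alongside the existing platform macros, so QNX-specific code can be guarded in the usual way; a hypothetical guard:

    #if V8_OS_QNX
    // QNX Neutrino specific path (illustrative).
    #elif V8_OS_POSIX
    // Generic POSIX fallback.
    #endif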
@@ -135,6 +139,7 @@
// -----------------------------------------------------------------------------
// C library detection
//
+// V8_LIBC_MSVCRT - MSVC libc
// V8_LIBC_BIONIC - Bionic libc
// V8_LIBC_BSD - BSD libc derivative
// V8_LIBC_GLIBC - GNU C library
@@ -146,7 +151,9 @@
// ...
// #endif
-#if defined(__BIONIC__)
+#if defined (_MSC_VER)
+# define V8_LIBC_MSVCRT 1
+#elif defined(__BIONIC__)
# define V8_LIBC_BIONIC 1
# define V8_LIBC_BSD 1
#elif defined(__UCLIBC__)
@@ -187,6 +194,7 @@
// supported
// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
+// V8_HAS_ATTRIBUTE_UNUSED - __attribute__((unused)) supported
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
// supported
@@ -216,6 +224,7 @@
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
+# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
@@ -247,6 +256,7 @@
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_ATTRIBUTE_UNUSED (V8_GNUC_PREREQ(2, 95, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
@@ -334,6 +344,14 @@ declarator __attribute__((deprecated))
#endif
+// A macro to mark variables or types as unused, avoiding compiler warnings.
+#if V8_HAS_ATTRIBUTE_UNUSED
+# define V8_UNUSED __attribute__((unused))
+#else
+# define V8_UNUSED
+#endif
+
+
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() V8_WARN_UNUSED_RESULT;
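
The new V8_UNUSED macro added above expands to __attribute__((unused)) where supported. A hypothetical use is a local that is only read by debug-mode assertions and would otherwise trigger an unused-variable warning in release builds (sketch only):

    void Compute(int input) {
      int checksum V8_UNUSED = input * 31;  // unused once ASSERT compiles away
      ASSERT(checksum == input * 31);
    }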
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 5068c885e9..1d6a3bdba6 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -99,7 +99,7 @@ enum MainCycleType {
const char* ToCString(const v8::String::Utf8Value& value);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
-v8::Handle<v8::String> ReadFile(const char* name);
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
v8::Handle<v8::String> ReadLine();
void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -174,14 +174,14 @@ int RunMain(int argc, char* argv[]) {
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
- script_source = v8::String::New(argv[i + 1]);
- script_name = v8::String::New("unnamed");
+ script_source = v8::String::NewFromUtf8(isolate, argv[i + 1]);
+ script_name = v8::String::NewFromUtf8(isolate, "unnamed");
i++;
script_param_counter++;
} else {
// Use argument as a name of file to load.
- script_source = ReadFile(str);
- script_name = v8::String::New(str);
+ script_source = ReadFile(isolate, str);
+ script_name = v8::String::NewFromUtf8(isolate, str);
if (script_source.IsEmpty()) {
printf("Error reading '%s'\n", str);
return 1;
@@ -200,15 +200,16 @@ int RunMain(int argc, char* argv[]) {
}
// Create a template for the global object.
- v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
// Bind the global 'print' function to the C++ Print callback.
- global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+ global->Set(v8::String::NewFromUtf8(isolate, "print"),
+ v8::FunctionTemplate::New(isolate, Print));
if (cycle_type == CycleInJs) {
// Bind the global 'read_line' function to the C++ Print callback.
- global->Set(v8::String::New("read_line"),
- v8::FunctionTemplate::New(ReadLine));
+ global->Set(v8::String::NewFromUtf8(isolate, "read_line"),
+ v8::FunctionTemplate::New(isolate, ReadLine));
}
// Create a new execution environment containing the built-in
@@ -277,7 +278,8 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Locker lock(isolate);
#endif // ENABLE_DEBUGGER_SUPPORT
- v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
+ v8::Handle<v8::String> fun_name =
+ v8::String::NewFromUtf8(isolate, "ProcessLine");
v8::Handle<v8::Value> process_val = context->Global()->Get(fun_name);
// If there is no Process function, or if it is not a function,
@@ -338,7 +340,7 @@ const char* ToCString(const v8::String::Utf8Value& value) {
// Reads a file into a v8 string.
-v8::Handle<v8::String> ReadFile(const char* name) {
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
FILE* file = fopen(name, "rb");
if (file == NULL) return v8::Handle<v8::String>();
@@ -353,7 +355,8 @@ v8::Handle<v8::String> ReadFile(const char* name) {
i += read;
}
fclose(file);
- v8::Handle<v8::String> result = v8::String::New(chars, size);
+ v8::Handle<v8::String> result =
+ v8::String::NewFromUtf8(isolate, chars, v8::String::kNormalString, size);
delete[] chars;
return result;
}
@@ -417,7 +420,8 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
// function is called. Reads a string from standard input and returns.
void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() > 0) {
- args.GetIsolate()->ThrowException(v8::String::New("Unexpected arguments"));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(), "Unexpected arguments"));
return;
}
args.GetReturnValue().Set(ReadLine());
@@ -435,8 +439,9 @@ v8::Handle<v8::String> ReadLine() {
#endif // ENABLE_DEBUGGER_SUPPORT
res = fgets(buffer, kBufferSize, stdin);
}
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
if (res == NULL) {
- v8::Handle<v8::Primitive> t = v8::Undefined(v8::Isolate::GetCurrent());
+ v8::Handle<v8::Primitive> t = v8::Undefined(isolate);
return v8::Handle<v8::String>::Cast(t);
}
// Remove newline char
@@ -446,5 +451,5 @@ v8::Handle<v8::String> ReadLine() {
break;
}
}
- return v8::String::New(buffer);
+ return v8::String::NewFromUtf8(isolate, buffer);
}
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index e6f2ee3add..37b4d39208 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -160,8 +160,9 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// Create a template for the global object where we set the
// built-in global functions.
- Handle<ObjectTemplate> global = ObjectTemplate::New();
- global->Set(String::New("log"), FunctionTemplate::New(LogCallback));
+ Handle<ObjectTemplate> global = ObjectTemplate::New(GetIsolate());
+ global->Set(String::NewFromUtf8(GetIsolate(), "log"),
+ FunctionTemplate::New(GetIsolate(), LogCallback));
// Each processor gets its own context so different processors don't
// affect each other. Context::New returns a persistent handle which
@@ -185,7 +186,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// The script compiled and ran correctly. Now we fetch out the
// Process function from the global object.
- Handle<String> process_name = String::New("Process");
+ Handle<String> process_name = String::NewFromUtf8(GetIsolate(), "Process");
Handle<Value> process_val = context->Global()->Get(process_name);
// If there is no Process function, or if it is not a function,
@@ -244,10 +245,12 @@ bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
v8::Local<v8::Context>::New(GetIsolate(), context_);
// Set the options object as a property on the global object.
- context->Global()->Set(String::New("options"), opts_obj);
+ context->Global()->Set(String::NewFromUtf8(GetIsolate(), "options"),
+ opts_obj);
Handle<Object> output_obj = WrapMap(output);
- context->Global()->Set(String::New("output"), output_obj);
+ context->Global()->Set(String::NewFromUtf8(GetIsolate(), "output"),
+ output_obj);
return true;
}
@@ -291,8 +294,8 @@ JsHttpRequestProcessor::~JsHttpRequestProcessor() {
  // Dispose the persistent handles. When no one else has any
  // references to the objects stored in the handles, they will be
  // automatically reclaimed.
- context_.Dispose();
- process_.Dispose();
+ context_.Reset();
+ process_.Reset();
}
@@ -308,7 +311,7 @@ Persistent<ObjectTemplate> JsHttpRequestProcessor::map_template_;
// JavaScript object.
Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// Handle scope for temporary handles.
- HandleScope handle_scope(GetIsolate());
+ EscapableHandleScope handle_scope(GetIsolate());
// Fetch the template for creating JavaScript map wrappers.
// It only has to be created once, which we do on demand.
@@ -320,11 +323,11 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
Local<ObjectTemplate>::New(GetIsolate(), map_template_);
// Create an empty map wrapper.
- Handle<Object> result = templ->NewInstance();
+ Local<Object> result = templ->NewInstance();
// Wrap the raw C++ pointer in an External so it can be referenced
// from within JavaScript.
- Handle<External> map_ptr = External::New(obj);
+ Handle<External> map_ptr = External::New(GetIsolate(), obj);
// Store the map pointer in the JavaScript wrapper.
result->SetInternalField(0, map_ptr);
@@ -333,7 +336,7 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// of these handles will go away when the handle scope is deleted
// we need to call Close to let one, the result, escape into the
// outer handle scope.
- return handle_scope.Close(result);
+ return handle_scope.Escape(result);
}
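
The WrapMap change is the recurring pattern in these samples: HandleScope::Close (removed from v8.h above) gives way to EscapableHandleScope::Escape, which promotes exactly one local into the enclosing scope; note the unchanged comment above still mentions Close. A minimal sketch of the pattern on its own (the wrapped value is illustrative):

    v8::Local<v8::String> MakeLabel(v8::Isolate* isolate) {
      v8::EscapableHandleScope scope(isolate);
      v8::Local<v8::String> label = v8::String::NewFromUtf8(isolate, "label");
      return scope.Escape(label);  // only the escaped handle outlives the scope
    }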
@@ -370,8 +373,9 @@ void JsHttpRequestProcessor::MapGet(Local<String> name,
// Otherwise fetch the value and wrap it in a JavaScript string
const string& value = (*iter).second;
- info.GetReturnValue().Set(
- String::New(value.c_str(), static_cast<int>(value.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), value.c_str(), String::kNormalString,
+ static_cast<int>(value.length())));
}
@@ -395,14 +399,14 @@ void JsHttpRequestProcessor::MapSet(Local<String> name,
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
Isolate* isolate) {
- HandleScope handle_scope(isolate);
+ EscapableHandleScope handle_scope(isolate);
- Handle<ObjectTemplate> result = ObjectTemplate::New();
+ Local<ObjectTemplate> result = ObjectTemplate::New(isolate);
result->SetInternalFieldCount(1);
result->SetNamedPropertyHandler(MapGet, MapSet);
// Again, return the result through the current handle scope.
- return handle_scope.Close(result);
+ return handle_scope.Escape(result);
}
@@ -416,7 +420,7 @@ Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
*/
Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// Handle scope for temporary handles.
- HandleScope handle_scope(GetIsolate());
+ EscapableHandleScope handle_scope(GetIsolate());
// Fetch the template for creating JavaScript http request wrappers.
// It only has to be created once, which we do on demand.
@@ -428,11 +432,11 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
Local<ObjectTemplate>::New(GetIsolate(), request_template_);
// Create an empty http request wrapper.
- Handle<Object> result = templ->NewInstance();
+ Local<Object> result = templ->NewInstance();
// Wrap the raw C++ pointer in an External so it can be referenced
// from within JavaScript.
- Handle<External> request_ptr = External::New(request);
+ Handle<External> request_ptr = External::New(GetIsolate(), request);
// Store the request pointer in the JavaScript wrapper.
result->SetInternalField(0, request_ptr);
@@ -441,7 +445,7 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// of these handles will go away when the handle scope is deleted
// we need to call Close to let one, the result, escape into the
// outer handle scope.
- return handle_scope.Close(result);
+ return handle_scope.Escape(result);
}
@@ -465,8 +469,9 @@ void JsHttpRequestProcessor::GetPath(Local<String> name,
const string& path = request->Path();
// Wrap the result in a JavaScript string and return it.
- info.GetReturnValue().Set(
- String::New(path.c_str(), static_cast<int>(path.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), path.c_str(), String::kNormalString,
+ static_cast<int>(path.length())));
}
@@ -475,8 +480,9 @@ void JsHttpRequestProcessor::GetReferrer(
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Referrer();
- info.GetReturnValue().Set(
- String::New(path.c_str(), static_cast<int>(path.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), path.c_str(), String::kNormalString,
+ static_cast<int>(path.length())));
}
@@ -484,8 +490,9 @@ void JsHttpRequestProcessor::GetHost(Local<String> name,
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Host();
- info.GetReturnValue().Set(
- String::New(path.c_str(), static_cast<int>(path.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), path.c_str(), String::kNormalString,
+ static_cast<int>(path.length())));
}
@@ -494,26 +501,35 @@ void JsHttpRequestProcessor::GetUserAgent(
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->UserAgent();
- info.GetReturnValue().Set(
- String::New(path.c_str(), static_cast<int>(path.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), path.c_str(), String::kNormalString,
+ static_cast<int>(path.length())));
}
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate(
Isolate* isolate) {
- HandleScope handle_scope(isolate);
+ EscapableHandleScope handle_scope(isolate);
- Handle<ObjectTemplate> result = ObjectTemplate::New();
+ Local<ObjectTemplate> result = ObjectTemplate::New(isolate);
result->SetInternalFieldCount(1);
// Add accessors for each of the fields of the request.
- result->SetAccessor(String::NewSymbol("path"), GetPath);
- result->SetAccessor(String::NewSymbol("referrer"), GetReferrer);
- result->SetAccessor(String::NewSymbol("host"), GetHost);
- result->SetAccessor(String::NewSymbol("userAgent"), GetUserAgent);
+ result->SetAccessor(
+ String::NewFromUtf8(isolate, "path", String::kInternalizedString),
+ GetPath);
+ result->SetAccessor(
+ String::NewFromUtf8(isolate, "referrer", String::kInternalizedString),
+ GetReferrer);
+ result->SetAccessor(
+ String::NewFromUtf8(isolate, "host", String::kInternalizedString),
+ GetHost);
+ result->SetAccessor(
+ String::NewFromUtf8(isolate, "userAgent", String::kInternalizedString),
+ GetUserAgent);
// Again, return the result through the current handle scope.
- return handle_scope.Close(result);
+ return handle_scope.Escape(result);
}
@@ -575,7 +591,7 @@ void ParseOptions(int argc,
// Reads a file into a v8 string.
-Handle<String> ReadFile(const string& name) {
+Handle<String> ReadFile(Isolate* isolate, const string& name) {
FILE* file = fopen(name.c_str(), "rb");
if (file == NULL) return Handle<String>();
@@ -590,7 +606,8 @@ Handle<String> ReadFile(const string& name) {
i += read;
}
fclose(file);
- Handle<String> result = String::New(chars, size);
+ Handle<String> result =
+ String::NewFromUtf8(isolate, chars, String::kNormalString, size);
delete[] chars;
return result;
}
@@ -636,7 +653,7 @@ int main(int argc, char* argv[]) {
}
Isolate* isolate = Isolate::GetCurrent();
HandleScope scope(isolate);
- Handle<String> source = ReadFile(file);
+ Handle<String> source = ReadFile(isolate, file);
if (source.IsEmpty()) {
fprintf(stderr, "Error reading '%s'.\n", file.c_str());
return 1;
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 06bd8f67eb..92a473231b 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -58,7 +58,7 @@ void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
-v8::Handle<v8::String> ReadFile(const char* name);
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
@@ -98,17 +98,22 @@ const char* ToCString(const v8::String::Utf8Value& value) {
// functions.
v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate) {
// Create a template for the global object.
- v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
// Bind the global 'print' function to the C++ Print callback.
- global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+ global->Set(v8::String::NewFromUtf8(isolate, "print"),
+ v8::FunctionTemplate::New(isolate, Print));
// Bind the global 'read' function to the C++ Read callback.
- global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
+ global->Set(v8::String::NewFromUtf8(isolate, "read"),
+ v8::FunctionTemplate::New(isolate, Read));
// Bind the global 'load' function to the C++ Load callback.
- global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
+ global->Set(v8::String::NewFromUtf8(isolate, "load"),
+ v8::FunctionTemplate::New(isolate, Load));
// Bind the 'quit' function
- global->Set(v8::String::New("quit"), v8::FunctionTemplate::New(Quit));
+ global->Set(v8::String::NewFromUtf8(isolate, "quit"),
+ v8::FunctionTemplate::New(isolate, Quit));
// Bind the 'version' function
- global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
+ global->Set(v8::String::NewFromUtf8(isolate, "version"),
+ v8::FunctionTemplate::New(isolate, Version));
return v8::Context::New(isolate, NULL, global);
}
@@ -141,19 +146,19 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
args.GetIsolate()->ThrowException(
- v8::String::New("Bad parameters"));
+ v8::String::NewFromUtf8(args.GetIsolate(), "Bad parameters"));
return;
}
v8::String::Utf8Value file(args[0]);
if (*file == NULL) {
args.GetIsolate()->ThrowException(
- v8::String::New("Error loading file"));
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
return;
}
- v8::Handle<v8::String> source = ReadFile(*file);
+ v8::Handle<v8::String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
args.GetIsolate()->ThrowException(
- v8::String::New("Error loading file"));
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
return;
}
args.GetReturnValue().Set(source);
@@ -169,22 +174,22 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::String::Utf8Value file(args[i]);
if (*file == NULL) {
args.GetIsolate()->ThrowException(
- v8::String::New("Error loading file"));
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
return;
}
- v8::Handle<v8::String> source = ReadFile(*file);
+ v8::Handle<v8::String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
args.GetIsolate()->ThrowException(
- v8::String::New("Error loading file"));
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
return;
}
if (!ExecuteString(args.GetIsolate(),
source,
- v8::String::New(*file),
+ v8::String::NewFromUtf8(args.GetIsolate(), *file),
false,
false)) {
args.GetIsolate()->ThrowException(
- v8::String::New("Error executing file"));
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error executing file"));
return;
}
}
@@ -204,12 +209,13 @@ void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(v8::String::New(v8::V8::GetVersion()));
+ args.GetReturnValue().Set(
+ v8::String::NewFromUtf8(args.GetIsolate(), v8::V8::GetVersion()));
}
// Reads a file into a v8 string.
-v8::Handle<v8::String> ReadFile(const char* name) {
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
FILE* file = fopen(name, "rb");
if (file == NULL) return v8::Handle<v8::String>();
@@ -224,7 +230,8 @@ v8::Handle<v8::String> ReadFile(const char* name) {
i += read;
}
fclose(file);
- v8::Handle<v8::String> result = v8::String::New(chars, size);
+ v8::Handle<v8::String> result =
+ v8::String::NewFromUtf8(isolate, chars, v8::String::kNormalString, size);
delete[] chars;
return result;
}
@@ -245,13 +252,15 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
"Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
// Execute argument given to -e option directly.
- v8::Handle<v8::String> file_name = v8::String::New("unnamed");
- v8::Handle<v8::String> source = v8::String::New(argv[++i]);
+ v8::Handle<v8::String> file_name =
+ v8::String::NewFromUtf8(isolate, "unnamed");
+ v8::Handle<v8::String> source =
+ v8::String::NewFromUtf8(isolate, argv[++i]);
if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
} else {
// Use all other arguments as names of files to load and run.
- v8::Handle<v8::String> file_name = v8::String::New(str);
- v8::Handle<v8::String> source = ReadFile(str);
+ v8::Handle<v8::String> file_name = v8::String::NewFromUtf8(isolate, str);
+ v8::Handle<v8::String> source = ReadFile(isolate, str);
if (source.IsEmpty()) {
fprintf(stderr, "Error reading '%s'\n", str);
continue;
@@ -269,7 +278,8 @@ void RunShell(v8::Handle<v8::Context> context) {
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
- v8::Local<v8::String> name(v8::String::New("(shell)"));
+ v8::Local<v8::String> name(
+ v8::String::NewFromUtf8(context->GetIsolate(), "(shell)"));
while (true) {
char buffer[kBufferSize];
fprintf(stderr, "> ");
@@ -277,7 +287,7 @@ void RunShell(v8::Handle<v8::Context> context) {
if (str == NULL) break;
v8::HandleScope handle_scope(context->GetIsolate());
ExecuteString(context->GetIsolate(),
- v8::String::New(str),
+ v8::String::NewFromUtf8(context->GetIsolate(), str),
name,
true,
true);
diff --git a/deps/v8/src/a64/OWNERS b/deps/v8/src/a64/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/a64/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/a64/assembler-a64-inl.h b/deps/v8/src/a64/assembler-a64-inl.h
new file mode 100644
index 0000000000..e68dee0738
--- /dev/null
+++ b/deps/v8/src/a64/assembler-a64-inl.h
@@ -0,0 +1,1200 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_ASSEMBLER_A64_INL_H_
+#define V8_A64_ASSEMBLER_A64_INL_H_
+
+#include "a64/assembler-a64.h"
+#include "cpu.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+void RelocInfo::apply(intptr_t delta) {
+ UNIMPLEMENTED();
+}
+
+
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+inline unsigned CPURegister::code() const {
+ ASSERT(IsValid());
+ return reg_code;
+}
+
+
+inline CPURegister::RegisterType CPURegister::type() const {
+ ASSERT(IsValidOrNone());
+ return reg_type;
+}
+
+
+inline RegList CPURegister::Bit() const {
+ ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
+ return IsValid() ? 1UL << reg_code : 0;
+}
+
+
+inline unsigned CPURegister::SizeInBits() const {
+ ASSERT(IsValid());
+ return reg_size;
+}
+
+
+inline int CPURegister::SizeInBytes() const {
+ ASSERT(IsValid());
+ ASSERT(SizeInBits() % 8 == 0);
+ return reg_size / 8;
+}
+
+
+inline bool CPURegister::Is32Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 32;
+}
+
+
+inline bool CPURegister::Is64Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 64;
+}
+
+
+inline bool CPURegister::IsValid() const {
+ if (IsValidRegister() || IsValidFPRegister()) {
+ ASSERT(!IsNone());
+ return true;
+ } else {
+ ASSERT(IsNone());
+ return false;
+ }
+}
+
+
+inline bool CPURegister::IsValidRegister() const {
+ return IsRegister() &&
+ ((reg_size == kWRegSize) || (reg_size == kXRegSize)) &&
+ ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
+}
+
+
+inline bool CPURegister::IsValidFPRegister() const {
+ return IsFPRegister() &&
+ ((reg_size == kSRegSize) || (reg_size == kDRegSize)) &&
+ (reg_code < kNumberOfFPRegisters);
+}
+
+
+inline bool CPURegister::IsNone() const {
+ // kNoRegister types should always have size 0 and code 0.
+ ASSERT((reg_type != kNoRegister) || (reg_code == 0));
+ ASSERT((reg_type != kNoRegister) || (reg_size == 0));
+
+ return reg_type == kNoRegister;
+}
+
+
+inline bool CPURegister::Is(const CPURegister& other) const {
+ ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
+ (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsRegister() const {
+ return reg_type == kRegister;
+}
+
+
+inline bool CPURegister::IsFPRegister() const {
+ return reg_type == kFPRegister;
+}
+
+
+inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
+ return (reg_size == other.reg_size) && (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsValidOrNone() const {
+ return IsValid() || IsNone();
+}
+
+
+inline bool CPURegister::IsZero() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kZeroRegCode);
+}
+
+
+inline bool CPURegister::IsSP() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kSPRegInternalCode);
+}
+
+
+inline void CPURegList::Combine(const CPURegList& other) {
+ ASSERT(IsValid());
+ ASSERT(other.type() == type_);
+ ASSERT(other.RegisterSizeInBits() == size_);
+ list_ |= other.list();
+}
+
+
+inline void CPURegList::Remove(const CPURegList& other) {
+ ASSERT(IsValid());
+ ASSERT(other.type() == type_);
+ ASSERT(other.RegisterSizeInBits() == size_);
+ list_ &= ~other.list();
+}
+
+
+inline void CPURegList::Combine(const CPURegister& other) {
+ ASSERT(other.type() == type_);
+ ASSERT(other.SizeInBits() == size_);
+ Combine(other.code());
+}
+
+
+inline void CPURegList::Remove(const CPURegister& other) {
+ ASSERT(other.type() == type_);
+ ASSERT(other.SizeInBits() == size_);
+ Remove(other.code());
+}
+
+
+inline void CPURegList::Combine(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ |= (1UL << code);
+}
+
+
+inline void CPURegList::Remove(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ &= ~(1UL << code);
+}
+
+
+inline Register Register::XRegFromCode(unsigned code) {
+ // This function returns the zero register when code = 31. The stack pointer
+ // can not be returned.
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kXRegSize);
+}
+
+
+inline Register Register::WRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kWRegSize);
+}
+
+
+inline FPRegister FPRegister::SRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kSRegSize);
+}
+
+
+inline FPRegister FPRegister::DRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kDRegSize);
+}
+
+
+inline Register CPURegister::W() const {
+ ASSERT(IsValidRegister());
+ return Register::WRegFromCode(reg_code);
+}
+
+
+inline Register CPURegister::X() const {
+ ASSERT(IsValidRegister());
+ return Register::XRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::S() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::SRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::D() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::DRegFromCode(reg_code);
+}
+
+
+// Operand.
+template<typename T>
+Operand::Operand(Handle<T> value) : reg_(NoReg) {
+ initialize_handle(value);
+}
+
+
+// Default initializer is for int types
+template<typename int_t>
+struct OperandInitializer {
+ static const bool kIsIntType = true;
+ static inline RelocInfo::Mode rmode_for(int_t) {
+ return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
+ }
+ static inline int64_t immediate_for(int_t t) {
+ STATIC_ASSERT(sizeof(int_t) <= 8);
+ return t;
+ }
+};
+
+
+template<>
+struct OperandInitializer<Smi*> {
+ static const bool kIsIntType = false;
+ static inline RelocInfo::Mode rmode_for(Smi* t) {
+ return RelocInfo::NONE64;
+ }
+  static inline int64_t immediate_for(Smi* t) {
+ return reinterpret_cast<int64_t>(t);
+ }
+};
+
+
+template<>
+struct OperandInitializer<ExternalReference> {
+ static const bool kIsIntType = false;
+ static inline RelocInfo::Mode rmode_for(ExternalReference t) {
+ return RelocInfo::EXTERNAL_REFERENCE;
+ }
+  static inline int64_t immediate_for(ExternalReference t) {
+ return reinterpret_cast<int64_t>(t.address());
+ }
+};
+
+
+template<typename T>
+Operand::Operand(T t)
+ : immediate_(OperandInitializer<T>::immediate_for(t)),
+ reg_(NoReg),
+ rmode_(OperandInitializer<T>::rmode_for(t)) {}
+
+
+template<typename T>
+Operand::Operand(T t, RelocInfo::Mode rmode)
+ : immediate_(OperandInitializer<T>::immediate_for(t)),
+ reg_(NoReg),
+ rmode_(rmode) {
+ STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
+}
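
OperandInitializer is a small traits helper: the primary template treats its argument as an integer immediate and derives the relocation mode from its width, while the Smi* and ExternalReference specializations supply their own conversions. Roughly what the dispatch yields (illustrative, using names defined in this file):

    Operand a(42);                                 // 4-byte int -> RelocInfo::NONE32
    Operand b(static_cast<int64_t>(42));           // 8-byte int -> RelocInfo::NONE64
    Operand c(ExternalReference::cpu_features());  // -> RelocInfo::EXTERNAL_REFERENCE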
+
+
+Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
+ : reg_(reg),
+ shift_(shift),
+ extend_(NO_EXTEND),
+ shift_amount_(shift_amount),
+ rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
+ ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
+ ASSERT(!reg.IsSP());
+}
+
+
+Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
+ : reg_(reg),
+ shift_(NO_SHIFT),
+ extend_(extend),
+ shift_amount_(shift_amount),
+ rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ ASSERT(reg.IsValid());
+ ASSERT(shift_amount <= 4);
+ ASSERT(!reg.IsSP());
+
+ // Extend modes SXTX and UXTX require a 64-bit register.
+ ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+}
+
+
+bool Operand::IsImmediate() const {
+ return reg_.Is(NoReg);
+}
+
+
+bool Operand::IsShiftedRegister() const {
+ return reg_.IsValid() && (shift_ != NO_SHIFT);
+}
+
+
+bool Operand::IsExtendedRegister() const {
+ return reg_.IsValid() && (extend_ != NO_EXTEND);
+}
+
+
+bool Operand::IsZero() const {
+ if (IsImmediate()) {
+ return immediate() == 0;
+ } else {
+ return reg().IsZero();
+ }
+}
+
+
+Operand Operand::ToExtendedRegister() const {
+ ASSERT(IsShiftedRegister());
+ ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+ return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
+}
+
+
+int64_t Operand::immediate() const {
+ ASSERT(IsImmediate());
+ return immediate_;
+}
+
+
+Register Operand::reg() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return reg_;
+}
+
+
+Shift Operand::shift() const {
+ ASSERT(IsShiftedRegister());
+ return shift_;
+}
+
+
+Extend Operand::extend() const {
+ ASSERT(IsExtendedRegister());
+ return extend_;
+}
+
+
+unsigned Operand::shift_amount() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return shift_amount_;
+}
+
+
+Operand Operand::UntagSmi(Register smi) {
+ ASSERT(smi.Is64Bits());
+ return Operand(smi, ASR, kSmiShift);
+}
+
+
+Operand Operand::UntagSmiAndScale(Register smi, int scale) {
+ ASSERT(smi.Is64Bits());
+ ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
+ if (scale > kSmiShift) {
+ return Operand(smi, LSL, scale - kSmiShift);
+ } else if (scale < kSmiShift) {
+ return Operand(smi, ASR, kSmiShift - scale);
+ }
+ return Operand(smi);
+}
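
UntagSmi and UntagSmiAndScale fold the smi untagging shift into the operand. Assuming the 64-bit smi layout used here (kSmiShift == 32), the arithmetic works out as follows (illustrative):

    // UntagSmi(x)             -> Operand(x, ASR, 32)      the integer value itself
    // UntagSmiAndScale(x, 3)  -> Operand(x, ASR, 32 - 3)  value * 8
    // UntagSmiAndScale(x, 32) -> Operand(x)               value << 32 (the raw smi)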
+
+
+MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
+ shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(!regoffset.IsSP());
+ ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+
+ // SXTX extend mode requires a 64-bit offset register.
+ ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Shift shift,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+ ASSERT(shift == LSL);
+}
+
+
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+ : base_(base), addrmode_(addrmode) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+
+ if (offset.IsImmediate()) {
+ offset_ = offset.immediate();
+
+ regoffset_ = NoReg;
+ } else if (offset.IsShiftedRegister()) {
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ shift_= offset.shift();
+ shift_amount_ = offset.shift_amount();
+
+ extend_ = NO_EXTEND;
+ offset_ = 0;
+
+ // These assertions match those in the shifted-register constructor.
+ ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+ ASSERT(shift_ == LSL);
+ } else {
+ ASSERT(offset.IsExtendedRegister());
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ extend_ = offset.extend();
+ shift_amount_ = offset.shift_amount();
+
+ shift_= NO_SHIFT;
+ offset_ = 0;
+
+ // These assertions match those in the extended-register constructor.
+ ASSERT(!regoffset_.IsSP());
+ ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+ ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+ }
+}
+
+bool MemOperand::IsImmediateOffset() const {
+ return (addrmode_ == Offset) && regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsRegisterOffset() const {
+ return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsPreIndex() const {
+ return addrmode_ == PreIndex;
+}
+
+
+bool MemOperand::IsPostIndex() const {
+ return addrmode_ == PostIndex;
+}
+
+Operand MemOperand::OffsetAsOperand() const {
+ if (IsImmediateOffset()) {
+ return offset();
+ } else {
+ ASSERT(IsRegisterOffset());
+ if (extend() == NO_EXTEND) {
+ return Operand(regoffset(), shift(), shift_amount());
+ } else {
+ return Operand(regoffset(), extend(), shift_amount());
+ }
+ }
+}
+
+
+void Assembler::Unreachable() {
+#ifdef USE_SIMULATOR
+ debug("UNREACHABLE", __LINE__, BREAK);
+#else
+ // Crash by branching to 0. lr now points near the fault.
+ Emit(BLR | Rn(xzr));
+#endif
+}
+
+
+Address Assembler::target_pointer_address_at(Address pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ ASSERT(instr->IsLdrLiteralX());
+ return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
+}
+
+
+// Read/Modify the code target address in the branch/call instruction at pc.
+Address Assembler::target_address_at(Address pc) {
+ return Memory::Address_at(target_pointer_address_at(pc));
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ // Returns the address of the call target from the return address that will
+ // be returned to after a call.
+ // Call sequence on A64 is:
+ // ldr ip0, #... @ load from literal pool
+ // blr ip0
+ Address candidate = pc - 2 * kInstructionSize;
+ Instruction* instr = reinterpret_cast<Instruction*>(candidate);
+ USE(instr);
+ ASSERT(instr->IsLdrLiteralX());
+ return candidate;
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+ // The call, generated by MacroAssembler::Call, is one of two possible
+ // sequences:
+ //
+ // Without relocation:
+ // movz ip0, #(target & 0x000000000000ffff)
+ // movk ip0, #(target & 0x00000000ffff0000)
+ // movk ip0, #(target & 0x0000ffff00000000)
+ // movk ip0, #(target & 0xffff000000000000)
+ // blr ip0
+ //
+ // With relocation:
+ // ldr ip0, =target
+ // blr ip0
+ //
+ // The return address is immediately after the blr instruction in both cases,
+ // so it can be found by adding the call size to the address at the start of
+ // the call sequence.
+ STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize);
+ STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ if (instr->IsMovz()) {
+ // Verify the instruction sequence.
+ ASSERT(instr->following(1)->IsMovk());
+ ASSERT(instr->following(2)->IsMovk());
+ ASSERT(instr->following(3)->IsMovk());
+ ASSERT(instr->following(4)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithoutRelocation;
+ } else {
+ // Verify the instruction sequence.
+ ASSERT(instr->IsLdrLiteralX());
+ ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithRelocation;
+ }
+}
+
+
+void Assembler::deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ Memory::Address_at(target_pointer_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to always flush the
+ // instruction cache after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+  // However, on ARM, no instruction is actually patched in the case of
+ // embedded constants of the form:
+ // ldr ip, [pc, #...]
+ // since the instruction accessing this address in the constant pool remains
+ // unchanged, a flush is not required.
+}
+
+
+int RelocInfo::target_address_size() {
+ return kPointerSize;
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_pointer_address_at(pc_);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_)));
+}
+
+
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
+}
+
+
+Address RelocInfo::target_reference() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) set_target_address(target, mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ UNIMPLEMENTED();
+ Cell *null_cell = NULL;
+ return Handle<Cell>(null_cell);
+}
+
+
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ UNIMPLEMENTED();
+}
+
+
+static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
+static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on A64.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(!Code::IsYoungSequence(pc_));
+ // Read the stub entry point from the code age sequence.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(!Code::IsYoungSequence(pc_));
+ // Overwrite the stub entry point in the code age sequence. This is loaded as
+ // a literal so there is no need to call FlushICache here.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ Memory::Address_at(stub_entry_address) = stub->instruction_start();
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  // For the above sequences the RelocInfo points to the literal load that
+  // loads the call address.
+ return Assembler::target_address_at(pc_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ Assembler::set_target_address_at(pc_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, NULL);
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ // The sequence must be:
+ // ldr ip0, [pc, #offset]
+ // blr ip0
+ // See a64/debug-a64.cc BreakLocationIterator::SetDebugBreakAtReturn().
+ Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
+ Instruction* i2 = i1->following();
+ return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
+ i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
+ return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ isolate->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+#endif
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+#endif
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
+LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x : LDR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDR_d : LDR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDP_x : LDP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDP_d : LDP_s;
+ }
+}
+
+
+LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STR_x : STR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STR_d : STR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STP_x : STP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STP_d : STP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDNP_x : LDNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDNP_d : LDNP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STNP_x : STNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STNP_d : STNP_s;
+ }
+}
+
+
+int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
+ ASSERT(kStartOfLabelLinkChain == 0);
+ int offset = LinkAndGetByteOffsetTo(label);
+ ASSERT(IsAligned(offset, kInstructionSize));
+ return offset >> kInstructionSizeLog2;
+}
+
+
+Instr Assembler::Flags(FlagsUpdate S) {
+ if (S == SetFlags) {
+ return 1 << FlagsUpdate_offset;
+ } else if (S == LeaveFlags) {
+ return 0 << FlagsUpdate_offset;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+Instr Assembler::Cond(Condition cond) {
+ return cond << Condition_offset;
+}
+
+
+Instr Assembler::ImmPCRelAddress(int imm21) {
+ CHECK(is_int21(imm21));
+ Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
+ Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+ Instr immlo = imm << ImmPCRelLo_offset;
+ return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+}
+
+
+Instr Assembler::ImmUncondBranch(int imm26) {
+ CHECK(is_int26(imm26));
+ return truncate_to_int26(imm26) << ImmUncondBranch_offset;
+}
+
+
+Instr Assembler::ImmCondBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCondBranch_offset;
+}
+
+
+Instr Assembler::ImmCmpBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCmpBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranch(int imm14) {
+ CHECK(is_int14(imm14));
+ return truncate_to_int14(imm14) << ImmTestBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
+ ASSERT(is_uint6(bit_pos));
+ // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+ unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+ unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+ b5 &= ImmTestBranchBit5_mask;
+ b40 &= ImmTestBranchBit40_mask;
+ return b5 | b40;
+}
+
+
+Instr Assembler::SF(Register rd) {
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+}
+
+
+Instr Assembler::ImmAddSub(int64_t imm) {
+ ASSERT(IsImmAddSub(imm));
+ if (is_uint12(imm)) { // No shift required.
+ return imm << ImmAddSub_offset;
+ } else {
+ return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ }
+}
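
ImmAddSub relies on the add/sub immediate format: a 12-bit value, optionally left-shifted by 12. Worked examples (illustrative):

    //   42     -> imm12 = 42, shift bit clear
    //   0x5000 -> imm12 = 5,  shift bit set   (0x5000 == 5 << 12)
    //   0x1001 -> not encodable; IsImmAddSub() is false and the value has to be
    //             materialized into a register instead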
+
+
+Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
+ ((reg_size == kWRegSize) && is_uint5(imms)));
+ USE(reg_size);
+ return imms << ImmS_offset;
+}
+
+
+Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ ((reg_size == kWRegSize) && is_uint5(immr)));
+ USE(reg_size);
+ ASSERT(is_uint6(immr));
+ return immr << ImmR_offset;
+}
+
+
+Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ ASSERT(is_uint6(imms));
+ ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
+ USE(reg_size);
+ return imms << ImmSetBits_offset;
+}
+
+
+Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ ((reg_size == kWRegSize) && is_uint5(immr)));
+ USE(reg_size);
+ return immr << ImmRotate_offset;
+}
+
+
+Instr Assembler::ImmLLiteral(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmLLiteral_offset;
+}
+
+
+Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ ASSERT((reg_size == kXRegSize) || (bitn == 0));
+ USE(reg_size);
+ return bitn << BitN_offset;
+}
+
+
+Instr Assembler::ShiftDP(Shift shift) {
+ ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ return shift << ShiftDP_offset;
+}
+
+
+Instr Assembler::ImmDPShift(unsigned amount) {
+ ASSERT(is_uint6(amount));
+ return amount << ImmDPShift_offset;
+}
+
+
+Instr Assembler::ExtendMode(Extend extend) {
+ return extend << ExtendMode_offset;
+}
+
+
+Instr Assembler::ImmExtendShift(unsigned left_shift) {
+ ASSERT(left_shift <= 4);
+ return left_shift << ImmExtendShift_offset;
+}
+
+
+Instr Assembler::ImmCondCmp(unsigned imm) {
+ ASSERT(is_uint5(imm));
+ return imm << ImmCondCmp_offset;
+}
+
+
+Instr Assembler::Nzcv(StatusFlags nzcv) {
+ return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+}
+
+
+Instr Assembler::ImmLSUnsigned(int imm12) {
+ ASSERT(is_uint12(imm12));
+ return imm12 << ImmLSUnsigned_offset;
+}
+
+
+Instr Assembler::ImmLS(int imm9) {
+ ASSERT(is_int9(imm9));
+ return truncate_to_int9(imm9) << ImmLS_offset;
+}
+
+
+Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
+ ASSERT(((imm7 >> size) << size) == imm7);
+ int scaled_imm7 = imm7 >> size;
+ ASSERT(is_int7(scaled_imm7));
+ return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
+}
+
+
+Instr Assembler::ImmShiftLS(unsigned shift_amount) {
+ ASSERT(is_uint1(shift_amount));
+ return shift_amount << ImmShiftLS_offset;
+}
+
+
+Instr Assembler::ImmException(int imm16) {
+ ASSERT(is_uint16(imm16));
+ return imm16 << ImmException_offset;
+}
+
+
+Instr Assembler::ImmSystemRegister(int imm15) {
+ ASSERT(is_uint15(imm15));
+ return imm15 << ImmSystemRegister_offset;
+}
+
+
+Instr Assembler::ImmHint(int imm7) {
+ ASSERT(is_uint7(imm7));
+ return imm7 << ImmHint_offset;
+}
+
+
+Instr Assembler::ImmBarrierDomain(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierDomain_offset;
+}
+
+
+Instr Assembler::ImmBarrierType(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierType_offset;
+}
+
+
+LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
+ ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+ return static_cast<LSDataSize>(op >> SizeLS_offset);
+}
+
+
+Instr Assembler::ImmMoveWide(uint64_t imm) {
+ ASSERT(is_uint16(imm));
+ return imm << ImmMoveWide_offset;
+}
+
+
+Instr Assembler::ShiftMoveWide(int64_t shift) {
+ ASSERT(is_uint2(shift));
+ return shift << ShiftMoveWide_offset;
+}
+
+
+Instr Assembler::FPType(FPRegister fd) {
+ return fd.Is64Bits() ? FP64 : FP32;
+}
+
+
+Instr Assembler::FPScale(unsigned scale) {
+ ASSERT(is_uint6(scale));
+ return scale << FPScale_offset;
+}
+
+
+const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
+ return reg.Is64Bits() ? xzr : wzr;
+}
+
+
+void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
+ LoadRelocatedValue(rt, operand, LDR_x_lit);
+}
+
+
+inline void Assembler::CheckBuffer() {
+ ASSERT(pc_ < (buffer_ + buffer_size_));
+ if (buffer_space() < kGap) {
+ GrowBuffer();
+ }
+ if (pc_offset() >= next_buffer_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+TypeFeedbackId Assembler::RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
+ return recorded_ast_id_;
+}
+
+
+void Assembler::ClearRecordedAstId() {
+ recorded_ast_id_ = TypeFeedbackId::None();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_ASSEMBLER_A64_INL_H_
diff --git a/deps/v8/src/a64/assembler-a64.cc b/deps/v8/src/a64/assembler-a64.cc
new file mode 100644
index 0000000000..43b1391605
--- /dev/null
+++ b/deps/v8/src/a64/assembler-a64.cc
@@ -0,0 +1,2606 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#define A64_DEFINE_REG_STATICS
+
+#include "a64/assembler-a64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// CpuFeatures utilities (for V8 compatibility).
+
+ExternalReference ExternalReference::cpu_features() {
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
+// -----------------------------------------------------------------------------
+// CPURegList utilities.
+
+CPURegister CPURegList::PopLowestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountTrailingZeros(list_, kRegListSizeInBits);
+ ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
+
+
+CPURegister CPURegList::PopHighestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountLeadingZeros(list_, kRegListSizeInBits);
+ index = kRegListSizeInBits - 1 - index;
+ ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
+
+
+void CPURegList::RemoveCalleeSaved() {
+ if (type() == CPURegister::kRegister) {
+ Remove(GetCalleeSaved(RegisterSizeInBits()));
+ } else if (type() == CPURegister::kFPRegister) {
+ Remove(GetCalleeSavedFP(RegisterSizeInBits()));
+ } else {
+ ASSERT(type() == CPURegister::kNoRegister);
+ ASSERT(IsEmpty());
+ // The list must already be empty, so do nothing.
+ }
+}
+
+
+CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+ return CPURegList(CPURegister::kRegister, size, 19, 29);
+}
+
+
+CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
+ return CPURegList(CPURegister::kFPRegister, size, 8, 15);
+}
+
+
+CPURegList CPURegList::GetCallerSaved(unsigned size) {
+ // Registers x0-x18 and lr (x30) are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+ list.Combine(lr);
+ return list;
+}
+
+
+CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
+ // Registers d0-d7 and d16-d31 are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
+ list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
+ return list;
+}
+
+
+// This function defines the list of registers which are associated with a
+// safepoint slot. Safepoint register slots are saved contiguously on the stack.
+// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
+// code to index in the safepoint register slots. Any change here can affect
+// this mapping.
+CPURegList CPURegList::GetSafepointSavedRegisters() {
+ CPURegList list = CPURegList::GetCalleeSaved();
+ list.Combine(CPURegList(CPURegister::kRegister, kXRegSize, kJSCallerSaved));
+
+ // Note that unfortunately we can't use symbolic names for registers and have
+ // to directly use register codes. This is because this function is used to
+ // initialize some static variables and we can't rely on register variables
+ // to be initialized due to static initialization order issues in C++.
+
+ // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
+ // preserved outside of the macro assembler.
+ list.Remove(16);
+ list.Remove(17);
+
+ // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
+ // is a caller-saved register according to the procedure call standard.
+ list.Combine(18);
+
+ // Drop jssp as the stack pointer doesn't need to be included.
+ list.Remove(28);
+
+ // Add the link register (x30) to the safepoint list.
+ list.Combine(30);
+
+ return list;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on A64 means that it is a movz/movk sequence. We don't
+ // generate those for relocatable pointers.
+ return false;
+}
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * kInstructionSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ UNIMPLEMENTED();
+}
+
+
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ CPURegList regs(reg1, reg2, reg3, reg4);
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs.IncludesAliasOf(candidate)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return NoReg;
+}
+
+
+bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ int number_of_valid_regs = 0;
+ int number_of_valid_fpregs = 0;
+
+ RegList unique_regs = 0;
+ RegList unique_fpregs = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
+
+ for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+ if (regs[i].IsRegister()) {
+ number_of_valid_regs++;
+ unique_regs |= regs[i].Bit();
+ } else if (regs[i].IsFPRegister()) {
+ number_of_valid_fpregs++;
+ unique_fpregs |= regs[i].Bit();
+ } else {
+ ASSERT(!regs[i].IsValid());
+ }
+ }
+
+ int number_of_unique_regs =
+ CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
+ int number_of_unique_fpregs =
+ CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
+
+ ASSERT(number_of_valid_regs >= number_of_unique_regs);
+ ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+
+ return (number_of_valid_regs != number_of_unique_regs) ||
+ (number_of_valid_fpregs != number_of_unique_fpregs);
+}
+
+
+bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
+ match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
+ match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
+ match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
+ match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
+ return match;
+}
+
+
+void Operand::initialize_handle(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
+
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ immediate_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
+ immediate_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE64;
+ }
+}
+
+
+bool Operand::NeedsRelocation() const {
+ if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ return Serializer::enabled();
+ }
+
+ return !RelocInfo::IsNone(rmode_);
+}
+
+
+// Assembler
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ recorded_ast_id_(TypeFeedbackId::None()),
+ unresolved_branches_(),
+ positions_recorder_(this) {
+ const_pool_blocked_nesting_ = 0;
+ Reset();
+}
+
+
+Assembler::~Assembler() {
+ ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(const_pool_blocked_nesting_ == 0);
+}
+
+
+void Assembler::Reset() {
+#ifdef DEBUG
+ ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
+ ASSERT(const_pool_blocked_nesting_ == 0);
+ memset(buffer_, 0, pc_ - buffer_);
+#endif
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
+ reinterpret_cast<byte*>(pc_));
+ num_pending_reloc_info_ = 0;
+ next_buffer_check_ = 0;
+ no_const_pool_before_ = 0;
+ first_const_pool_use_ = -1;
+ ClearRecordedAstId();
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_reloc_info_ == 0);
+
+ // Set up code descriptor.
+ if (desc) {
+ desc->buffer = reinterpret_cast<byte*>(buffer_);
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
+ reloc_info_writer.pos();
+ desc->origin = this;
+ }
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::CheckLabelLinkChain(Label const * label) {
+#ifdef DEBUG
+ if (label->is_linked()) {
+ int linkoffset = label->pos();
+ bool end_of_chain = false;
+ while (!end_of_chain) {
+ Instruction * link = InstructionAt(linkoffset);
+ int linkpcoffset = link->ImmPCOffset();
+ int prevlinkoffset = linkoffset + linkpcoffset;
+
+ end_of_chain = (linkoffset == prevlinkoffset);
+ linkoffset = linkoffset + linkpcoffset;
+ }
+ }
+#endif
+}
+
+
+void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer) {
+ ASSERT(label->is_linked());
+
+ CheckLabelLinkChain(label);
+
+ Instruction* link = InstructionAt(label->pos());
+ Instruction* prev_link = link;
+ Instruction* next_link;
+ bool end_of_chain = false;
+
+ while (link != branch && !end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ prev_link = link;
+ link = next_link;
+ }
+
+ ASSERT(branch == link);
+ next_link = branch->ImmPCOffsetTarget();
+
+ if (branch == prev_link) {
+ // The branch is the first instruction in the chain.
+ if (branch == next_link) {
+ // It is also the last instruction in the chain, so it is the only branch
+ // currently referring to this label.
+ label->Unuse();
+ } else {
+ label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
+ }
+
+ } else if (branch == next_link) {
+ // The branch is the last (but not also the first) instruction in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ } else {
+ // The branch is in the middle of the chain.
+ if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
+ prev_link->SetImmPCOffsetTarget(next_link);
+ } else if (label_veneer != NULL) {
+ // Use the veneer for all previous links in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ end_of_chain = false;
+ link = next_link;
+ while (!end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ link->SetImmPCOffsetTarget(label_veneer);
+ link = next_link;
+ }
+ } else {
+ // The assert below will fire.
+ // Some other work could be attempted to fix up the chain, but it would be
+ // rather complicated. If we crash here, we may want to consider using a
+ // mechanism other than a chain of branches.
+ //
+ // Note that this situation currently should not happen, as we always call
+ // this function with a veneer to the target label.
+ // However, this could happen with a MacroAssembler in the following state:
+ // [previous code]
+ // B(label);
+ // [20KB code]
+ // Tbz(label); // First tbz. Pointing to unconditional branch.
+ // [20KB code]
+ // Tbz(label); // Second tbz. Pointing to the first tbz.
+ // [more code]
+ // and this function is called to remove the first tbz from the label link
+ // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
+ // the unconditional branch.
+ CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
+ UNREACHABLE();
+ }
+ }
+
+ CheckLabelLinkChain(label);
+}
+
+
+void Assembler::bind(Label* label) {
+ // Bind label to the address at pc_. All instructions (most likely branches)
+ // that are linked to this label will be updated to point to the newly-bound
+ // label.
+
+ ASSERT(!label->is_near_linked());
+ ASSERT(!label->is_bound());
+
+ // If the label is linked, the link chain looks something like this:
+ //
+ // |--I----I-------I-------L
+ // |---------------------->| pc_offset
+ // |-------------->| linkoffset = label->pos()
+ // |<------| link->ImmPCOffset()
+ // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset()
+ //
+ // On each iteration, the last link is updated and then removed from the
+ // chain until only one remains. At that point, the label is bound.
+ //
+ // If the label is not linked, no preparation is required before binding.
+ while (label->is_linked()) {
+ int linkoffset = label->pos();
+ Instruction* link = InstructionAt(linkoffset);
+ int prevlinkoffset = linkoffset + link->ImmPCOffset();
+
+ CheckLabelLinkChain(label);
+
+ ASSERT(linkoffset >= 0);
+ ASSERT(linkoffset < pc_offset());
+ ASSERT((linkoffset > prevlinkoffset) ||
+ (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
+ ASSERT(prevlinkoffset >= 0);
+
+ // Update the link to point to the label.
+ link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+
+ // Link the label to the previous link in the chain.
+ if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
+ // We hit kStartOfLabelLinkChain, so the chain is fully processed.
+ label->Unuse();
+ } else {
+ // Update the label for the next iteration.
+ label->link_to(prevlinkoffset);
+ }
+ }
+ label->bind_to(pc_offset());
+
+ ASSERT(label->is_bound());
+ ASSERT(!label->is_linked());
+
+ DeleteUnresolvedBranchInfoForLabel(label);
+}
+
+
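The label-binding scheme above threads every unresolved branch to a label into a singly linked chain stored in the instruction stream itself; bind() then walks that chain from the most recently linked reference back to the first, patching each branch to the final address. A hedged usage sketch, not part of the patch, assuming an Assembler instance named masm and this port's x0 register alias:

  Label done;
  masm.cbz(x0, &done);  // First reference: the label becomes linked and the
                        // branch's offset field holds kStartOfLabelLinkChain.
  masm.b(&done);        // Second reference: its offset points back at the cbz.
  // ... more code ...
  masm.bind(&done);     // Walks the chain, retargeting both branches to the
                        // current pc_, then binds the label there.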
+int Assembler::LinkAndGetByteOffsetTo(Label* label) {
+ ASSERT(sizeof(*pc_) == 1);
+ CheckLabelLinkChain(label);
+
+ int offset;
+ if (label->is_bound()) {
+ // The label is bound, so it does not need to be updated. Referring
+ // instructions must link directly to the label as they will not be
+ // updated.
+ //
+ // In this case, label->pos() returns the offset of the label from the
+ // start of the buffer.
+ //
+ // Note that offset can be zero for self-referential instructions. (This
+ // could be useful for ADR, for example.)
+ offset = label->pos() - pc_offset();
+ ASSERT(offset <= 0);
+ } else {
+ if (label->is_linked()) {
+ // The label is linked, so the referring instruction should be added onto
+ // the end of the label's link chain.
+ //
+ // In this case, label->pos() returns the offset of the last linked
+ // instruction from the start of the buffer.
+ offset = label->pos() - pc_offset();
+ ASSERT(offset != kStartOfLabelLinkChain);
+ // Note that the offset here needs to be PC-relative only so that the
+ // first instruction in a buffer can link to an unbound label. Otherwise,
+ // the offset would be 0 for this case, and 0 is reserved for
+ // kStartOfLabelLinkChain.
+ } else {
+ // The label is unused, so it now becomes linked and the referring
+ // instruction is at the start of the new link chain.
+ offset = kStartOfLabelLinkChain;
+ }
+ // The instruction at pc is now the last link in the label's chain.
+ label->link_to(pc_offset());
+ }
+
+ return offset;
+}
+
+
+void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
+ // Branches to this label will be resolved when the label is bound below.
+ std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ it_tmp = it++;
+ if (it_tmp->second.label_ == label) {
+ CHECK(it_tmp->first >= pc_offset());
+ unresolved_branches_.erase(it_tmp);
+ }
+ }
+}
+
+
+void Assembler::StartBlockConstPool() {
+ if (const_pool_blocked_nesting_++ == 0) {
+ // Prevent constant pool checks happening by setting the next check to
+ // the biggest possible offset.
+ next_buffer_check_ = kMaxInt;
+ }
+}
+
+
+void Assembler::EndBlockConstPool() {
+ if (--const_pool_blocked_nesting_ == 0) {
+ // Check the constant pool hasn't been blocked for too long.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+ // Two cases:
+ // * no_const_pool_before_ >= next_buffer_check_ and the emission is
+ // still blocked
+ // * no_const_pool_before_ < next_buffer_check_ and the next emit will
+ // trigger a check.
+ next_buffer_check_ = no_const_pool_before_;
+ }
+}
+
+
+bool Assembler::is_const_pool_blocked() const {
+ return (const_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_const_pool_before_);
+}
+
+
+bool Assembler::IsConstantPoolAt(Instruction* instr) {
+ // The constant pool marker is made of two instructions. These instructions
+ // will never be emitted by the JIT, so checking for the first one is enough:
+ // 0: ldr xzr, #<size of pool>
+ bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
+
+ // It is still worth asserting the marker is complete.
+ // 4: blr xzr
+ ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
+ instr->following()->Rn() == xzr.code()));
+
+ return result;
+}
+
+
+int Assembler::ConstantPoolSizeAt(Instruction* instr) {
+ if (IsConstantPoolAt(instr)) {
+ return instr->ImmLLiteral();
+ } else {
+ return -1;
+ }
+}
+
+
+void Assembler::ConstantPoolMarker(uint32_t size) {
+ ASSERT(is_const_pool_blocked());
+ // + 1 is for the crash guard.
+ Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr));
+}
+
+
+void Assembler::ConstantPoolGuard() {
+#ifdef DEBUG
+ // Currently this is only used after a constant pool marker.
+ ASSERT(is_const_pool_blocked());
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ ASSERT(instr->preceding()->IsLdrLiteralX() &&
+ instr->preceding()->Rt() == xzr.code());
+#endif
+
+ // We must generate only one instruction.
+ Emit(BLR | Rn(xzr));
+}
+
+
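For reference, the marker emitted by ConstantPoolMarker() and recognised by IsConstantPoolAt() is an ordinary A64 "LDR (literal)" word with xzr as the destination. A small standalone sketch, not taken from the patch, that builds and re-checks such a word using the architectural field positions (imm19 in bits 23:5, Rt in bits 4:0); the numeric constants are assumptions based on the A64 encoding, not V8 identifiers:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t kLdrXLit = 0x58000000;  // LDR (literal), 64-bit variant
    const uint32_t kXzrCode = 31;          // register code of xzr
    uint32_t size = 4;                     // whatever ConstantPoolMarker() is passed
    uint32_t imm19 = 2 * size + 1;         // "+ 1 is for the crash guard"
    uint32_t marker = kLdrXLit | (imm19 << 5) | kXzrCode;

    // Mirror of the IsConstantPoolAt() test: LDR (literal) into xzr.
    bool is_marker =
        ((marker & 0xff000000) == kLdrXLit) && ((marker & 0x1f) == kXzrCode);
    std::printf("marker = 0x%08x, is_marker = %d, size field = %u\n",
                marker, is_marker, (marker >> 5) & 0x7ffff);
    return 0;
  }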
+void Assembler::br(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(BR | Rn(xn));
+}
+
+
+void Assembler::blr(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ // The pattern 'blr xzr' is used as a guard to detect when execution falls
+ // through the constant pool. It should not be emitted.
+ ASSERT(!xn.Is(xzr));
+ Emit(BLR | Rn(xn));
+}
+
+
+void Assembler::ret(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(RET | Rn(xn));
+}
+
+
+void Assembler::b(int imm26) {
+ Emit(B | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::b(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::b(int imm19, Condition cond) {
+ Emit(B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+void Assembler::b(Label* label, Condition cond) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label), cond);
+}
+
+
+void Assembler::bl(int imm26) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(BL | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::bl(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ bl(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::cbz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbnz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::adr(const Register& rd, int imm21) {
+ ASSERT(rd.Is64Bits());
+ Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adr(const Register& rd, Label* label) {
+ adr(rd, LinkAndGetByteOffsetTo(label));
+}
+
+
+void Assembler::add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, ADD);
+}
+
+
+void Assembler::adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, ADD);
+}
+
+
+void Assembler::cmn(const Register& rn,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ adds(zr, rn, operand);
+}
+
+
+void Assembler::sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, SUB);
+}
+
+
+void Assembler::subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, SUB);
+}
+
+
+void Assembler::cmp(const Register& rn, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ subs(zr, rn, operand);
+}
+
+
+void Assembler::neg(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sub(rd, zr, operand);
+}
+
+
+void Assembler::negs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ subs(rd, zr, operand);
+}
+
+
+void Assembler::adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void Assembler::adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void Assembler::sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void Assembler::sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void Assembler::ngc(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbc(rd, zr, operand);
+}
+
+
+void Assembler::ngcs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbcs(rd, zr, operand);
+}
+
+
+// Logical instructions.
+void Assembler::and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, AND);
+}
+
+
+void Assembler::ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ANDS);
+}
+
+
+void Assembler::tst(const Register& rn,
+ const Operand& operand) {
+ ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void Assembler::bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BIC);
+}
+
+
+void Assembler::bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BICS);
+}
+
+
+void Assembler::orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORR);
+}
+
+
+void Assembler::orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORN);
+}
+
+
+void Assembler::eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EOR);
+}
+
+
+void Assembler::eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EON);
+}
+
+
+void Assembler::lslv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::lsrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::asrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rorv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// Bitfield operations.
+void Assembler::bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | BFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.Is64Bits() || rn.Is32Bits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | SBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | UBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | EXTR | N | Rm(rm) |
+ ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSEL);
+}
+
+
+void Assembler::csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINC);
+}
+
+
+void Assembler::csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINV);
+}
+
+
+void Assembler::csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSNEG);
+}
+
+
+void Assembler::cset(const Register &rd, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinc(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::csetm(const Register &rd, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinv(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMN);
+}
+
+
+void Assembler::ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMP);
+}
+
+
+void Assembler::DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op) {
+ Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MADD);
+}
+
+
+void Assembler::madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MADD);
+}
+
+
+void Assembler::mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MSUB);
+}
+
+
+void Assembler::msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MSUB);
+}
+
+
+void Assembler::smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
+}
+
+
+void Assembler::smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
+}
+
+
+void Assembler::umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
+}
+
+
+void Assembler::umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
+}
+
+
+void Assembler::smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
+}
+
+
+void Assembler::smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
+}
+
+
+void Assembler::sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rbit(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, RBIT);
+}
+
+
+void Assembler::rev16(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, REV16);
+}
+
+
+void Assembler::rev32(const Register& rd,
+ const Register& rn) {
+ ASSERT(rd.Is64Bits());
+ DataProcessing1Source(rd, rn, REV);
+}
+
+
+void Assembler::rev(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
+}
+
+
+void Assembler::clz(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLZ);
+}
+
+
+void Assembler::cls(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLS);
+}
+
+
+void Assembler::ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
+}
+
+
+void Assembler::stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
+}
+
+
+void Assembler::ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStorePair(rt, rt2, src, LDPSW_x);
+}
+
+
+void Assembler::LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // 'rt' and 'rt2' can only be aliased for stores.
+ ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+
+ Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
+
+ Instr addrmodeop;
+ if (addr.IsImmediateOffset()) {
+ addrmodeop = LoadStorePairOffsetFixed;
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ ASSERT(!rt2.Is(addr.base()));
+ ASSERT(addr.offset() != 0);
+ if (addr.IsPreIndex()) {
+ addrmodeop = LoadStorePairPreIndexFixed;
+ } else {
+ ASSERT(addr.IsPostIndex());
+ addrmodeop = LoadStorePairPostIndexFixed;
+ }
+ }
+ Emit(addrmodeop | memop);
+}
+
+
+void Assembler::ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePairNonTemporal(rt, rt2, src,
+ LoadPairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePairNonTemporal(rt, rt2, dst,
+ StorePairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op) {
+ ASSERT(!rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ ASSERT(addr.IsImmediateOffset());
+
+ LSDataSize size = CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(op & LoadStorePairMask));
+ Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), size));
+}
+
+
+// Memory instructions.
+void Assembler::ldrb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRB_w);
+}
+
+
+void Assembler::strb(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRB_w);
+}
+
+
+void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
+}
+
+
+void Assembler::ldrh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRH_w);
+}
+
+
+void Assembler::strh(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRH_w);
+}
+
+
+void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
+}
+
+
+void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, LoadOpFor(rt));
+}
+
+
+void Assembler::str(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, StoreOpFor(rt));
+}
+
+
+void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStore(rt, src, LDRSW_x);
+}
+
+
+void Assembler::ldr(const Register& rt, uint64_t imm) {
+ // TODO(all): Constant pool may be garbage collected. Hence we cannot store
+ // TODO(all): arbitrary values in them. Manually move it for now.
+ // TODO(all): Fix MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+void Assembler::ldr(const FPRegister& ft, double imm) {
+ // TODO(all): Constant pool may be garbage collected. Hence we cannot store
+ // TODO(all): arbitrary values in them. Manually move it for now.
+ // TODO(all): Fix MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+void Assembler::mov(const Register& rd, const Register& rm) {
+ // Moves involving the stack pointer are encoded as add immediate with
+ // second operand of zero. Otherwise, orr with first operand zr is
+ // used.
+ if (rd.IsSP() || rm.IsSP()) {
+ add(rd, rm, 0);
+ } else {
+ orr(rd, AppropriateZeroRegFor(rd), rm);
+ }
+}
+
+
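mov() above works around an A64 encoding quirk: in ORR (shifted register) the register number 31 names the zero register, so sp/wsp cannot appear there, whereas ADD (immediate) interprets 31 as the stack pointer. A hedged illustration of the resulting encodings, not part of the patch, assuming an Assembler instance masm and this port's x0/x1/csp aliases:

  masm.mov(x0, x1);   // emitted as:  orr x0, xzr, x1
  masm.mov(csp, x1);  // emitted as:  add sp, x1, #0   (encoding 31 = sp here)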
+void Assembler::mvn(const Register& rd, const Operand& operand) {
+ orn(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(rt.Is64Bits());
+ Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
+}
+
+
+void Assembler::msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(rt.Is64Bits());
+ Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+}
+
+
+void Assembler::hint(SystemHint code) {
+ Emit(HINT | ImmHint(code) | Rt(xzr));
+}
+
+
+void Assembler::dmb(BarrierDomain domain, BarrierType type) {
+ Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::dsb(BarrierDomain domain, BarrierType type) {
+ Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::isb() {
+ Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+}
+
+
+void Assembler::fmov(FPRegister fd, double imm) {
+ if (fd.Is64Bits() && IsImmFP64(imm)) {
+ Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
+ } else if (fd.Is32Bits() && IsImmFP32(imm)) {
+ Emit(FMOV_s_imm | Rd(fd) | ImmFP32(static_cast<float>(imm)));
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
+ Register zr = AppropriateZeroRegFor(fd);
+ fmov(fd, zr);
+ } else {
+ ldr(fd, imm);
+ }
+}
+
+
+void Assembler::fmov(Register rd, FPRegister fn) {
+ ASSERT(rd.SizeInBits() == fn.SizeInBits());
+ FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
+ Emit(op | Rd(rd) | Rn(fn));
+}
+
+
+void Assembler::fmov(FPRegister fd, Register rn) {
+ ASSERT(fd.SizeInBits() == rn.SizeInBits());
+ FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
+ Emit(op | Rd(fd) | Rn(rn));
+}
+
+
+void Assembler::fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
+}
+
+
+void Assembler::fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FADD);
+}
+
+
+void Assembler::fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FSUB);
+}
+
+
+void Assembler::fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMUL);
+}
+
+
+void Assembler::fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
+}
+
+
+void Assembler::fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
+}
+
+
+void Assembler::fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
+}
+
+
+void Assembler::fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
+}
+
+
+void Assembler::fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FDIV);
+}
+
+
+void Assembler::fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAX);
+}
+
+
+void Assembler::fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAXNM);
+}
+
+
+void Assembler::fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMIN);
+}
+
+
+void Assembler::fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMINNM);
+}
+
+
+void Assembler::fabs(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FABS);
+}
+
+
+void Assembler::fneg(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FNEG);
+}
+
+
+void Assembler::fsqrt(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FSQRT);
+}
+
+
+void Assembler::frinta(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTA);
+}
+
+
+void Assembler::frintn(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTN);
+}
+
+
+void Assembler::frintz(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTZ);
+}
+
+
+void Assembler::fcmp(const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
+}
+
+
+void Assembler::fcmp(const FPRegister& fn,
+ double value) {
+ USE(value);
+ // Although the fcmp instruction can strictly only take an immediate value of
+ // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
+ // affect the result of the comparison.
+ ASSERT(value == 0.0);
+ Emit(FPType(fn) | FCMP_zero | Rn(fn));
+}
+
+
+void Assembler::fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
+}
+
+
+void Assembler::fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op) {
+ Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
+}
+
+
+void Assembler::fcvt(const FPRegister& fd,
+ const FPRegister& fn) {
+ if (fd.Is64Bits()) {
+ // Convert float to double.
+ ASSERT(fn.Is32Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_ds);
+ } else {
+ // Convert double to float.
+ ASSERT(fn.Is64Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_sd);
+ }
+}
+
+
+void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAU);
+}
+
+
+void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAS);
+}
+
+
+void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMU);
+}
+
+
+void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMS);
+}
+
+
+void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNU);
+}
+
+
+void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNS);
+}
+
+
+void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZU);
+}
+
+
+void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZS);
+}
+
+
+void Assembler::scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
+
+
+void Assembler::ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
+
+
+// Note:
+// Below, a difference in case for the same letter indicates a
+// negated bit.
+// If b is 1, then B is 0.
+Instr Assembler::ImmFP32(float imm) {
+ ASSERT(IsImmFP32(imm));
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
+
+
+Instr Assembler::ImmFP64(double imm) {
+ ASSERT(IsImmFP64(imm));
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
+
+
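The two helpers above pack a floating-point immediate into the 8-bit ImmFP field: the sign, one (inverted) exponent bit, and the top exponent/fraction bits. A standalone sketch, not from the patch, reproducing the ImmFP64 bit extraction for a few representable values; 1.0 should yield the well-known imm8 value 0x70:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // Same extraction as Assembler::ImmFP64(), before the shift into the
  // instruction's ImmFP field.
  static uint32_t Imm8OfFP64(double imm) {
    uint64_t bits;
    std::memcpy(&bits, &imm, sizeof(bits));
    uint32_t bit7 = ((bits >> 63) & 0x1) << 7;  // sign ('a')
    uint32_t bit6 = ((bits >> 61) & 0x1) << 6;  // 'B' is the negation of this bit
    uint32_t bit5_to_0 = (bits >> 48) & 0x3f;   // remaining exponent + top fraction
    return bit7 | bit6 | bit5_to_0;
  }

  int main() {
    std::printf("imm8(1.0)  = 0x%02x\n", Imm8OfFP64(1.0));   // 0x70
    std::printf("imm8(-1.0) = 0x%02x\n", Imm8OfFP64(-1.0));  // 0xf0
    std::printf("imm8(0.25) = 0x%02x\n", Imm8OfFP64(0.25));  // 0x50
    return 0;
  }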
+// Code generation helpers.
+void Assembler::MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op) {
+ if (shift >= 0) {
+ // Explicit shift specified.
+ ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
+ ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+ shift /= 16;
+ } else {
+ // Calculate a new immediate and shift combination to encode the immediate
+ // argument.
+ shift = 0;
+ if ((imm & ~0xffffUL) == 0) {
+ // Nothing to do.
+ } else if ((imm & ~(0xffffUL << 16)) == 0) {
+ imm >>= 16;
+ shift = 1;
+ } else if ((imm & ~(0xffffUL << 32)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 32;
+ shift = 2;
+ } else if ((imm & ~(0xffffUL << 48)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 48;
+ shift = 3;
+ }
+ }
+
+ ASSERT(is_uint16(imm));
+
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
+ Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+}
+
+
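When no explicit shift is supplied, MoveWide() derives the hw field by testing which 16-bit lane holds the payload. A standalone sketch, not from the patch, mirroring that derivation:

  #include <cstdint>
  #include <cstdio>

  // Mirror of the shift-selection logic in Assembler::MoveWide() when the
  // caller passes shift < 0 (i.e. "pick it for me").
  static bool PickMoveWideShift(uint64_t imm, unsigned* imm16, unsigned* hw) {
    for (unsigned shift = 0; shift < 4; shift++) {
      if ((imm & ~(UINT64_C(0xffff) << (16 * shift))) == 0) {
        *imm16 = static_cast<unsigned>(imm >> (16 * shift));
        *hw = shift;
        return true;
      }
    }
    return false;  // needs more than one movz/movk; handled elsewhere
  }

  int main() {
    unsigned imm16, hw;
    if (PickMoveWideShift(0x5A0000, &imm16, &hw)) {
      // 0x5a0000 fits entirely in lane 1: movz xd, #0x5a, lsl #16
      std::printf("imm16 = 0x%x, hw = %u\n", imm16, hw);  // imm16 = 0x5a, hw = 1
    }
    return 0;
  }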
+void Assembler::AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ ASSERT(IsImmAddSub(immediate));
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
+ ImmAddSub(immediate) | dest_reg | RnSP(rn));
+ } else if (operand.IsShiftedRegister()) {
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+
+ // For instructions of the form:
+ // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
+ // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
+ // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // or their 64-bit register equivalents, convert the operand from shifted to
+ // extended register mode, and emit an add/sub extended instruction.
+ if (rn.IsSP() || rd.IsSP()) {
+ ASSERT(!(rd.IsSP() && (S == SetFlags)));
+ DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
+ AddSubExtendedFixed | op);
+ } else {
+ DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
+ }
+ } else {
+ ASSERT(operand.IsExtendedRegister());
+ DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
+ }
+}
+
+
+void Assembler::AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ASSERT(!operand.NeedsRelocation());
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::hlt(int code) {
+ ASSERT(is_uint16(code));
+ Emit(HLT | ImmException(code));
+}
+
+
+void Assembler::brk(int code) {
+ ASSERT(is_uint16(code));
+ Emit(BRK | ImmException(code));
+}
+
+
+void Assembler::debug(const char* message, uint32_t code, Instr params) {
+#ifdef USE_SIMULATOR
+ // Don't generate simulator-specific code if we are building a snapshot, which
+ // might be run on real hardware.
+ if (!Serializer::enabled()) {
+#ifdef DEBUG
+ Serializer::TooLateToEnableNow();
+#endif
+ // The arguments to the debug marker need to be contiguous in memory, so
+ // make sure we don't try to emit a literal pool.
+ BlockConstPoolScope scope(this);
+
+ Label start;
+ bind(&start);
+
+ // Refer to instructions-a64.h for a description of the marker and its
+ // arguments.
+ hlt(kImmExceptionIsDebug);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
+ dc32(code);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
+ dc32(params);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
+ EmitStringData(message);
+ hlt(kImmExceptionIsUnreachable);
+
+ return;
+ }
+ // Fall through if Serializer is enabled.
+#endif
+
+ if (params & BREAK) {
+ hlt(kImmExceptionIsDebug);
+ }
+}
+
+
+void Assembler::Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.SizeInBits();
+
+ ASSERT(immediate != 0);
+ ASSERT(immediate != -1);
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else {
+ ASSERT(operand.IsShiftedRegister());
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
+ DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
+ }
+}
+
+
+void Assembler::LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op) {
+ unsigned reg_size = rd.SizeInBits();
+ Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
+ ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
+ Rn(rn));
+}
+
+
+void Assembler::ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ Instr ccmpop;
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ ASSERT(IsImmConditionalCompare(immediate));
+ ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
+ } else {
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
+ }
+ Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
+}
+
+
+void Assembler::DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Emit(SF(rn) | op | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op) {
+ Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op) {
+ ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
+}
+
+
+void Assembler::EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned shift_amount) {
+ switch (shift) {
+ case LSL:
+ lsl(rd, rn, shift_amount);
+ break;
+ case LSR:
+ lsr(rd, rn, shift_amount);
+ break;
+ case ASR:
+ asr(rd, rn, shift_amount);
+ break;
+ case ROR:
+ ror(rd, rn, shift_amount);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Assembler::EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift) {
+ ASSERT(rd.SizeInBits() >= rn.SizeInBits());
+ unsigned reg_size = rd.SizeInBits();
+ // Use the correct size of register.
+ Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
+ // Bits extracted are high_bit:0.
+ unsigned high_bit = (8 << (extend & 0x3)) - 1;
+ // Number of bits left in the result that are not introduced by the shift.
+ unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
+
+ if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
+ switch (extend) {
+ case UXTB:
+ case UXTH:
+ case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
+ case SXTB:
+ case SXTH:
+ case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
+ case UXTX:
+ case SXTX: {
+ ASSERT(rn.SizeInBits() == kXRegSize);
+ // Nothing to extend. Just shift.
+ lsl(rd, rn_, left_shift);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ } else {
+ // No need to extend as the extended bits would be shifted away.
+ lsl(rd, rn_, left_shift);
+ }
+}
+
+
+void Assembler::DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(operand.IsShiftedRegister());
+ ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
+ ASSERT(!operand.NeedsRelocation());
+ Emit(SF(rd) | op | Flags(S) |
+ ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
+ Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(!operand.NeedsRelocation());
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
+ ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
+ dest_reg | RnSP(rn));
+}
+
+
+bool Assembler::IsImmAddSub(int64_t immediate) {
+ return is_uint12(immediate) ||
+ (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+}
+
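IsImmAddSub() above accepts the two forms the add/sub immediate encoding supports: a 12-bit unsigned value, or a 12-bit value shifted left by 12. A standalone sketch, not from the patch, exercising it on a few sample values:

  #include <cstdint>
  #include <cstdio>

  static bool IsUint12(int64_t x) { return x >= 0 && x < (1 << 12); }

  // Mirror of Assembler::IsImmAddSub().
  static bool IsImmAddSub(int64_t immediate) {
    return IsUint12(immediate) ||
           (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0));
  }

  int main() {
    std::printf("%d\n", IsImmAddSub(0xfff));     // 1: fits in 12 bits
    std::printf("%d\n", IsImmAddSub(0x123000));  // 1: 0x123 shifted left by 12
    std::printf("%d\n", IsImmAddSub(0x1001));    // 0: needs both halves
    return 0;
  }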
+void Assembler::LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ Instr memop = op | Rt(rt) | RnSP(addr.base());
+ ptrdiff_t offset = addr.offset();
+
+ if (addr.IsImmediateOffset()) {
+ LSDataSize size = CalcLSDataSize(op);
+ if (IsImmLSScaled(offset, size)) {
+ // Use the scaled addressing mode.
+ Emit(LoadStoreUnsignedOffsetFixed | memop |
+ ImmLSUnsigned(offset >> size));
+ } else if (IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else if (addr.IsRegisterOffset()) {
+ Extend ext = addr.extend();
+ Shift shift = addr.shift();
+ unsigned shift_amount = addr.shift_amount();
+
+ // LSL is encoded in the option field as UXTX.
+ if (shift == LSL) {
+ ext = UXTX;
+ }
+
+ // Shifts are encoded in one bit, indicating a left shift by the memory
+ // access size.
+ ASSERT((shift_amount == 0) ||
+ (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
+ Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
+ ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ if (IsImmLSUnscaled(offset)) {
+ if (addr.IsPreIndex()) {
+ Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
+ } else {
+ ASSERT(addr.IsPostIndex());
+ Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
+ }
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ }
+}
+
+
+bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
+ return is_int9(offset);
+}
+
+
+bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
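+ // For a 64-bit access (size == 3), for example, the scaled form covers
+ // offsets 0 to 32760 in multiples of 8; small misaligned offsets may still
+ // fit the unscaled 9-bit signed form checked by IsImmLSUnscaled above.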
+ bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ return offset_is_size_multiple && is_uint12(offset >> size);
+}
+
+
+void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
+ ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
+ // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
+ // constant pool. It should not be emitted.
+ ASSERT(!rt.Is(xzr));
+ Emit(LDR_x_lit |
+ ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
+ Rt(rt));
+}
+
+
+void Assembler::LoadRelocatedValue(const CPURegister& rt,
+ const Operand& operand,
+ LoadLiteralOp op) {
+ int64_t imm = operand.immediate();
+ ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
+ RecordRelocInfo(operand.rmode(), imm);
+ BlockConstPoolFor(1);
+ Emit(op | ImmLLiteral(0) | Rt(rt));
+}
+
+
+// Test if a given value can be encoded in the immediate field of a logical
+// instruction.
+// If it can be encoded, the function returns true, and values pointed to by n,
+// imm_s and imm_r are updated with immediates encoded in the format required
+// by the corresponding fields in the logical instruction.
+// If it can not be encoded, the function returns false, and the values pointed
+// to by n, imm_s and imm_r are undefined.
+bool Assembler::IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r) {
+ ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
+ ASSERT((width == kWRegSize) || (width == kXRegSize));
+
+ // Logical immediates are encoded using parameters n, imm_s and imm_r using
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+ // To test if an arbitrary immediate can be encoded using this scheme, an
+ // iterative algorithm is used.
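+ //
+ // For example, 0x0000ffff0000ffff is a 32-bit pattern (the sixteen low bits
+ // set, no rotation) repeated across the 64-bit value, so it is encodable;
+ // 0x0000000012345678 contains no such repeated rotated run of ones and is
+ // not.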
+ //
+ // TODO(mcapewel) This code does not consider using X/W register overlap to
+ // support 64-bit immediates where the top 32-bits are zero, and the bottom
+ // 32-bits are an encodable logical immediate.
+
+ // 1. If the value has all set or all clear bits, it can't be encoded.
+ if ((value == 0) || (value == 0xffffffffffffffffUL) ||
+ ((width == kWRegSize) && (value == 0xffffffff))) {
+ return false;
+ }
+
+ unsigned lead_zero = CountLeadingZeros(value, width);
+ unsigned lead_one = CountLeadingZeros(~value, width);
+ unsigned trail_zero = CountTrailingZeros(value, width);
+ unsigned trail_one = CountTrailingZeros(~value, width);
+ unsigned set_bits = CountSetBits(value, width);
+
+ // The fixed bits in the immediate s field.
+ // If width == 64 (X reg), start at 0xFFFFFF80.
+ // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
+ // widths won't be executed.
+ int imm_s_fixed = (width == kXRegSize) ? -128 : -64;
+ int imm_s_mask = 0x3F;
+
+ for (;;) {
+ // 2. If the value is two bits wide, it can be encoded.
+ if (width == 2) {
+ *n = 0;
+ *imm_s = 0x3C;
+ *imm_r = (value & 3) - 1;
+ return true;
+ }
+
+ *n = (width == 64) ? 1 : 0;
+ *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
+ if ((lead_zero + set_bits) == width) {
+ *imm_r = 0;
+ } else {
+ *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
+ }
+
+ // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
+ // the bit width of the value, it can be encoded.
+ if (lead_zero + trail_zero + set_bits == width) {
+ return true;
+ }
+
+ // 4. If the sum of leading ones, trailing ones and unset bits in the
+ // value is equal to the bit width of the value, it can be encoded.
+ if (lead_one + trail_one + (width - set_bits) == width) {
+ return true;
+ }
+
+ // 5. If the most-significant half of the bitwise value is equal to the
+ // least-significant half, return to step 2 using the least-significant
+ // half of the value.
+ uint64_t mask = (1UL << (width >> 1)) - 1;
+ if ((value & mask) == ((value >> (width >> 1)) & mask)) {
+ width >>= 1;
+ set_bits >>= 1;
+ imm_s_fixed >>= 1;
+ continue;
+ }
+
+ // 6. Otherwise, the value can't be encoded.
+ return false;
+ }
+}
+
+
+bool Assembler::IsImmConditionalCompare(int64_t immediate) {
+ return is_uint5(immediate);
+}
+
+
+bool Assembler::IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
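+ // For example, 1.0f (0x3f800000) fits this pattern, whereas 0.1f
+ // (0x3dcccccd) does not, because its low 19 bits are non-zero.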
+ uint32_t bits = float_to_rawbits(imm);
+ // bits[19..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool Assembler::IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
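+ // For example, 1.0 (0x3ff0000000000000) fits this pattern, whereas 0.1
+ // (0x3fb999999999999a) does not, because its low 48 bits are non-zero.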
+ uint64_t bits = double_to_rawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0xffffffffffffL) != 0) {
+ return false;
+ }
+
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if (b_pattern != 0 && b_pattern != 0x3fc0) {
+ return false;
+ }
+
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4 * KB) {
+ desc.buffer_size = 4 * KB;
+ } else if (buffer_size_ < 1 * MB) {
+ desc.buffer_size = 2 * buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1 * MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ byte* buffer = reinterpret_cast<byte*>(buffer_);
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
+
+ // Copy the data.
+ intptr_t pc_delta = desc.buffer - buffer;
+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+ (buffer + buffer_size_);
+ memmove(desc.buffer, buffer, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // Switch buffers.
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // None of our relocation types are pc-relative pointing outside the code
+ // buffer, nor pc-absolute pointing inside the code buffer, so there is no
+ // need to relocate any emitted relocation entries.
+
+ // Relocate pending relocation entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION);
+ if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+ }
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ if (((rmode >= RelocInfo::JS_RETURN) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ (rmode == RelocInfo::CONST_POOL)) {
+ // Adjust code for new modes.
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode)
+ || RelocInfo::IsConstPool(rmode));
+ // These modes do not need an entry in the constant pool.
+ } else {
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+ }
+
+ if (!RelocInfo::IsNone(rmode)) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !emit_debug_code()) {
+ return;
+ }
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(
+ reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
+ ClearRecordedAstId();
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
+ }
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ int pc_limit = pc_offset() + instructions * kInstructionSize;
+ if (no_const_pool_before_ < pc_limit) {
+ // If there are some pending entries, the constant pool cannot be blocked
+ // further than first_const_pool_use_ + kMaxDistToPool.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+ no_const_pool_before_ = pc_limit;
+ }
+
+ if (next_buffer_check_ < no_const_pool_before_) {
+ next_buffer_check_ = no_const_pool_before_;
+ }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ // Some short sequences of instructions must not be broken up by constant
+ // pool emission; such sequences are protected by calls to BlockConstPoolFor
+ // and BlockConstPoolScope.
+ if (is_const_pool_blocked()) {
+ // Something is wrong if emission is forced and blocked at the same time.
+ ASSERT(!force_emit);
+ return;
+ }
+
+ // There is nothing to do if there are no pending constant pool entries.
+ if (num_pending_reloc_info_ == 0) {
+ // Calculate the offset of the next check.
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ return;
+ }
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance to the first instruction accessing the constant pool is
+ // kAvgDistToPool or more.
+ // * no jump is required and the distance to the first instruction accessing
+ // the constant pool is at least kMaxDistToPool / 2.
+ ASSERT(first_const_pool_use_ >= 0);
+ int dist = pc_offset() - first_const_pool_use_;
+ if (!force_emit && dist < kAvgDistToPool &&
+ (require_jump || (dist < (kMaxDistToPool / 2)))) {
+ return;
+ }
+
+ Label size_check;
+ bind(&size_check);
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (this includes the jump over the pool, the constant pool marker, the
+ // constant pool guard, and the gap to the relocation information).
+ int jump_instr = require_jump ? kInstructionSize : 0;
+ int size_pool_marker = kInstructionSize;
+ int size_pool_guard = kInstructionSize;
+ int pool_size = jump_instr + size_pool_marker + size_pool_guard +
+ num_pending_reloc_info_ * kPointerSize;
+ int needed_space = pool_size + kGap;
+ while (buffer_space() <= needed_space) {
+ GrowBuffer();
+ }
+
+ {
+ // Block recursive calls to CheckConstPool.
+ BlockConstPoolScope block_const_pool(this);
+ RecordComment("[ Constant Pool");
+ RecordConstPool(pool_size);
+
+ // Emit jump over constant pool if necessary.
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
+ }
+
+ // Emit a constant pool header. The header has two goals:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+ // The header is therefore made of two a64 instructions:
+ // ldr xzr, #<size of the constant pool in 32-bit words>
+ // blr xzr
+ // If executed the code will likely segfault and lr will point to the
+ // beginning of the constant pool.
+ // TODO(all): currently each relocated constant is 64 bits, consider adding
+ // support for 32-bit entries.
+ ConstantPoolMarker(2 * num_pending_reloc_info_);
+ ConstantPoolGuard();
+
+ // Emit constant pool entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
+ rinfo.rmode() != RelocInfo::CONST_POOL);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(instr->IsLdrLiteral() &&
+ instr->ImmLLiteral() == 0);
+
+ instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ dc64(rinfo.data());
+ }
+
+ num_pending_reloc_info_ = 0;
+ first_const_pool_use_ = -1;
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+
+ ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
+ static_cast<unsigned>(pool_size));
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_code_comments) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+int Assembler::buffer_space() const {
+ return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordConstPool(int size) {
+ // We only need this for debugger support, to correctly compute offsets in the
+ // code.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+#endif
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/assembler-a64.h b/deps/v8/src/a64/assembler-a64.h
new file mode 100644
index 0000000000..a2c93df2ae
--- /dev/null
+++ b/deps/v8/src/a64/assembler-a64.h
@@ -0,0 +1,2085 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_ASSEMBLER_A64_H_
+#define V8_A64_ASSEMBLER_A64_H_
+
+#include <list>
+#include <map>
+
+#include "globals.h"
+#include "utils.h"
+#include "assembler.h"
+#include "serialize.h"
+#include "a64/instructions-a64.h"
+#include "a64/cpu-a64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Registers.
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+
+static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+
+
+// Some CPURegister methods can return Register and FPRegister types, so we
+// need to declare them in advance.
+struct Register;
+struct FPRegister;
+
+
+struct CPURegister {
+ enum RegisterType {
+ // The kInvalid value is used to detect uninitialized static instances,
+ // which are always zero-initialized before any constructors are called.
+ kInvalid = 0,
+ kRegister,
+ kFPRegister,
+ kNoRegister
+ };
+
+ static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
+ CPURegister r = {code, size, type};
+ return r;
+ }
+
+ unsigned code() const;
+ RegisterType type() const;
+ RegList Bit() const;
+ unsigned SizeInBits() const;
+ int SizeInBytes() const;
+ bool Is32Bits() const;
+ bool Is64Bits() const;
+ bool IsValid() const;
+ bool IsValidOrNone() const;
+ bool IsValidRegister() const;
+ bool IsValidFPRegister() const;
+ bool IsNone() const;
+ bool Is(const CPURegister& other) const;
+
+ bool IsZero() const;
+ bool IsSP() const;
+
+ bool IsRegister() const;
+ bool IsFPRegister() const;
+
+ Register X() const;
+ Register W() const;
+ FPRegister D() const;
+ FPRegister S() const;
+
+ bool IsSameSizeAndType(const CPURegister& other) const;
+
+ // V8 compatibility.
+ bool is(const CPURegister& other) const { return Is(other); }
+ bool is_valid() const { return IsValid(); }
+
+ unsigned reg_code;
+ unsigned reg_size;
+ RegisterType reg_type;
+};
+
+
+struct Register : public CPURegister {
+ static Register Create(unsigned code, unsigned size) {
+ return CPURegister::Create(code, size, CPURegister::kRegister);
+ }
+
+ Register() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ Register(const CPURegister& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsRegister() || IsNone());
+ return IsValidRegister();
+ }
+
+ static Register XRegFromCode(unsigned code);
+ static Register WRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+ // These members are necessary for compilation.
+ // A few of them may be unused for now.
+
+ static const int kNumRegisters = kNumberOfRegisters;
+ static int NumRegisters() { return kNumRegisters; }
+
+ // We allow crankshaft to use the following registers:
+ // - x0 to x15
+ // - x18 to x24
+ // - x27 (also context)
+ //
+ // TODO(all): Register x25 is currently free and could be available for
+ // crankshaft, but we don't use it as we might use it as a per function
+ // literal pool pointer in the future.
+ //
+ // TODO(all): Consider storing cp in x25 to have only two ranges.
+ // We split the allocatable registers into three ranges called
+ // - "low range"
+ // - "high range"
+ // - "context"
+ static const unsigned kAllocatableLowRangeBegin = 0;
+ static const unsigned kAllocatableLowRangeEnd = 15;
+ static const unsigned kAllocatableHighRangeBegin = 18;
+ static const unsigned kAllocatableHighRangeEnd = 24;
+ static const unsigned kAllocatableContext = 27;
+
+ // Gap between low and high ranges.
+ static const int kAllocatableRangeGapSize =
+ (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
+
+ static const int kMaxNumAllocatableRegisters =
+ (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
+ (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
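+ // That is, 16 (x0-x15) + 7 (x18-x24) + 1 (cp, x27) = 24 allocatable
+ // registers.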
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ // Return true if the register is one that crankshaft can allocate.
+ bool IsAllocatable() const {
+ return ((reg_code == kAllocatableContext) ||
+ (reg_code <= kAllocatableLowRangeEnd) ||
+ ((reg_code >= kAllocatableHighRangeBegin) &&
+ (reg_code <= kAllocatableHighRangeEnd)));
+ }
+
+ static Register FromAllocationIndex(unsigned index) {
+ ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+ // cp is the last allocatable register.
+ if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
+ return from_code(kAllocatableContext);
+ }
+
+ // Handle low and high ranges.
+ return (index <= kAllocatableLowRangeEnd)
+ ? from_code(index)
+ : from_code(index + kAllocatableRangeGapSize);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ ASSERT((kAllocatableLowRangeBegin == 0) &&
+ (kAllocatableLowRangeEnd == 15) &&
+ (kAllocatableHighRangeBegin == 18) &&
+ (kAllocatableHighRangeEnd == 24) &&
+ (kAllocatableContext == 27));
+ const char* const names[] = {
+ "x0", "x1", "x2", "x3", "x4",
+ "x5", "x6", "x7", "x8", "x9",
+ "x10", "x11", "x12", "x13", "x14",
+ "x15", "x18", "x19", "x20", "x21",
+ "x22", "x23", "x24", "x27",
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(Register reg) {
+ ASSERT(reg.IsAllocatable());
+ unsigned code = reg.code();
+ if (code == kAllocatableContext) {
+ return NumAllocatableRegisters() - 1;
+ }
+
+ return (code <= kAllocatableLowRangeEnd)
+ ? code
+ : code - kAllocatableRangeGapSize;
+ }
+
+ static Register from_code(int code) {
+ // Always return an X register.
+ return Register::Create(code, kXRegSize);
+ }
+
+ // End of V8 compatibility section -----------------------
+};
+
+
+struct FPRegister : public CPURegister {
+ static FPRegister Create(unsigned code, unsigned size) {
+ return CPURegister::Create(code, size, CPURegister::kFPRegister);
+ }
+
+ FPRegister() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ FPRegister(const CPURegister& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsFPRegister() || IsNone());
+ return IsValidFPRegister();
+ }
+
+ static FPRegister SRegFromCode(unsigned code);
+ static FPRegister DRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+ static const int kMaxNumRegisters = kNumberOfFPRegisters;
+
+ // Crankshaft can use all the FP registers except:
+ // - d29 which is used in crankshaft as a double scratch register
+ // - d30 which is used to keep the 0 double value
+ // - d31 which is used in the MacroAssembler as a double scratch register
+ static const int kNumReservedRegisters = 3;
+ static const int kMaxNumAllocatableRegisters =
+ kNumberOfFPRegisters - kNumReservedRegisters;
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+ static const RegList kAllocatableFPRegisters =
+ (1 << kMaxNumAllocatableRegisters) - 1;
+
+ static FPRegister FromAllocationIndex(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ return from_code(index);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ const char* const names[] = {
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28",
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(FPRegister reg) {
+ int code = reg.code();
+ ASSERT(code < NumAllocatableRegisters());
+ return code;
+ }
+
+ static FPRegister from_code(int code) {
+ // Always return a D register.
+ return FPRegister::Create(code, kDRegSize);
+ }
+ // End of V8 compatibility section -----------------------
+};
+
+
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
+
+
+#if defined(A64_DEFINE_REG_STATICS)
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ const CPURegister init_##register_class##_##name = {code, size, type}; \
+ const register_class& name = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#define ALIAS_REGISTER(register_class, alias, name) \
+ const register_class& alias = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#else
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ extern const register_class& name
+#define ALIAS_REGISTER(register_class, alias, name) \
+ extern const register_class& alias
+#endif // defined(A64_DEFINE_REG_STATICS)
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and FPRegister
+// variants are provided for convenience.
+INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
+
+// v8 compatibility.
+INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
+
+#define DEFINE_REGISTERS(N) \
+ INITIALIZE_REGISTER(Register, w##N, N, kWRegSize, CPURegister::kRegister); \
+ INITIALIZE_REGISTER(Register, x##N, N, kXRegSize, CPURegister::kRegister);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+
+INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSize,
+ CPURegister::kRegister);
+INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSize,
+ CPURegister::kRegister);
+
+#define DEFINE_FPREGISTERS(N) \
+ INITIALIZE_REGISTER(FPRegister, s##N, N, kSRegSize, \
+ CPURegister::kFPRegister); \
+ INITIALIZE_REGISTER(FPRegister, d##N, N, kDRegSize, CPURegister::kFPRegister);
+REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
+#undef DEFINE_FPREGISTERS
+
+#undef INITIALIZE_REGISTER
+
+// Register aliases.
+ALIAS_REGISTER(Register, ip0, x16);
+ALIAS_REGISTER(Register, ip1, x17);
+ALIAS_REGISTER(Register, wip0, w16);
+ALIAS_REGISTER(Register, wip1, w17);
+// Root register.
+ALIAS_REGISTER(Register, root, x26);
+ALIAS_REGISTER(Register, rr, x26);
+// Context pointer register.
+ALIAS_REGISTER(Register, cp, x27);
+// We use a register as a JS stack pointer to overcome the restriction on the
+// architectural SP alignment.
+// We chose x28 because it is contiguous with the other special-purpose
+// registers.
+STATIC_ASSERT(kJSSPCode == 28);
+ALIAS_REGISTER(Register, jssp, x28);
+ALIAS_REGISTER(Register, wjssp, w28);
+ALIAS_REGISTER(Register, fp, x29);
+ALIAS_REGISTER(Register, lr, x30);
+ALIAS_REGISTER(Register, xzr, x31);
+ALIAS_REGISTER(Register, wzr, w31);
+
+// Crankshaft double scratch register.
+ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
+// Keeps the 0 double value.
+ALIAS_REGISTER(FPRegister, fp_zero, d30);
+// MacroAssembler double scratch register.
+ALIAS_REGISTER(FPRegister, fp_scratch, d31);
+
+#undef ALIAS_REGISTER
+
+
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = NoReg,
+ Register reg3 = NoReg,
+ Register reg4 = NoReg);
+
+
+// AreAliased returns true if any of the named registers overlap. Arguments set
+// to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg,
+ const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg,
+ const CPURegister& reg8 = NoCPUReg);
+
+
+typedef FPRegister DoubleRegister;
+
+
+// -----------------------------------------------------------------------------
+// Lists of registers.
+class CPURegList {
+ public:
+ explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
+ : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+ size_(reg1.SizeInBits()), type_(reg1.type()) {
+ ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
+ : size_(size), type_(type) {
+ ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kFPRegister) &&
+ (last_reg < kNumberOfFPRegisters)));
+ ASSERT(last_reg >= first_reg);
+ list_ = (1UL << (last_reg + 1)) - 1;
+ list_ &= ~((1UL << first_reg) - 1);
+ ASSERT(IsValid());
+ }
+
+ CPURegister::RegisterType type() const {
+ ASSERT(IsValid());
+ return type_;
+ }
+
+ RegList list() const {
+ ASSERT(IsValid());
+ return list_;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other);
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type and size of the registers
+ // in the 'other' list must match those in this list.
+ void Remove(const CPURegList& other);
+
+ // Variants of Combine and Remove which take a single register.
+ void Combine(const CPURegister& other);
+ void Remove(const CPURegister& other);
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register is inferred from this list.
+ void Combine(int code);
+ void Remove(int code);
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
+ static CPURegList GetCalleeSavedFP(unsigned size = kDRegSize);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ static CPURegList GetCallerSaved(unsigned size = kXRegSize);
+ static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
+
+ // Registers saved as safepoints.
+ static CPURegList GetSafepointSavedRegisters();
+
+ bool IsEmpty() const {
+ ASSERT(IsValid());
+ return list_ == 0;
+ }
+
+ bool IncludesAliasOf(const CPURegister& other) const {
+ ASSERT(IsValid());
+ return (type_ == other.type()) && (other.Bit() & list_);
+ }
+
+ int Count() const {
+ ASSERT(IsValid());
+ return CountSetBits(list_, kRegListSizeInBits);
+ }
+
+ unsigned RegisterSizeInBits() const {
+ ASSERT(IsValid());
+ return size_;
+ }
+
+ unsigned RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ ASSERT((size_in_bits % kBitsPerByte) == 0);
+ return size_in_bits / kBitsPerByte;
+ }
+
+ private:
+ RegList list_;
+ unsigned size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const {
+ if ((type_ == CPURegister::kRegister) ||
+ (type_ == CPURegister::kFPRegister)) {
+ bool is_valid = true;
+ // Try to create a CPURegister for each element in the list.
+ for (int i = 0; i < kRegListSizeInBits; i++) {
+ if (((list_ >> i) & 1) != 0) {
+ is_valid &= CPURegister::Create(i, size_, type_).IsValid();
+ }
+ }
+ return is_valid;
+ } else if (type_ == CPURegister::kNoRegister) {
+ // The kNoRegister type is valid only for empty lists.
+ // We can't use IsEmpty here because that asserts IsValid().
+ return list_ == 0;
+ } else {
+ return false;
+ }
+ }
+};
+
+
+// AAPCS64 callee-saved registers.
+#define kCalleeSaved CPURegList::GetCalleeSaved()
+#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+#define kCallerSaved CPURegList::GetCallerSaved()
+#define kCallerSavedFP CPURegList::GetCallerSavedFP()
+
+
+// -----------------------------------------------------------------------------
+// Operands.
+const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+
+// Represents an operand in a machine instruction.
+class Operand {
+ // TODO(all): If necessary, study in more detail which methods
+ // TODO(all): should be inlined or not.
+ public:
+ // rm, {<shift> {#<shift_amount>}}
+ // where <shift> is one of {LSL, LSR, ASR, ROR}.
+ // <shift_amount> is uint6_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ inline Operand(Register reg,
+ Shift shift = LSL,
+ unsigned shift_amount = 0); // NOLINT(runtime/explicit)
+
+ // rm, <extend> {#<shift_amount>}
+ // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+ // <shift_amount> is uint2_t.
+ inline Operand(Register reg,
+ Extend extend,
+ unsigned shift_amount = 0);
+
+ template<typename T>
+ inline explicit Operand(Handle<T> handle);
+
+ // Implicit constructor for all int types, ExternalReference, and Smi.
+ template<typename T>
+ inline Operand(T t); // NOLINT(runtime/explicit)
+
+ // Implicit constructor for int types.
+ template<typename int_t>
+ inline Operand(int_t t, RelocInfo::Mode rmode);
+
+ inline bool IsImmediate() const;
+ inline bool IsShiftedRegister() const;
+ inline bool IsExtendedRegister() const;
+ inline bool IsZero() const;
+
+ // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+ // which helps in the encoding of instructions that use the stack pointer.
+ inline Operand ToExtendedRegister() const;
+
+ inline int64_t immediate() const;
+ inline Register reg() const;
+ inline Shift shift() const;
+ inline Extend extend() const;
+ inline unsigned shift_amount() const;
+
+ // Relocation information.
+ RelocInfo::Mode rmode() const { return rmode_; }
+ void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
+ bool NeedsRelocation() const;
+
+ // Helpers
+ inline static Operand UntagSmi(Register smi);
+ inline static Operand UntagSmiAndScale(Register smi, int scale);
+
+ private:
+ void initialize_handle(Handle<Object> value);
+ int64_t immediate_;
+ Register reg_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+ RelocInfo::Mode rmode_;
+};
+
+
+// MemOperand represents a memory operand in a load or store instruction.
+class MemOperand {
+ public:
+ inline explicit MemOperand(Register base,
+ ptrdiff_t offset = 0,
+ AddrMode addrmode = Offset);
+ inline explicit MemOperand(Register base,
+ Register regoffset,
+ Shift shift = LSL,
+ unsigned shift_amount = 0);
+ inline explicit MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount = 0);
+ inline explicit MemOperand(Register base,
+ const Operand& offset,
+ AddrMode addrmode = Offset);
+
+ const Register& base() const { return base_; }
+ const Register& regoffset() const { return regoffset_; }
+ ptrdiff_t offset() const { return offset_; }
+ AddrMode addrmode() const { return addrmode_; }
+ Shift shift() const { return shift_; }
+ Extend extend() const { return extend_; }
+ unsigned shift_amount() const { return shift_amount_; }
+ inline bool IsImmediateOffset() const;
+ inline bool IsRegisterOffset() const;
+ inline bool IsPreIndex() const;
+ inline bool IsPostIndex() const;
+
+ // For offset modes, return the offset as an Operand. This helper cannot
+ // handle indexed modes.
+ inline Operand OffsetAsOperand() const;
+
+ private:
+ Register base_;
+ Register regoffset_;
+ ptrdiff_t offset_;
+ AddrMode addrmode_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+class Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
+
+ virtual ~Assembler();
+
+ // System functions ---------------------------------------------------------
+ // Start generating code from the beginning of the buffer, discarding any code
+ // and data that has already been emitted into the buffer.
+ //
+ // In order to avoid any accidental transfer of state, Reset ASSERTs that the
+ // constant pool is not blocked.
+ void Reset();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ //
+ // The descriptor (desc) can be NULL. In that case, the code is finalized as
+ // usual, but the descriptor is not populated.
+ void GetCode(CodeDesc* desc);
+
+ // Insert the smallest number of nop instructions possible to align the pc
+ // offset to a multiple of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ inline void Unreachable();
+
+ // Label --------------------------------------------------------------------
+ // Bind a label to the current pc. Note that labels can only be bound once,
+ // and if labels are linked to other instructions, they _must_ be bound
+ // before they go out of scope.
+ void bind(Label* label);
+
+
+ // RelocInfo and constant pool ----------------------------------------------
+
+ // Record relocation information for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ inline static Address target_pointer_address_at(Address pc);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Address target_address_at(Address pc);
+ inline static void set_target_address_at(Address pc, Address target);
+
+ // Return the code target address at a call site from the return address of
+ // that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ // Given the address of the beginning of a call, return the address in the
+ // instruction stream that call will return from.
+ inline static Address return_address_from_call_start(Address pc);
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches within generated code.
+ inline static void deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target);
+
+ // All addresses in the constant pool are the same size as pointers.
+ static const int kSpecialTargetSize = kPointerSize;
+
+ // The sizes of the call sequences emitted by MacroAssembler::Call.
+ // Wherever possible, use MacroAssembler::CallSize instead of these constants,
+ // as it will choose the correct value for a given relocation mode.
+ //
+ // Without relocation:
+ // movz ip0, #(target & 0x000000000000ffff)
+ // movk ip0, #(target & 0x00000000ffff0000)
+ // movk ip0, #(target & 0x0000ffff00000000)
+ // movk ip0, #(target & 0xffff000000000000)
+ // blr ip0
+ //
+ // With relocation:
+ // ldr ip0, =target
+ // blr ip0
+ static const int kCallSizeWithoutRelocation = 5 * kInstructionSize;
+ static const int kCallSizeWithRelocation = 2 * kInstructionSize;
+
+ // Size of the generated code in bytes
+ uint64_t SizeOfGeneratedCode() const {
+ ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+ return pc_ - buffer_;
+ }
+
+ // Return the code size generated from label to the current position.
+ uint64_t SizeOfCodeGeneratedSince(const Label* label) {
+ ASSERT(label->is_bound());
+ ASSERT(pc_offset() >= label->pos());
+ ASSERT(pc_offset() < buffer_size_);
+ return pc_offset() - label->pos();
+ }
+
+ // Check the size of the code generated since the given label. This function
+ // is used primarily to work around comparisons between signed and unsigned
+ // quantities, since V8 uses both.
+ // TODO(jbramley): Work out what sign to use for these things and if possible,
+ // change things to be consistent.
+ void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
+ ASSERT(size >= 0);
+ ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
+ }
+
+ // Return the number of instructions generated from label to the
+ // current position.
+ int InstructionsGeneratedSince(const Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstructionSize;
+ }
+
+ // TODO(all): Initialize these constants related with code patching.
+ // TODO(all): Set to -1 to hopefully crash if mistakenly used.
+
+ // Number of instructions generated for the return sequence in
+ // FullCodeGenerator::EmitReturnSequence.
+ static const int kJSRetSequenceInstructions = 7;
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = 0;
+ static const int kPatchDebugBreakSlotAddressOffset = 0;
+
+ // Number of instructions necessary to be able to later patch it to a call.
+ // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot().
+ static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstructionSize;
+
+ static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
+
+ // Prevent constant pool emission until EndBlockConstPool is called.
+ // Calls to this function can be nested but must be followed by an equal
+ // number of calls to EndBlockConstPool.
+ void StartBlockConstPool();
+
+ // Resume constant pool emission. Needs to be called as many times as
+ // StartBlockConstPool to have an effect.
+ void EndBlockConstPool();
+
+ bool is_const_pool_blocked() const;
+ static bool IsConstantPoolAt(Instruction* instr);
+ static int ConstantPoolSizeAt(Instruction* instr);
+ // See Assembler::CheckConstPool for more info.
+ void ConstantPoolMarker(uint32_t size);
+ void ConstantPoolGuard();
+
+
+ // Debugging ----------------------------------------------------------------
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ void RecordComment(const char* msg);
+ int buffer_space() const;
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record the emission of a constant pool.
+ //
+ // The emission of a constant pool depends on the size of the code generated
+ // and the number of RelocInfo entries recorded.
+ // The Debug mechanism needs to map code offsets between two versions of a
+ // function, compiled with and without debugger support (see for example
+ // Debug::PrepareForBreakPoints()).
+ // Compiling functions with debugger support generates additional code
+ // (Debug::GenerateSlot()). This may affect the emission of the constant
+ // pools and cause the version of the code with debugger support to have
+ // constant pools generated in different places.
+ // Recording the position and size of emitted constant pools allows the
+ // offset mappings between the different versions of a function to be
+ // computed correctly in all situations.
+ //
+ // The parameter indicates the size of the constant pool (in bytes), including
+ // the marker and branch over the data.
+ void RecordConstPool(int size);
+
+
+ // Instruction set functions ------------------------------------------------
+
+ // Branch / Jump instructions.
+ // For branches, offsets are scaled, i.e. they are in instructions, not in
+ // bytes.
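+ // For example, an offset of 1 targets the next instruction (4 bytes away).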
+ // Branch to register.
+ void br(const Register& xn);
+
+ // Branch-link to register.
+ void blr(const Register& xn);
+
+ // Branch to register with return hint.
+ void ret(const Register& xn = lr);
+
+ // Unconditional branch to label.
+ void b(Label* label);
+
+ // Conditional branch to label.
+ void b(Label* label, Condition cond);
+
+ // Unconditional branch to PC offset.
+ void b(int imm26);
+
+ // Conditional branch to PC offset.
+ void b(int imm19, Condition cond);
+
+ // Branch-link to label / pc offset.
+ void bl(Label* label);
+ void bl(int imm26);
+
+ // Compare and branch to label / pc offset if zero.
+ void cbz(const Register& rt, Label* label);
+ void cbz(const Register& rt, int imm19);
+
+ // Compare and branch to label / pc offset if not zero.
+ void cbnz(const Register& rt, Label* label);
+ void cbnz(const Register& rt, int imm19);
+
+ // Test bit and branch to label / pc offset if zero.
+ void tbz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Test bit and branch to label / pc offset if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbnz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Address calculation instructions.
+ // Calculate a PC-relative address. Unlike for branches the offset in adr is
+ // unscaled (i.e. the result can be unaligned).
+ void adr(const Register& rd, Label* label);
+ void adr(const Register& rd, int imm21);
+
+ // Data Processing instructions.
+ // Add.
+ void add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add and update status flags.
+ void adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare negative.
+ void cmn(const Register& rn, const Operand& operand);
+
+ // Subtract.
+ void sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract and update status flags.
+ void subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare.
+ void cmp(const Register& rn, const Operand& operand);
+
+ // Negate.
+ void neg(const Register& rd,
+ const Operand& operand);
+
+ // Negate and update status flags.
+ void negs(const Register& rd,
+ const Operand& operand);
+
+ // Add with carry bit.
+ void adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add with carry bit and update status flags.
+ void adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit.
+ void sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit and update status flags.
+ void sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Negate with carry bit.
+ void ngc(const Register& rd,
+ const Operand& operand);
+
+ // Negate with carry bit and update status flags.
+ void ngcs(const Register& rd,
+ const Operand& operand);
+
+ // Logical instructions.
+ // Bitwise and (A & B).
+ void and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise and (A & B) and update status flags.
+ void ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit test, and set flags.
+ void tst(const Register& rn, const Operand& operand);
+
+ // Bit clear (A & ~B).
+ void bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit clear (A & ~B) and update status flags.
+ void bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise or (A | B).
+ void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise or-not (A | ~B).
+ void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise eor/xor (A ^ B).
+ void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise exclusive-or-not / xnor (A ^ ~B).
+ void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Logical shift left variable.
+ void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Logical shift right variable.
+ void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Arithmetic shift right variable.
+ void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Rotate right variable.
+ void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bitfield instructions.
+ // Bitfield move.
+ void bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Signed bitfield move.
+ void sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Unsigned bitfield move.
+ void ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Bfm aliases.
+ // Bitfield insert.
+ void bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Bitfield extract and insert low.
+ void bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ bfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Sbfm aliases.
+ // Arithmetic shift right.
+ void asr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.SizeInBits());
+ sbfm(rd, rn, shift, rd.SizeInBits() - 1);
+ }
+
+ // Signed bitfield insert in zero.
+ void sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Signed bitfield extract.
+ void sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ sbfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Signed extend byte.
+ void sxtb(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 7);
+ }
+
+ // Signed extend halfword.
+ void sxth(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 15);
+ }
+
+ // Signed extend word.
+ void sxtw(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 31);
+ }
+
+ // Ubfm aliases.
+ // Logical shift left.
+ void lsl(const Register& rd, const Register& rn, unsigned shift) {
+ unsigned reg_size = rd.SizeInBits();
+ ASSERT(shift < reg_size);
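+ // lsl #shift is an alias of ubfm with immr == -shift (mod reg_size) and
+ // imms == reg_size - 1 - shift.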
+ ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+ }
+
+ // Logical shift right.
+ void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.SizeInBits());
+ ubfm(rd, rn, shift, rd.SizeInBits() - 1);
+ }
+
+ // Unsigned bitfield insert in zero.
+ void ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Unsigned bitfield extract.
+ void ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ ubfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Unsigned extend byte.
+ void uxtb(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 7);
+ }
+
+ // Unsigned extend halfword.
+ void uxth(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 15);
+ }
+
+ // Unsigned extend word.
+ void uxtw(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 31);
+ }
+
+ // Extract.
+ void extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+
+ // Conditional select: rd = cond ? rn : rm.
+ void csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select increment: rd = cond ? rn : rm + 1.
+ void csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select inversion: rd = cond ? rn : ~rm.
+ void csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select negation: rd = cond ? rn : -rm.
+ void csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional set: rd = cond ? 1 : 0.
+ void cset(const Register& rd, Condition cond);
+
+ // Conditional set minus: rd = cond ? -1 : 0.
+ void csetm(const Register& rd, Condition cond);
+
+ // Conditional increment: rd = cond ? rn + 1 : rn.
+ void cinc(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional invert: rd = cond ? ~rn : rn.
+ void cinv(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional negate: rd = cond ? -rn : rn.
+ void cneg(const Register& rd, const Register& rn, Condition cond);
+
+ // Extr aliases.
+ void ror(const Register& rd, const Register& rs, unsigned shift) {
+ extr(rd, rs, rs, shift);
+ }
+
+ // Conditional comparison.
+ // Conditional compare negative.
+ void ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Conditional compare.
+ void ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Multiplication.
+ // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
+ void mul(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
+ void madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
+ void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
+ void msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // 32 x 32 -> 64-bit multiply.
+ void smull(const Register& rd, const Register& rn, const Register& rm);
+
+ // Xd = bits<127:64> of Xn * Xm.
+ void smulh(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed 32 x 32 -> 64-bit multiply and accumulate.
+ void smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
+ void umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed 32 x 32 -> 64-bit multiply and subtract.
+ void smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and subtract.
+ void umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed integer divide.
+ void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Unsigned integer divide.
+ void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bit count, bit reverse and endian reverse.
+ void rbit(const Register& rd, const Register& rn);
+ void rev16(const Register& rd, const Register& rn);
+ void rev32(const Register& rd, const Register& rn);
+ void rev(const Register& rd, const Register& rn);
+ void clz(const Register& rd, const Register& rn);
+ void cls(const Register& rd, const Register& rn);
+
+ // Memory instructions.
+
+ // Load literal from pc + offset_from_pc.
+ void LoadLiteral(const CPURegister& rt, int offset_from_pc);
+
+ // Load integer or FP register.
+ void ldr(const CPURegister& rt, const MemOperand& src);
+
+ // Store integer or FP register.
+ void str(const CPURegister& rt, const MemOperand& dst);
+
+ // Load word with sign extension.
+ void ldrsw(const Register& rt, const MemOperand& src);
+
+ // Load byte.
+ void ldrb(const Register& rt, const MemOperand& src);
+
+ // Store byte.
+ void strb(const Register& rt, const MemOperand& dst);
+
+ // Load byte with sign extension.
+ void ldrsb(const Register& rt, const MemOperand& src);
+
+ // Load half-word.
+ void ldrh(const Register& rt, const MemOperand& src);
+
+ // Store half-word.
+ void strh(const Register& rt, const MemOperand& dst);
+
+ // Load half-word with sign extension.
+ void ldrsh(const Register& rt, const MemOperand& src);
+
+ // Load integer or FP register pair.
+ void ldp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair.
+ void stp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load word pair with sign extension.
+ void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Load integer or FP register pair, non-temporal.
+ void ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair, non-temporal.
+ void stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load literal to register.
+ void ldr(const Register& rt, uint64_t imm);
+
+ // Load literal to FP register.
+ void ldr(const FPRegister& ft, double imm);
+
+  // Move instructions. The default shift of -1 indicates that the move
+  // instruction will calculate an appropriate 16-bit immediate and left shift
+  // such that the shifted immediate equals the 64-bit immediate argument. If
+  // an explicit left shift is specified (0, 16, 32 or 48), the immediate must
+  // be a 16-bit value.
+ //
+ // For movk, an explicit shift can be used to indicate which half word should
+  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
+ // half word with zero, whereas movk(x0, 0, 48) will overwrite the
+ // most-significant.
+
+ // Move and keep.
+ void movk(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVK);
+ }
+
+  // Move with NOT (move wide with inverted immediate).
+ void movn(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVN);
+ }
+
+ // Move with zero.
+ void movz(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVZ);
+ }
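+
+  // For example (a typical usage sketch):
+  //   movz(x0, 0x1234);        // x0 = 0x0000000000001234
+  //   movk(x0, 0x5678, 16);    // x0 = 0x0000000056781234
+  // The movz clears the register and writes the low half word; the movk then
+  // overwrites only bits 16-31 and keeps the rest.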
+
+ // Misc instructions.
+ // Monitor debug-mode breakpoint.
+ void brk(int code);
+
+ // Halting debug-mode breakpoint.
+ void hlt(int code);
+
+ // Move register to register.
+ void mov(const Register& rd, const Register& rn);
+
+ // Move NOT(operand) to register.
+ void mvn(const Register& rd, const Operand& operand);
+
+ // System instructions.
+ // Move to register from system register.
+ void mrs(const Register& rt, SystemRegister sysreg);
+
+ // Move from register to system register.
+ void msr(SystemRegister sysreg, const Register& rt);
+
+ // System hint.
+ void hint(SystemHint code);
+
+  // Data memory barrier.
+  void dmb(BarrierDomain domain, BarrierType type);
+
+  // Data synchronization barrier.
+  void dsb(BarrierDomain domain, BarrierType type);
+
+  // Instruction synchronization barrier.
+ void isb();
+
+ // Alias for system instructions.
+ void nop() { hint(NOP); }
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ DEBUG_BREAK_NOP,
+ INTERRUPT_CODE_NOP,
+ FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
+ LAST_NOP_MARKER = INTERRUPT_CODE_NOP
+ };
+
+ void nop(NopMarkerTypes n) {
+ ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
+ mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
+ }
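+
+  // A marker nop is encoded as a register self-move: DEBUG_BREAK_NOP is the
+  // first marker (value 0), so nop(DEBUG_BREAK_NOP) emits mov(x0, x0), which
+  // leaves program state unchanged but can be recognized when scanning the
+  // generated code.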
+
+ // FP instructions.
+ // Move immediate to FP register.
+ void fmov(FPRegister fd, double imm);
+
+ // Move FP register to register.
+ void fmov(Register rd, FPRegister fn);
+
+ // Move register to FP register.
+ void fmov(FPRegister fd, Register rn);
+
+ // Move FP register to FP register.
+ void fmov(FPRegister fd, FPRegister fn);
+
+ // FP add.
+ void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP subtract.
+ void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP multiply.
+ void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP fused multiply and add.
+ void fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply and subtract.
+ void fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, add and negate.
+ void fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, subtract and negate.
+ void fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP divide.
+ void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP maximum.
+ void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP minimum.
+ void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+  // FP maximum number.
+ void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+  // FP minimum number.
+ void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP absolute.
+ void fabs(const FPRegister& fd, const FPRegister& fn);
+
+ // FP negate.
+ void fneg(const FPRegister& fd, const FPRegister& fn);
+
+ // FP square root.
+ void fsqrt(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to away).
+ void frinta(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to even).
+ void frintn(const FPRegister& fd, const FPRegister& fn);
+
+  // FP round to integer (towards zero).
+ void frintz(const FPRegister& fd, const FPRegister& fn);
+
+ // FP compare registers.
+ void fcmp(const FPRegister& fn, const FPRegister& fm);
+
+ // FP compare immediate.
+ void fcmp(const FPRegister& fn, double value);
+
+ // FP conditional compare.
+ void fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP conditional select.
+ void fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+
+  // Common FP convert function.
+ void FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op);
+
+ // FP convert between single and double precision.
+ void fcvt(const FPRegister& fd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to away).
+ void fcvtau(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to away).
+ void fcvtas(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards -infinity).
+ void fcvtmu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (round towards -infinity).
+ void fcvtms(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to even).
+ void fcvtnu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to even).
+ void fcvtns(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards zero).
+ void fcvtzu(const Register& rd, const FPRegister& fn);
+
+  // Convert FP to signed integer (round towards zero).
+ void fcvtzs(const Register& rd, const FPRegister& fn);
+
+ // Convert signed integer or fixed point to FP.
+ void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Convert unsigned integer or fixed point to FP.
+ void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Instruction functions used only for test, debug, and patching.
+ // Emit raw instructions in the instruction stream.
+ void dci(Instr raw_inst) { Emit(raw_inst); }
+
+ // Emit 8 bits of data in the instruction stream.
+ void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
+
+ // Emit 32 bits of data in the instruction stream.
+ void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
+
+ // Emit 64 bits of data in the instruction stream.
+ void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
+
+ // Copy a string into the instruction stream, including the terminating NULL
+ // character. The instruction pointer (pc_) is then aligned correctly for
+ // subsequent instructions.
+ void EmitStringData(const char * string) {
+ size_t len = strlen(string) + 1;
+ ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+ EmitData(string, len);
+ // Pad with NULL characters until pc_ is aligned.
+ const char pad[] = {'\0', '\0', '\0', '\0'};
+ STATIC_ASSERT(sizeof(pad) == kInstructionSize);
+ byte* next_pc = AlignUp(pc_, kInstructionSize);
+ EmitData(&pad, next_pc - pc_);
+ }
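+
+  // For example, EmitStringData("abc") emits the four bytes "abc\0" and needs
+  // no padding, whereas EmitStringData("abcd") emits five bytes followed by
+  // three NULL bytes so that pc_ ends up aligned to kInstructionSize.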
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Parameters are described in a64/instructions-a64.h.
+ void debug(const char* message, uint32_t code, Instr params = BREAK);
+
+ // Required by V8.
+ void dd(uint32_t data) { dc32(data); }
+ void db(uint8_t data) { dc8(data); }
+
+ // Code generation helpers --------------------------------------------------
+
+ unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
+
+ Instruction* InstructionAt(int offset) const {
+ return reinterpret_cast<Instruction*>(buffer_ + offset);
+ }
+
+ // Register encoding.
+ static Instr Rd(CPURegister rd) {
+ ASSERT(rd.code() != kSPRegInternalCode);
+ return rd.code() << Rd_offset;
+ }
+
+ static Instr Rn(CPURegister rn) {
+ ASSERT(rn.code() != kSPRegInternalCode);
+ return rn.code() << Rn_offset;
+ }
+
+ static Instr Rm(CPURegister rm) {
+ ASSERT(rm.code() != kSPRegInternalCode);
+ return rm.code() << Rm_offset;
+ }
+
+ static Instr Ra(CPURegister ra) {
+ ASSERT(ra.code() != kSPRegInternalCode);
+ return ra.code() << Ra_offset;
+ }
+
+ static Instr Rt(CPURegister rt) {
+ ASSERT(rt.code() != kSPRegInternalCode);
+ return rt.code() << Rt_offset;
+ }
+
+ static Instr Rt2(CPURegister rt2) {
+ ASSERT(rt2.code() != kSPRegInternalCode);
+ return rt2.code() << Rt2_offset;
+ }
+
+ // These encoding functions allow the stack pointer to be encoded, and
+ // disallow the zero register.
+ static Instr RdSP(Register rd) {
+ ASSERT(!rd.IsZero());
+ return (rd.code() & kRegCodeMask) << Rd_offset;
+ }
+
+ static Instr RnSP(Register rn) {
+ ASSERT(!rn.IsZero());
+ return (rn.code() & kRegCodeMask) << Rn_offset;
+ }
+
+ // Flags encoding.
+ inline static Instr Flags(FlagsUpdate S);
+ inline static Instr Cond(Condition cond);
+
+ // PC-relative address encoding.
+ inline static Instr ImmPCRelAddress(int imm21);
+
+ // Branch encoding.
+ inline static Instr ImmUncondBranch(int imm26);
+ inline static Instr ImmCondBranch(int imm19);
+ inline static Instr ImmCmpBranch(int imm19);
+ inline static Instr ImmTestBranch(int imm14);
+ inline static Instr ImmTestBranchBit(unsigned bit_pos);
+
+ // Data Processing encoding.
+ inline static Instr SF(Register rd);
+ inline static Instr ImmAddSub(int64_t imm);
+ inline static Instr ImmS(unsigned imms, unsigned reg_size);
+ inline static Instr ImmR(unsigned immr, unsigned reg_size);
+ inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
+ inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
+ inline static Instr ImmLLiteral(int imm19);
+ inline static Instr BitN(unsigned bitn, unsigned reg_size);
+ inline static Instr ShiftDP(Shift shift);
+ inline static Instr ImmDPShift(unsigned amount);
+ inline static Instr ExtendMode(Extend extend);
+ inline static Instr ImmExtendShift(unsigned left_shift);
+ inline static Instr ImmCondCmp(unsigned imm);
+ inline static Instr Nzcv(StatusFlags nzcv);
+
+ // MemOperand offset encoding.
+ inline static Instr ImmLSUnsigned(int imm12);
+ inline static Instr ImmLS(int imm9);
+ inline static Instr ImmLSPair(int imm7, LSDataSize size);
+ inline static Instr ImmShiftLS(unsigned shift_amount);
+ inline static Instr ImmException(int imm16);
+ inline static Instr ImmSystemRegister(int imm15);
+ inline static Instr ImmHint(int imm7);
+ inline static Instr ImmBarrierDomain(int imm2);
+ inline static Instr ImmBarrierType(int imm2);
+ inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
+
+ // Move immediates encoding.
+ inline static Instr ImmMoveWide(uint64_t imm);
+ inline static Instr ShiftMoveWide(int64_t shift);
+
+ // FP Immediates.
+ static Instr ImmFP32(float imm);
+ static Instr ImmFP64(double imm);
+ inline static Instr FPScale(unsigned scale);
+
+ // FP register type.
+ inline static Instr FPType(FPRegister fd);
+
+  // Class for scoped postponing of constant pool generation.
+ class BlockConstPoolScope {
+ public:
+ explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockConstPool();
+ }
+ ~BlockConstPoolScope() {
+ assem_->EndBlockConstPool();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
+ };
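+
+  // Typical usage (a sketch, where assm is an Assembler*): wrap a code
+  // sequence that must stay contiguous, e.g.
+  //   { Assembler::BlockConstPoolScope scope(assm); /* emit the sequence */ }
+  // Constant pool emission is blocked for the lifetime of the scope and
+  // unblocked again when it is destroyed.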
+
+  // Check if it is time to emit a constant pool.
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Available for constrained code generation scopes. Prefer
+ // MacroAssembler::Mov() when possible.
+ inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
+
+ protected:
+ inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
+
+ void LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+ static bool IsImmLSUnscaled(ptrdiff_t offset);
+ static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
+
+ void Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+ void LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op);
+ static bool IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r);
+
+ void ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ static bool IsImmConditionalCompare(int64_t immediate);
+
+ void AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Functions for emulating operands not directly supported by the instruction
+ // set.
+ void EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned amount);
+ void EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift);
+
+ void AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+ static bool IsImmAddSub(int64_t immediate);
+
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+
+ // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+ // registers. Only simple loads are supported; sign- and zero-extension (such
+ // as in LDPSW_x or LDRB_w) are not supported.
+ static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
+ static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
+ static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+
+ // Remove the specified branch from the unbound label link chain.
+ // If available, a veneer for this label can be used for other branches in the
+ // chain if the link chain cannot be fixed up without this branch.
+ void RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer = NULL);
+
+ private:
+ // Instruction helpers.
+ void MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op);
+ void DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+ void LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op);
+ // Register the relocation information for the operand and load its value
+ // into rt.
+ void LoadRelocatedValue(const CPURegister& rt,
+ const Operand& operand,
+ LoadLiteralOp op);
+ void ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op);
+ void DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op);
+ void DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op);
+ void FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op);
+ void FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op);
+ void FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op);
+
+ // Label helpers.
+
+ // Return an offset for a label-referencing instruction, typically a branch.
+ int LinkAndGetByteOffsetTo(Label* label);
+
+  // This is the same as LinkAndGetByteOffsetTo, but returns an offset
+ // suitable for fields that take instruction offsets.
+ inline int LinkAndGetInstructionOffsetTo(Label* label);
+
+ static const int kStartOfLabelLinkChain = 0;
+
+ // Verify that a label's link chain is intact.
+ void CheckLabelLinkChain(Label const * label);
+
+ void RecordLiteral(int64_t imm, unsigned size);
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
+ // Emit the instruction at pc_.
+ void Emit(Instr instruction) {
+ STATIC_ASSERT(sizeof(*pc_) == 1);
+ STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+ ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+
+ memcpy(pc_, &instruction, sizeof(instruction));
+ pc_ += sizeof(instruction);
+ CheckBuffer();
+ }
+
+ // Emit data inline in the instruction stream.
+ void EmitData(void const * data, unsigned size) {
+ ASSERT(sizeof(*pc_) == 1);
+ ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+
+ // TODO(all): Somehow register we have some data here. Then we can
+ // disassemble it correctly.
+ memcpy(pc_, data, size);
+ pc_ += size;
+ CheckBuffer();
+ }
+
+ void GrowBuffer();
+ void CheckBuffer();
+
+  // PC offset of the next buffer check.
+ int next_buffer_check_;
+
+ // Constant pool generation
+ // Pools are emitted in the instruction stream, preferably after unconditional
+ // jumps or after returns from functions (in dead code locations).
+ // If a long code sequence does not contain unconditional jumps, it is
+ // necessary to emit the constant pool before the pool gets too far from the
+ // location it is accessed from. In this case, we emit a jump over the emitted
+ // constant pool.
+  // Constants in the pool may be addresses of functions that get relocated;
+  // if so, a relocation info entry is associated with the constant pool entry.
+
+ // Repeated checking whether the constant pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated. That also means that the sizing of the buffers is not
+ // an exact science, and that we rely on some slop to not overrun buffers.
+ static const int kCheckPoolIntervalInst = 128;
+ static const int kCheckPoolInterval =
+ kCheckPoolIntervalInst * kInstructionSize;
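+  // With 4-byte instructions this corresponds to a check roughly every 512
+  // bytes of generated code.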
+
+ // Constants in pools are accessed via pc relative addressing, which can
+  // reach +/-4KB, thereby defining a maximum distance between the instruction
+ // and the accessed constant.
+ static const int kMaxDistToPool = 4 * KB;
+ static const int kMaxNumPendingRelocInfo = kMaxDistToPool / kInstructionSize;
+
+
+  // Average distance between a constant pool and the first instruction
+  // accessing the constant pool. A longer distance should result in less
+  // I-cache pollution.
+ // In practice the distance will be smaller since constant pool emission is
+ // forced after function return and sometimes after unconditional branches.
+ static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
+
+ // Emission of the constant pool may be blocked in some code sequences.
+ int const_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_const_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_const_pool_use_;
+
+ // Relocation info generation
+  // Each relocation is encoded as a variable-size value.
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // Relocation info records are also used during code generation as temporary
+ // containers for constants and code target addresses until they are emitted
+ // to the constant pool. These pending relocation info records are temporarily
+ // stored in a separate buffer until a constant pool is emitted.
+ // If every instruction in a long sequence is accessing the pool, we need one
+ // pending relocation entry per instruction.
+
+  // The buffer of pending relocation info.
+  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+  // Number of pending reloc info entries in the buffer.
+ int num_pending_reloc_info_;
+
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ TypeFeedbackId recorded_ast_id_;
+
+ inline TypeFeedbackId RecordedAstId();
+ inline void ClearRecordedAstId();
+
+ protected:
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ ASSERT(recorded_ast_id_.IsNone());
+ recorded_ast_id_ = ast_id;
+ }
+
+ // Code generation
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries, and debug strings encoded in the instruction
+ // stream.
+ static const int kGap = 128;
+
+ public:
+ class FarBranchInfo {
+ public:
+ FarBranchInfo(int offset, Label* label)
+ : pc_offset_(offset), label_(label) {}
+ // Offset of the branch in the code generation buffer.
+ int pc_offset_;
+ // The label branched to.
+ Label* label_;
+ };
+
+ protected:
+ // Information about unresolved (forward) branches.
+ // The Assembler is only allowed to delete out-of-date information from here
+ // after a label is bound. The MacroAssembler uses this information to
+ // generate veneers.
+ //
+ // The second member gives information about the unresolved branch. The first
+ // member of the pair is the maximum offset that the branch can reach in the
+  // buffer. The map is sorted according to this reachable offset, making it
+  // easy to check when veneers need to be emitted.
+ // Note that the maximum reachable offset (first member of the pairs) should
+ // always be positive but has the same type as the return value for
+ // pc_offset() for convenience.
+ std::multimap<int, FarBranchInfo> unresolved_branches_;
+
+ private:
+ // If a veneer is emitted for a branch instruction, that instruction must be
+ // removed from the associated label's link chain so that the assembler does
+ // not later attempt (likely unsuccessfully) to patch it to branch directly to
+ // the label.
+ void DeleteUnresolvedBranchInfoForLabel(Label* label);
+
+ private:
+ // TODO(jbramley): VIXL uses next_literal_pool_check_ and
+ // literal_pool_monitor_ to determine when to consider emitting a literal
+ // pool. V8 doesn't use them, so they should either not be here at all, or
+ // should replace or be merged with next_buffer_check_ and
+ // const_pool_blocked_nesting_.
+ Instruction* next_literal_pool_check_;
+ unsigned literal_pool_monitor_;
+
+ PositionsRecorder positions_recorder_;
+ friend class PositionsRecorder;
+ friend class EnsureSpace;
+};
+
+class PatchingAssembler : public Assembler {
+ public:
+ // Create an Assembler with a buffer starting at 'start'.
+  // The buffer size is:
+  //     size of instructions to patch + kGap
+  // where kGap is the distance from which the Assembler tries to grow the
+  // buffer.
+ // If more or fewer instructions than expected are generated or if some
+ // relocation information takes space in the buffer, the PatchingAssembler
+ // will crash trying to grow the buffer.
+ PatchingAssembler(Instruction* start, unsigned count)
+ : Assembler(NULL,
+ reinterpret_cast<byte*>(start),
+ count * kInstructionSize + kGap) {
+ // Block constant pool emission.
+ StartBlockConstPool();
+ }
+
+ PatchingAssembler(byte* start, unsigned count)
+ : Assembler(NULL, start, count * kInstructionSize + kGap) {
+ // Block constant pool emission.
+ StartBlockConstPool();
+ }
+
+ ~PatchingAssembler() {
+ // Const pool should still be blocked.
+ ASSERT(is_const_pool_blocked());
+ EndBlockConstPool();
+    // Verify we have generated the number of instructions we expected.
+ ASSERT((pc_offset() + kGap) == buffer_size_);
+ // Verify no relocation information has been emitted.
+ ASSERT(num_pending_reloc_info() == 0);
+ // Flush the Instruction cache.
+ size_t length = buffer_size_ - kGap;
+ CPU::FlushICache(buffer_, length);
+ }
+};
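+
+// Typical usage (a sketch; the names instruction_to_patch and branch_offset
+// are illustrative):
+//   PatchingAssembler patcher(instruction_to_patch, 1);
+//   patcher.b(branch_offset);  // Overwrite with an unconditional branch.
+// The destructor asserts that exactly the expected number of instructions was
+// emitted and flushes the instruction cache for the patched range.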
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) {
+ assembler->CheckBuffer();
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_ASSEMBLER_A64_H_
diff --git a/deps/v8/src/a64/builtins-a64.cc b/deps/v8/src/a64/builtins-a64.cc
new file mode 100644
index 0000000000..797fbc3a54
--- /dev/null
+++ b/deps/v8/src/a64/builtins-a64.cc
@@ -0,0 +1,1479 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "codegen.h"
+#include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the native context.
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the Array function from the native context.
+ __ Ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+ Register result) {
+ // Load the native context.
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
+ __ Ldr(result, ContextMemOperand(result,
+ Context::INTERNAL_ARRAY_FUNCTION_INDEX));
+}
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments excluding receiver
+ // -- x1 : called function (only guaranteed when
+ // extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+  //  -- sp[8 * (argc - 1)] : first argument (argc == x0)
+  //  -- sp[8 * argc]       : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ Push(x1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects x0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ Add(x0, x0, num_extra_args + 1);
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_InternalArrayCode");
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ GenerateLoadInternalArrayFunction(masm, x1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Tst(x10, kSmiTagMask);
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+ __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_ArrayCode");
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, x1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array functions should be maps.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Tst(x10, kSmiTagMask);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(),
+ masm->isolate());
+ __ Mov(x2, Operand(undefined_sentinel));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_StringConstructCode");
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, x10, x11);
+
+ Register argc = x0;
+ Register function = x1;
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, x10);
+ __ Cmp(function, x10);
+ __ Assert(eq, kUnexpectedStringFunction);
+ }
+
+  // Load the first argument into x0 and get rid of the rest.
+ Label no_arguments;
+ __ Cbz(argc, &no_arguments);
+  // First arg = sp[(argc - 1) * 8].
+ __ Sub(argc, argc, 1);
+ __ Claim(argc, kXRegSizeInBytes);
+  // jssp now points to args[0]; load and drop args[0] + receiver.
+ // TODO(jbramley): Consider adding ClaimAndPoke.
+ __ Ldr(argc, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+
+ Register argument = x2;
+ Label not_cached, argument_is_string;
+ __ LookupNumberStringCache(argc, // Input.
+ argument, // Result.
+ x10, // Scratch.
+ x11, // Scratch.
+ x12, // Scratch.
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, x10, x11);
+ __ Bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- x2 : argument converted to string
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -----------------------------------
+
+ Label gc_required;
+ Register new_obj = x0;
+ __ Allocate(JSValue::kSize, new_obj, x10, x11, &gc_required, TAG_OBJECT);
+
+ // Initialize the String object.
+ Register map = x3;
+ __ LoadGlobalFunctionInitialMap(function, map, x10);
+ if (FLAG_debug_code) {
+ __ Ldrb(x4, FieldMemOperand(map, Map::kInstanceSizeOffset));
+ __ Cmp(x4, JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
+ __ Ldrb(x4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+ __ Cmp(x4, 0);
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+ }
+ __ Str(map, FieldMemOperand(new_obj, HeapObject::kMapOffset));
+
+ Register empty = x3;
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(empty, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+ __ Str(empty, FieldMemOperand(new_obj, JSObject::kElementsOffset));
+
+ __ Str(argument, FieldMemOperand(new_obj, JSValue::kValueOffset));
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == (4 * kPointerSize));
+
+ __ Ret();
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ Bind(&not_cached);
+ __ JumpIfSmi(argc, &convert_argument);
+
+ // Is it a String?
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Tbnz(x11, MaskToBit(kIsNotStringMask), &convert_argument);
+ __ Mov(argument, argc);
+ __ IncrementCounter(counters->string_ctor_string_value(), 1, x10, x11);
+ __ B(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into x2.
+ __ Bind(&convert_argument);
+ __ Push(function); // Preserve the function.
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, x10, x11);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(argc);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
+ __ Pop(function);
+ __ Mov(argument, x0);
+ __ B(&argument_is_string);
+
+ // Load the empty string into x2, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ Bind(&no_arguments);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ B(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to create a
+ // string wrapper.
+ __ Bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, x10, x11);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
+ __ Ret();
+}
+
+
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // - Push a copy of the function onto the stack.
+ // - Push another copy as a parameter to the runtime call.
+ __ Push(x1, x1);
+
+ __ CallRuntime(function_id, 1);
+
+  // - Restore the function.
+ __ Pop(x1);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x2);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x0);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However, not
+ // checking may delay installing ready functions, and always checking would be
+  // quite expensive. A good compromise is to first check against the stack
+  // limit as a cue for an interrupt signal.
+ Label ok;
+ __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ Bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+
+ Isolate* isolate = masm->isolate();
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the two incoming parameters on the stack.
+ Register argc = x0;
+ Register constructor = x1;
+ // x1: constructor function
+ __ SmiTag(argc);
+ __ Push(argc, constructor);
+ // sp[0] : Constructor function.
+ // sp[1]: number of arguments (smi-tagged)
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+#if ENABLE_DEBUGGER_SUPPORT
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ Mov(x2, Operand(debug_step_in_fp));
+ __ Ldr(x2, MemOperand(x2));
+ __ Cbnz(x2, &rt_call);
+#endif
+ // Load the initial map and verify that it is in fact a map.
+ Register init_map = x2;
+ __ Ldr(init_map,
+ FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(init_map, &rt_call);
+ __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
+
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+ __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
+ __ B(eq, &rt_call);
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ Ldr(x3, FieldMemOperand(constructor,
+ JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset);
+ __ Ldrb(x4, constructor_count);
+ __ Subs(x4, x4, 1);
+ __ Strb(x4, constructor_count);
+ __ B(ne, &allocate);
+
+ // Push the constructor and map to the stack, and the constructor again
+ // as argument to the runtime call.
+ __ Push(constructor, init_map, constructor);
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ Pop(init_map, constructor);
+ __ Bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ Register obj_size = x3;
+ Register new_obj = x4;
+ __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
+ __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+
+      // The JSObject has been allocated; now initialize the fields. The map
+      // is set to the initial map, and properties and elements are set to the
+      // empty fixed array.
+      // NB: the object pointer is not tagged, so MemOperand is used.
+ Register empty = x5;
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset));
+ __ Str(empty, MemOperand(new_obj, JSObject::kPropertiesOffset));
+ __ Str(empty, MemOperand(new_obj, JSObject::kElementsOffset));
+
+ Register first_prop = x5;
+ __ Add(first_prop, new_obj, JSObject::kHeaderSize);
+
+ // Fill all of the in-object properties with the appropriate filler.
+ Register obj_end = x6;
+ __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
+ Register undef = x7;
+ __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+
+ // Obtain number of pre-allocated property fields and in-object
+ // properties.
+ Register prealloc_fields = x10;
+ Register inobject_props = x11;
+ Register inst_sizes = x11;
+ __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
+ __ Ubfx(prealloc_fields, inst_sizes,
+ Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Ubfx(inobject_props, inst_sizes,
+ Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
+
+ if (count_constructions) {
+ // Register first_non_prealloc is the offset of the first field after
+ // pre-allocated fields.
+ Register first_non_prealloc = x12;
+ __ Add(first_non_prealloc, first_prop,
+ Operand(prealloc_fields, LSL, kPointerSizeLog2));
+
+ if (FLAG_debug_code) {
+ __ Cmp(first_non_prealloc, obj_end);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(first_prop, first_non_prealloc, undef);
+ // To allow for truncation.
+ __ LoadRoot(x12, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(first_prop, obj_end, x12);
+ } else {
+ __ InitializeFieldsWithFiller(first_prop, obj_end, undef);
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ Add(new_obj, new_obj, kHeapObjectTag);
+
+ // Check if a non-empty properties array is needed. Continue with
+ // allocated object if not, or fall through to runtime call if it is.
+ Register element_count = x3;
+ __ Ldrb(x3, FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
+      // The instance sizes field contains both the pre-allocated property
+      // fields and the in-object properties.
+ __ Add(x3, x3, prealloc_fields);
+ __ Subs(element_count, x3, inobject_props);
+
+ // Done if no extra properties are to be allocated.
+ __ B(eq, &allocated);
+ __ Assert(pl, kPropertyAllocationCountFailed);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ Register new_array = x5;
+ Register array_size = x6;
+ __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
+ __ Allocate(array_size, new_array, x11, x12, &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
+ SIZE_IN_WORDS));
+
+ Register array_map = x10;
+ __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
+ __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
+ __ SmiTag(x0, element_count);
+ __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
+
+ // Initialize the fields to undefined.
+ Register elements = x10;
+ Register elements_end = x11;
+ __ Add(elements, new_array, FixedArray::kHeaderSize);
+ __ Add(elements_end, elements,
+ Operand(element_count, LSL, kPointerSizeLog2));
+ __ InitializeFieldsWithFiller(elements, elements_end, undef);
+
+ // Store the initialized FixedArray into the properties field of the
+ // JSObject.
+ __ Add(new_array, new_array, kHeapObjectTag);
+ __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ __ B(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+ __ Bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(new_obj, x14);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ __ Bind(&rt_call);
+ __ Push(constructor); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ Mov(x4, x0);
+
+ // Receiver for constructor call allocated.
+ // x4: JSObject
+ __ Bind(&allocated);
+ __ Push(x4, x4);
+
+ // Reload the number of arguments from the stack.
+ // Set it up in x0 for the function call below.
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ __ Peek(constructor, 2 * kXRegSizeInBytes); // Load constructor.
+ __ Peek(argc, 3 * kXRegSizeInBytes); // Load number of arguments.
+ __ SmiUntag(argc);
+
+ // Set up pointer to last argument.
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
+
+ // Copy arguments and receiver to the expression stack.
+    // Copy two values per iteration so that ldp/stp can be used.
+ // x0: number of arguments
+ // x1: constructor function
+ // x2: address of last argument (caller sp)
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ // Compute the start address of the copy in x3.
+ __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
+ Label loop, entry, done_copying_arguments;
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex));
+ __ Push(x11, x10);
+ __ Bind(&entry);
+ __ Cmp(x3, x2);
+ __ B(gt, &loop);
+    // Because we copied values 2 by 2, we may have copied one extra value.
+ // Drop it if that is the case.
+ __ B(eq, &done_copying_arguments);
+ __ Drop(1);
+ __ Bind(&done_copying_arguments);
+
+ // Call the function.
+ // x0: number of arguments
+ // x1: constructor function
+ if (is_api_function) {
+ __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(argc);
+ __ InvokeFunction(constructor, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function && !count_constructions) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore the context from the frame.
+ // x0: result
+ // jssp[0]: receiver
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(x0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0);
+
+ // Remove the receiver from the stack, remove caller arguments, and
+ // return.
+ __ Bind(&exit);
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Peek(x1, 2 * kXRegSizeInBytes);
+
+ // Leave construct frame.
+ }
+
+ __ DropBySMI(x1);
+ __ Drop(1);
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+// Input:
+// x0: code entry.
+// x1: function.
+// x2: receiver.
+// x3: argc.
+// x4: argv.
+// Output:
+// x0: result.
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from JSEntryStub::GenerateBody().
+ Register function = x1;
+ Register receiver = x2;
+ Register argc = x3;
+ Register argv = x4;
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Clear the context before we push it when entering the internal frame.
+ __ Mov(cp, 0);
+
+ {
+ // Enter an internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Set up the context from the function argument.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ __ InitializeRootRegister();
+
+ // Push the function and the receiver onto the stack.
+ __ Push(function, receiver);
+
+ // Copy arguments to the stack in a loop, in reverse order.
+ // x3: argc.
+ // x4: argv.
+ Label loop, entry;
+ // Compute the copy end address.
+ __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
+
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
+ __ Ldr(x12, MemOperand(x11)); // Dereference the handle.
+ __ Push(x12); // Push the argument.
+ __ Bind(&entry);
+ __ Cmp(x10, argv);
+ __ B(ne, &loop);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ // The original values have been saved in JSEntryStub::GenerateBody().
+ __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
+ __ Mov(x20, x19);
+ __ Mov(x21, x19);
+ __ Mov(x22, x19);
+ __ Mov(x23, x19);
+ __ Mov(x24, x19);
+ __ Mov(x25, x19);
+ // Don't initialize the reserved registers.
+ // x26 : root register (root).
+ // x27 : context pointer (cp).
+ // x28 : JS stack pointer (jssp).
+ // x29 : frame pointer (fp).
+
+ // TODO(alexandre): Revisit the MAsm function invocation mechanisms.
+ // Currently there is a mix of statically and dynamically allocated
+ // registers.
+ __ Mov(x0, argc);
+ if (is_construct) {
+ // No type feedback cell is available.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ Mov(x2, Operand(undefined_sentinel));
+
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(x0);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+    // Exit the JS internal frame, remove the parameters (except the
+    // function), and return.
+ }
+
+ // Result is in x0. Return.
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ Register function = x1;
+
+ // Preserve function. At the same time, push arguments for
+ // kCompileOptimized.
+ __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
+ __ Push(function, function, x10);
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+
+  // Restore the function.
+ __ Pop(function);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection, which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code fast, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+ }
+
+ // The calling function has been made young again, so return to execute the
+ // real frame set-up code.
+ __ Br(x0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection, which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(
+ masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ EmitFrameSetupForCodeAgePatching(masm);
+ }
+
+ // Jump to point after the code-age stub.
+ __ Add(x0, x0, kCodeAgeSequenceSize);
+ __ Br(x0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ // TODO(jbramley): Is it correct (and appropriate) to use safepoint
+ // registers here? According to the comment above, we should only need to
+ // preserve the registers with parameters.
+ __ PushXRegList(kSafepointSavedRegisters);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ PopXRegList(kSafepointSavedRegisters);
+ }
+
+ // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
+ __ Drop(1);
+
+ // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
+ // into lr before it jumps here.
+ __ Br(lr);
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the deoptimization type to the runtime system.
+ __ Mov(x0, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ }
+
+ // Get the full codegen state from the stack and untag it.
+ Register state = x6;
+ __ Peek(state, 0);
+ __ SmiUntag(state);
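+  // The deoptimizer pushed a FullCodeGenerator::State value: NO_REGISTERS if
+  // there is no live result, or TOS_REG if the value of the result register
+  // (x0) was also pushed and needs to be reloaded below.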
+
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ CompareAndBranch(
+ state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register);
+ __ Drop(1); // Remove state.
+ __ Ret();
+
+ __ Bind(&with_tos_register);
+ // Reload TOS register.
+ __ Peek(x0, kPointerSize);
+ __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state);
+ __ Drop(2); // Remove state and TOS.
+ __ Ret();
+
+ __ Bind(&unknown_state);
+ __ Abort(kInvalidFullCodegenState);
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ Push(x0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ // If the code object is null, just return to the unoptimized code.
+ Label skip;
+ __ CompareAndBranch(x0, Operand(Smi::FromInt(0)), ne, &skip);
+ __ Ret();
+
+ __ Bind(&skip);
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ Ldrsw(w1, UntagSmiFieldMemOperand(x1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ Add(x0, x0, x1);
+ __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&ok);
+ __ Ret();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ enum {
+ call_type_JS_func = 0,
+ call_type_func_proxy = 1,
+ call_type_non_func = 2
+ };
+ Register argc = x0;
+ Register function = x1;
+ Register call_type = x4;
+ Register scratch1 = x10;
+ Register scratch2 = x11;
+ Register receiver_type = x13;
+
+ ASM_LOCATION("Builtins::Generate_FunctionCall");
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ Cbnz(argc, &done);
+ __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+ __ Push(scratch1);
+ __ Mov(argc, 1);
+ __ Bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
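+  // The callee was pushed in the receiver slot, just below the arguments, so
+  // with argc arguments on the stack it is found at jssp[argc].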
+ Label slow, non_function;
+ __ Peek(function, Operand(argc, LSL, kXRegSizeInBytesLog2));
+ __ JumpIfSmi(function, &non_function);
+ __ JumpIfNotObjectType(function, scratch1, receiver_type,
+ JS_FUNCTION_TYPE, &slow);
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ Label shift_arguments;
+ __ Mov(call_type, static_cast<int>(call_type_JS_func));
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+    // Also do not transform the receiver for native functions (the compiler
+    // hints are loaded into scratch2 below).
+ __ Ldr(scratch1,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch2.W(),
+ FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestAndBranchIfAnySet(
+ scratch2.W(),
+ (1 << SharedFunctionInfo::kStrictModeFunction) |
+ (1 << SharedFunctionInfo::kNative),
+ &shift_arguments);
+
+ // Compute the receiver in non-strict mode.
+ Register receiver = x2;
+ __ Sub(scratch1, argc, 1);
+ __ Peek(receiver, Operand(scratch1, LSL, kXRegSizeInBytesLog2));
+ __ JumpIfSmi(receiver, &convert_to_object);
+
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
+ &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(receiver, scratch1, scratch2,
+ FIRST_SPEC_OBJECT_TYPE, &shift_arguments, ge);
+
+ __ Bind(&convert_to_object);
+
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+
+ __ Push(argc, receiver);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(receiver, x0);
+
+ __ Pop(argc);
+ __ SmiUntag(argc);
+
+ // Exit the internal frame.
+ }
+
+ // Restore the function and flag in the registers.
+ __ Peek(function, Operand(argc, LSL, kXRegSizeInBytesLog2));
+ __ Mov(call_type, static_cast<int>(call_type_JS_func));
+ __ B(&patch_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(receiver, GlobalObjectMemOperand());
+ __ Ldr(receiver,
+ FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+
+ __ Bind(&patch_receiver);
+ __ Sub(scratch1, argc, 1);
+ __ Poke(receiver, Operand(scratch1, LSL, kXRegSizeInBytesLog2));
+
+ __ B(&shift_arguments);
+ }
+
+ // 3b. Check for function proxy.
+ __ Bind(&slow);
+ __ Mov(call_type, static_cast<int>(call_type_func_proxy));
+ __ Cmp(receiver_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(eq, &shift_arguments);
+ __ Bind(&non_function);
+ __ Mov(call_type, static_cast<int>(call_type_non_func));
+
+ // 3c. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Sub(scratch1, argc, 1);
+ __ Poke(function, Operand(scratch1, LSL, kXRegSizeInBytesLog2));
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Bind(&shift_arguments);
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is jssp.
+ __ Add(scratch2, jssp, Operand(argc, LSL, kPointerSizeLog2));
+ __ Sub(scratch1, scratch2, kPointerSize);
+
+ __ Bind(&loop);
+ __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex));
+ __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex));
+ __ Cmp(scratch1, jssp);
+ __ B(ge, &loop);
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ Sub(argc, argc, 1);
+ __ Drop(1);
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ { Label js_function, non_proxy;
+ __ Cbz(call_type, &js_function);
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ Mov(x2, 0);
+ __ Cmp(call_type, static_cast<int>(call_type_func_proxy));
+ __ B(ne, &non_proxy);
+
+ __ Push(function); // Re-add proxy object as additional argument.
+ __ Add(argc, argc, 1);
+ __ GetBuiltinFunction(function, Builtins::CALL_FUNCTION_PROXY);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&non_proxy);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&js_function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+  // (tail-call) to the code in register x3 without checking arguments.
+ __ Ldr(x3, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(x2,
+ FieldMemOperand(x3,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ Label dont_adapt_args;
+ __ Cmp(x2, argc); // Check formal and actual parameter counts.
+ __ B(eq, &dont_adapt_args);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&dont_adapt_args);
+
+ __ Ldr(x3, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(0);
+ __ InvokeCode(x3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_FunctionApply");
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kReceiverOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
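+  // Layout assumed by these offsets: above the internal frame built below sit
+  // the apply() operands pushed by the caller (args, receiver and function),
+  // while kIndexOffset and kLimitOffset name expression slots of that frame
+  // which hold the copy loop's current index and limit.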
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+
+ Register args = x12;
+ Register receiver = x14;
+ Register function = x15;
+
+ // Get the length of the arguments via a builtin call.
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ __ Ldr(args, MemOperand(fp, kArgsOffset));
+ __ Push(function, args);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ Register argc = x0;
+
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+ Label enough_stack_space;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here which will cause x10 to become negative.
+ // TODO(jbramley): Check that the stack usage here is safe.
+ __ Sub(x10, jssp, x10);
+ // Check if the arguments will overflow the stack.
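+    // Note that argc is still a tagged smi here, so shifting it right by
+    // (kSmiShift - kPointerSizeLog2) converts the tagged count into the
+    // number of bytes the arguments will occupy.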
+ __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
+ __ B(gt, &enough_stack_space);
+ // There is not enough stack space, so use a builtin to throw an appropriate
+ // error.
+ __ Push(function, argc);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // We should never return from the APPLY_OVERFLOW builtin.
+ if (__ emit_debug_code()) {
+ __ Unreachable();
+ }
+
+ __ Bind(&enough_stack_space);
+ // Push current limit and index.
+ __ Mov(x1, 0); // Initial index.
+ __ Push(argc, x1);
+
+ Label push_receiver;
+ __ Ldr(receiver, MemOperand(fp, kReceiverOffset));
+
+ // Check that the function is a JS function. Otherwise it must be a proxy.
+    // When it is not, the function proxy will be invoked later.
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE,
+ &push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // Load the shared function info.
+ __ Ldr(x2, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute and push the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label convert_receiver_to_object, use_global_receiver;
+ __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
+ // Do not transform the receiver for native functions.
+ __ Tbnz(x10, SharedFunctionInfo::kNative, &push_receiver);
+
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(receiver, &convert_receiver_to_object);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
+ &use_global_receiver);
+
+ // Check if the receiver is already a JavaScript object.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(receiver, x10, x11, FIRST_SPEC_OBJECT_TYPE,
+ &push_receiver, ge);
+
+ // Call a builtin to convert the receiver to a regular object.
+ __ Bind(&convert_receiver_to_object);
+ __ Push(receiver);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(receiver, x0);
+ __ B(&push_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+
+    // Push the receiver.
+ __ Bind(&push_receiver);
+ __ Push(receiver);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ Register current = x0;
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ B(&entry);
+
+ __ Bind(&loop);
+ // Load the current argument from the arguments array and push it.
+ // TODO(all): Couldn't we optimize this for JS arrays?
+
+ __ Ldr(x1, MemOperand(fp, kArgsOffset));
+ __ Push(x1, current);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ Push(x0);
+
+    // Advance the index stored in the frame.
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ Add(current, current, Operand(Smi::FromInt(1)));
+ __ Str(current, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ Bind(&entry);
+ __ Ldr(x1, MemOperand(fp, kLimitOffset));
+ __ Cmp(current, x1);
+ __ B(ne, &loop);
+
+ // At the end of the loop, the number of arguments is stored in 'current',
+ // represented as a smi.
+
+    function = x1;  // From now on we want the function to be kept in x1.
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+
+ // Call the function.
+ Label call_proxy;
+ ParameterCount actual(current);
+ __ SmiUntag(current);
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ frame_scope.GenerateLeaveFrame();
+ __ Drop(3);
+ __ Ret();
+
+ // Call the function proxy.
+ __ Bind(&call_proxy);
+ // x0 : argc
+ // x1 : function
+ __ Push(function); // Add function proxy as last argument.
+ __ Add(x0, x0, 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ }
+ __ Drop(3);
+ __ Ret();
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ SmiTag(x10, x0);
+ __ Mov(x11, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Push(lr, fp);
+ __ Push(x11, x1, x10);
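+  // The frame now holds, from the top of the stack down: the smi-tagged
+  // argument count (x10), the function (x1), the ARGUMENTS_ADAPTOR marker,
+  // the saved fp and the saved lr. The Add below is intended to leave fp
+  // pointing at the saved fp slot, so that LeaveArgumentsAdaptorFrame can
+  // find the argument count below it.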
+ __ Add(fp, jssp,
+ StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then drop the parameters and the receiver.
+ __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
+ __ Mov(jssp, fp);
+ __ Pop(fp, lr);
+ __ DropBySMI(x10, kXRegSizeInBytes);
+ __ Drop(1);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
+ // ----------- S t a t e -------------
+ // -- x0 : actual number of arguments
+ // -- x1 : function (passed through to callee)
+ // -- x2 : expected number of arguments
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ __ Cmp(x0, x2);
+ __ B(lt, &too_few);
+ __ Cmp(x2, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ B(eq, &dont_adapt_arguments);
+
+ { // Enough parameters: actual >= expected
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into x10 and end address into x11.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ __ Add(x10, fp, Operand(x0, LSL, kPointerSizeLog2));
+    // Adjust for the return address and receiver.
+ __ Add(x10, x10, 2 * kPointerSize);
+ __ Sub(x11, x10, Operand(x2, LSL, kPointerSizeLog2));
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ // x10: copy start address
+ // x11: copy end address
+
+ // TODO(all): Should we push values 2 by 2?
+ Label copy;
+ __ Bind(&copy);
+ __ Cmp(x10, x11);
+ __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
+ __ Push(x12);
+ __ B(gt, &copy);
+
+ __ B(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected
+ __ Bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into x10 and copy end address into x11.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ // Adjust for return address.
+ __ Add(x11, fp, 1 * kPointerSize);
+ __ Add(x10, x11, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, 1 * kPointerSize);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ // x10: copy start address
+ // x11: copy end address
+ Label copy;
+ __ Bind(&copy);
+ __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
+ __ Push(x12);
+ __ Cmp(x10, x11); // Compare before moving to next argument.
+ __ B(ne, &copy);
+
+ // Fill the remaining expected arguments with undefined.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Sub(x11, fp, Operand(x2, LSL, kPointerSizeLog2));
+ // Adjust for the arguments adaptor frame and already pushed receiver.
+ __ Sub(x11, x11,
+ StandardFrameConstants::kFixedFrameSizeFromFp + (2 * kPointerSize));
+
+ // TODO(all): Optimize this to use ldp?
+ Label fill;
+ __ Bind(&fill);
+ __ Push(x10);
+ __ Cmp(jssp, x11);
+ __ B(ne, &fill);
+ }
+
+ // Arguments have been adapted. Now call the entry point.
+ __ Bind(&invoke);
+ __ Call(x3);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+ // Call the entry point without adapting the arguments.
+ __ Bind(&dont_adapt_arguments);
+ __ Jump(x3);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/code-stubs-a64.cc b/deps/v8/src/a64/code-stubs-a64.cc
new file mode 100644
index 0000000000..b640677cae
--- /dev/null
+++ b/deps/v8/src/a64/code-stubs-a64.cc
@@ -0,0 +1,5809 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: function info
+ static Register registers[] = { x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: function
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: array literals array
+ // x2: array literal index
+ // x1: constant elements
+ static Register registers[] = { x3, x2, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+}
+
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: object literals array
+ // x2: object literal index
+ // x1: constant properties
+ // x0: object literal flags
+ static Register registers[] = { x3, x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+}
+
+
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: feedback vector
+ // x3: call feedback slot
+ static Register registers[] = { x2, x3 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: length
+ // x1: index (of last match)
+ // x0: string
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+}
+
+
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: receiver
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: receiver
+ // x1: key
+ // x0: value
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value (js_array)
+ // x1: to_map
+ static Register registers[] = { x0, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value to compare
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: function
+ // x2: allocation site with elements kind
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x2, x0 };
+ static Register registers_no_args[] = { x1, x2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+    // Stack parameters: the constructor pointer and the single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: constructor function
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x0 };
+ static Register registers_no_args[] = { x1 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+    // Stack parameters: the constructor pointer and the single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x2: key (unused)
+ // x0: value
+ static Register registers[] = { x1, x2, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ // x3: target map
+ // x1: key
+ // x2: receiver
+ static Register registers[] = { x0, x3, x1, x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: allocation site
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ static PlatformCallInterfaceDescriptor default_descriptor =
+ PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ static PlatformCallInterfaceDescriptor noInlineDescriptor =
+ PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { x1, // JSFunction
+ cp, // context
+ x0, // actual number of arguments
+ x2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ x2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ x2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ x0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { x0, // callee
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT((descriptor->register_param_count_ == 0) ||
+ x0.Is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ // TODO(jbramley): Try to push these in blocks.
+ for (int i = 0; i < param_count; ++i) {
+ __ Push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ Ret();
+}
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label done;
+ Register input = source();
+ Register result = destination();
+ ASSERT(is_truncating());
+
+ ASSERT(result.Is64Bits());
+ ASSERT(jssp.Is(masm->StackPointer()));
+
+ int double_offset = offset();
+
+ DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
+ Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
+ Register scratch2 =
+ GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
+
+ __ Push(scratch1, scratch2);
+ // Account for saved regs if input is jssp.
+ if (input.is(jssp)) double_offset += 2 * kPointerSize;
+
+ if (!skip_fastpath()) {
+ __ Push(double_scratch);
+ if (input.is(jssp)) double_offset += 1 * kDoubleSize;
+ __ Ldr(double_scratch, MemOperand(input, double_offset));
+ // Try to convert with a FPU convert instruction. This handles all
+ // non-saturating cases.
+ __ TryInlineTruncateDoubleToI(result, double_scratch, &done);
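+    // The fast path falls through on failure. Copy the raw double bits into
+    // the result register for the manual conversion below.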
+ __ Fmov(result, double_scratch);
+ } else {
+ __ Ldr(result, MemOperand(input, double_offset));
+ }
+
+ // If we reach here we need to manually convert the input to an int32.
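+  // A sketch of the math used below: writing the input as (-1)^s * 1.m * 2^e,
+  // with a 52-bit (kMantissaBits) fraction m, the truncated integer value is
+  // the implicit-one mantissa (m | 2^52) shifted left by (e - 52). Once that
+  // shift reaches 32, i.e. e >= 84, no mantissa bit lands in the low 32 bits
+  // and the int32 result is 0.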
+
+ // Extract the exponent.
+ Register exponent = scratch1;
+ __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
+ HeapNumber::kExponentBits);
+
+  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
+ // the mantissa gets shifted completely out of the int32_t result.
+ __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
+ __ CzeroX(result, ge);
+ __ B(ge, &done);
+
+ // The Fcvtzs sequence handles all cases except where the conversion causes
+ // signed overflow in the int64_t target. Since we've already handled
+ // exponents >= 84, we can guarantee that 63 <= exponent < 84.
+
+ if (masm->emit_debug_code()) {
+ __ Cmp(exponent, HeapNumber::kExponentBias + 63);
+ // Exponents less than this should have been handled by the Fcvt case.
+ __ Check(ge, kUnexpectedValue);
+ }
+
+ // Isolate the mantissa bits, and set the implicit '1'.
+ Register mantissa = scratch2;
+ __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
+ __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
+
+ // Negate the mantissa if necessary.
+ __ Tst(result, kXSignMask);
+ __ Cneg(mantissa, mantissa, ne);
+
+  // Shift the mantissa bits into the correct place. We know that we have to
+  // shift it left here, because exponent >= 63 >= kMantissaBits.
+ __ Sub(exponent, exponent,
+ HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
+ __ Lsl(result, mantissa, exponent);
+
+ __ Bind(&done);
+ if (!skip_fastpath()) {
+ __ Pop(double_scratch);
+ }
+ __ Pop(scratch2, scratch1);
+ __ Ret();
+}
+
+
+// See call site for description.
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch,
+ FPRegister double_scratch,
+ Label* slow,
+ Condition cond) {
+ ASSERT(!AreAliased(left, right, scratch));
+ Label not_identical, return_equal, heap_number;
+ Register result = x0;
+
+ __ Cmp(right, left);
+ __ B(ne, &not_identical);
+
+ // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+  // The operands are identical, and the smi-only case has already been
+  // handled, so neither of them is a smi. If the value is not a heap number,
+  // then return equal.
+ if ((cond == lt) || (cond == gt)) {
+ __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
+ ge);
+ } else {
+ Register right_type = scratch;
+ __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
+ &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if ((cond == le) || (cond == ge)) {
+ __ Cmp(right_type, ODDBALL_TYPE);
+ __ B(ne, &return_equal);
+ __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ Mov(result, GREATER);
+ } else {
+ // undefined >= undefined should fail.
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+ }
+ }
+
+ __ Bind(&return_equal);
+ if (cond == lt) {
+ __ Mov(result, GREATER); // Things aren't less than themselves.
+ } else if (cond == gt) {
+ __ Mov(result, LESS); // Things aren't greater than themselves.
+ } else {
+ __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ // Cases lt and gt have been handled earlier, and case ne is never seen, as
+ // it is handled in the parser (see Parser::ParseBinaryExpression). We are
+ // only concerned with cases ge, le and eq here.
+ if ((cond != lt) && (cond != gt)) {
+ ASSERT((cond == ge) || (cond == le) || (cond == eq));
+ __ Bind(&heap_number);
+ // Left and right are identical pointers to a heap number object. Return
+ // non-equal if the heap number is a NaN, and equal otherwise. Comparing
+ // the number to itself will set the overflow flag iff the number is NaN.
+ __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ Fcmp(double_scratch, double_scratch);
+ __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
+
+ if (cond == le) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+
+ // No fall through here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&not_identical);
+}
+
+
+// See call site for description.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_type,
+ Register right_type,
+ Register scratch) {
+ ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
+
+ if (masm->emit_debug_code()) {
+ // We assume that the arguments are not identical.
+ __ Cmp(left, right);
+ __ Assert(ne, kExpectedNonIdenticalObjects);
+ }
+
+ // If either operand is a JS object or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ Label right_non_object;
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, &right_non_object);
+
+ // Return non-zero - x0 already contains a non-zero pointer.
+ ASSERT(left.is(x0) || right.is(x0));
+ Label return_not_equal;
+ __ Bind(&return_not_equal);
+ __ Ret();
+
+ __ Bind(&right_non_object);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ Cmp(right_type, ODDBALL_TYPE);
+
+ // If right is not ODDBALL, test left. Otherwise, set eq condition.
+ __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
+
+ // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
+ // Otherwise, right or left is ODDBALL, so set a ge condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
+
+ __ B(ge, &return_not_equal);
+
+ // Internalized strings are unique, so they can only be equal if they are the
+ // same object. We have already tested that case, so if left and right are
+ // both internalized strings, they cannot be equal.
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ __ Orr(scratch, left_type, right_type);
+ __ TestAndBranchIfAllClear(
+ scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
+}
+
+
+// See call site for description.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ FPRegister left_d,
+ FPRegister right_d,
+ Register scratch,
+ Label* slow,
+ bool strict) {
+ ASSERT(!AreAliased(left, right, scratch));
+ ASSERT(!AreAliased(left_d, right_d));
+ ASSERT((left.is(x0) && right.is(x1)) ||
+ (right.is(x0) && left.is(x1)));
+ Register result = x0;
+
+ Label right_is_smi, done;
+ __ JumpIfSmi(right, &right_is_smi);
+
+ // Left is the smi. Check whether right is a heap number.
+ if (strict) {
+ // If right is not a number and left is a smi, then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!right.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Left is the smi. Right is a heap number. Load right value into right_d, and
+ // convert left smi into double in left_d.
+ __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(left_d, left);
+ __ B(&done);
+
+ __ Bind(&right_is_smi);
+ // Right is a smi. Check whether the non-smi left is a heap number.
+ if (strict) {
+ // If left is not a number and right is a smi then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!left.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Right is the smi. Left is a heap number. Load left value into left_d, and
+ // convert right smi into double in right_d.
+ __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(right_d, right);
+
+ // Fall through to both_loaded_as_doubles.
+ __ Bind(&done);
+}
+
+
+// Fast negative check for internalized-to-internalized equality.
+// See call site for description.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_map,
+ Register right_map,
+ Register left_type,
+ Register right_type,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
+ Register result = x0;
+
+ Label object_test;
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ // TODO(all): reexamine this branch sequence for optimisation wrt branch
+ // prediction.
+ __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
+ __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+
+ // Both are internalized. We already checked that they weren't the same
+ // pointer, so they are not equal.
+ __ Mov(result, NOT_EQUAL);
+ __ Ret();
+
+ __ Bind(&object_test);
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+
+ // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
+ // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
+
+ __ B(lt, not_both_strings);
+
+ // If both objects are undetectable, they are equal. Otherwise, they are not
+ // equal, since they are different objects and an object is not equal to
+ // undefined.
+
+ // Returning here, so we can corrupt right_type and left_type.
+ Register right_bitfield = right_type;
+ Register left_bitfield = left_type;
+ __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
+ __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
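+  // The first And keeps the bits set in both bit fields, the second isolates
+  // the kIsUndetectable bit, and the Eor inverts it, so the result is 0
+  // (EQUAL) only if both objects are undetectable and non-zero otherwise.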
+ __ And(result, right_bitfield, left_bitfield);
+ __ And(result, result, 1 << Map::kIsUndetectable);
+ __ Eor(result, result, 1 << Map::kIsUndetectable);
+ __ Ret();
+}
+
+
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ Bind(&ok);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = x1;
+ Register rhs = x0;
+ Register result = x0;
+ Condition cond = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+ Label not_two_smis, smi_done;
+ __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
+ __ SmiUntag(lhs);
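+  // The sign of the untagged difference encodes the result directly: negative
+  // for 'less', zero for 'equal' and positive for 'greater'.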
+ __ Sub(result, lhs, Operand::UntagSmi(rhs));
+ __ Ret();
+
+ __ Bind(&not_two_smis);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so it is
+ // certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
+
+ // If either is a smi (we know that at least one is not a smi), then they can
+ // only be strictly equal if the other is a HeapNumber.
+ __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
+
+ // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
+ // can:
+ // 1) Return the answer.
+ // 2) Branch to the slow case.
+ // 3) Fall through to both_loaded_as_doubles.
+ // In case 3, we have found out that we were dealing with a number-number
+  // comparison. The double values of the numbers have been loaded: right into
+  // rhs_d, left into lhs_d.
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+ EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
+
+ __ Bind(&both_loaded_as_doubles);
+ // The arguments have been converted to doubles and stored in rhs_d and
+ // lhs_d.
+ Label nan;
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &nan); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&nan);
+ // Left and/or right is a NaN. Load the result register with whatever makes
+ // the comparison fail, since comparisons with NaN always fail (except ne,
+  // which is filtered out at a higher level).
+ ASSERT(cond != ne);
+ if ((cond == lt) || (cond == le)) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+
+ __ Bind(&not_smis);
+ // At this point we know we are dealing with two different objects, and
+ // neither of them is a smi. The objects are in rhs_ and lhs_.
+
+ // Load the maps and types of the objects.
+ Register rhs_map = x10;
+ Register rhs_type = x11;
+ Register lhs_map = x12;
+ Register lhs_type = x13;
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+
+ if (strict()) {
+ // This emits a non-equal return sequence for some object types, or falls
+ // through if it was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
+ }
+
+ Label check_for_internalized_strings;
+ Label flat_string_check;
+ // Check for heap number comparison. Branch to earlier double comparison code
+ // if they are heap numbers, otherwise, branch to internalized string check.
+ __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
+ __ B(ne, &check_for_internalized_strings);
+ __ Cmp(lhs_map, rhs_map);
+
+ // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
+ // string check.
+ __ B(ne, &flat_string_check);
+
+ // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
+ // comparison code.
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ B(&both_loaded_as_doubles);
+
+ __ Bind(&check_for_internalized_strings);
+ // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
+ // of internalized strings.
+ if ((cond == eq) && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
+ // Otherwise branches to the string case or not both strings case.
+ EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
+ lhs_type, rhs_type,
+ &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ Bind(&flat_string_check);
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
+ x15, &slow);
+
+ Isolate* isolate = masm->isolate();
+ __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10,
+ x11);
+ if (cond == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
+ x10, x11, x12);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
+ x10, x11, x12, x13);
+ }
+
+ // Never fall through to here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&slow);
+
+ __ Push(lhs, rhs);
+ // Figure out which native to call and set up the arguments.
+ Builtins::JavaScript native;
+ if (cond == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if ((cond == lt) || (cond == le)) {
+ ncr = GREATER;
+ } else {
+ ASSERT((cond == gt) || (cond == ge)); // remaining cases
+ ncr = LESS;
+ }
+ __ Mov(x10, Operand(Smi::FromInt(ncr)));
+ __ Push(x10);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9,
+ // ip0 and ip1 are corrupted by the call into C.
+ CPURegList saved_regs = kCallerSaved;
+ saved_regs.Remove(ip0);
+ saved_regs.Remove(ip1);
+ saved_regs.Remove(x8);
+ saved_regs.Remove(x9);
+
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ PushCPURegList(saved_regs);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushCPURegList(kCallerSavedFP);
+ }
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(x0, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ 1, 0);
+
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopCPURegList(kCallerSavedFP);
+ }
+ __ PopCPURegList(saved_regs);
+ __ Ret();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode(isolate);
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ // Stack on entry:
+ // jssp[0]: Exponent (as a tagged value).
+ // jssp[1]: Base (as a tagged value).
+ //
+ // The (tagged) result will be returned in x0, as a heap number.
+
+ Register result_tagged = x0;
+ Register base_tagged = x10;
+ Register exponent_tagged = x11;
+ Register exponent_integer = x12;
+ Register scratch1 = x14;
+ Register scratch0 = x15;
+ Register saved_lr = x19;
+ FPRegister result_double = d0;
+ FPRegister base_double = d0;
+ FPRegister exponent_double = d1;
+ FPRegister base_double_copy = d2;
+ FPRegister scratch1_double = d6;
+ FPRegister scratch0_double = d7;
+
+ // A fast-path for integer exponents.
+ Label exponent_is_smi, exponent_is_integer;
+ // Bail out to runtime.
+ Label call_runtime;
+ // Allocate a heap number for the result, and return it.
+ Label done;
+
+ // Unpack the inputs.
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi;
+ Label unpack_exponent;
+
+ __ Pop(exponent_tagged, base_tagged);
+
+ __ JumpIfSmi(base_tagged, &base_is_smi);
+ __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
+ // base_tagged is a heap number, so load its double value.
+ __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
+ __ B(&unpack_exponent);
+ __ Bind(&base_is_smi);
+ // base_tagged is a SMI, so untag it and convert it to a double.
+ __ SmiUntagToDouble(base_double, base_tagged);
+
+ __ Bind(&unpack_exponent);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // d0 base_double The base as a double.
+ __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+ __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
+ // exponent_tagged is a heap number, so load its double value.
+ __ Ldr(exponent_double,
+ FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+ __ Ldr(exponent_double,
+ FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+ }
+
+ // Handle double (heap number) exponents.
+ if (exponent_type_ != INTEGER) {
+ // Detect integer exponents stored as doubles and handle those in the
+ // integer fast-path.
+ __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
+ scratch0_double, &exponent_is_integer);
+
+ if (exponent_type_ == ON_STACK) {
+ FPRegister half_double = d3;
+ FPRegister minus_half_double = d4;
+ FPRegister zero_double = d5;
+ // Detect square root case. Crankshaft detects constant +/-0.5 at compile
+ // time and uses DoMathPowHalf instead. We then skip this check for
+ // non-constant cases of +/-0.5 as these hardly occur.
+
+ __ Fmov(minus_half_double, -0.5);
+ __ Fmov(half_double, 0.5);
+ __ Fcmp(minus_half_double, exponent_double);
+ __ Fccmp(half_double, exponent_double, NZFlag, ne);
+ // Condition flags at this point:
+ // 0.5: nZCv // Identified by eq && pl
+ // -0.5: NZcv // Identified by eq && mi
+ // other: ?z?? // Identified by ne
+ __ B(ne, &call_runtime);
+
+ // The exponent is 0.5 or -0.5.
+
+ // Given that exponent is known to be either 0.5 or -0.5, the following
+ // special cases could apply (according to ECMA-262 15.8.2.13):
+ //
+ // base.isNaN(): The result is NaN.
+ // (base == +INFINITY) || (base == -INFINITY)
+ // exponent == 0.5: The result is +INFINITY.
+ // exponent == -0.5: The result is +0.
+ // (base == +0) || (base == -0)
+ // exponent == 0.5: The result is +0.
+ // exponent == -0.5: The result is +INFINITY.
+ // (base < 0) && base.isFinite(): The result is NaN.
+ //
+ // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
+ // where base is -INFINITY or -0.
+
+ // Add +0 to base. This has no effect other than turning -0 into +0.
+ __ Fmov(zero_double, 0.0);
+ __ Fadd(base_double, base_double, zero_double);
+ // The operation -0+0 results in +0 in all cases except where the
+ // FPCR rounding mode is 'round towards minus infinity' (RM). The
+ // A64 simulator does not currently simulate FPCR (where the rounding
+ // mode is set), so test the operation with some debug code.
+ if (masm->emit_debug_code()) {
+ Register temp = masm->Tmp1();
+ // d5 zero_double The value +0.0 as a double.
+ __ Fneg(scratch0_double, zero_double);
+ // Verify that we correctly generated +0.0 and -0.0.
+ // bits(+0.0) = 0x0000000000000000
+ // bits(-0.0) = 0x8000000000000000
+ __ Fmov(temp, zero_double);
+ __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
+ __ Fmov(temp, scratch0_double);
+ __ Eor(temp, temp, kDSignMask);
+ __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
+ // Check that -0.0 + 0.0 == +0.0.
+ __ Fadd(scratch0_double, scratch0_double, zero_double);
+ __ Fmov(temp, scratch0_double);
+ __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
+ }
+
+ // If base is -INFINITY, make it +INFINITY.
+ // * Calculate base - base: All infinities will become NaNs since both
+ // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64.
+ // * If the result is NaN, calculate abs(base).
+ __ Fsub(scratch0_double, base_double, base_double);
+ __ Fcmp(scratch0_double, 0.0);
+ __ Fabs(scratch1_double, base_double);
+ __ Fcsel(base_double, scratch1_double, base_double, vs);
+
+ // Calculate the square root of base.
+ __ Fsqrt(result_double, base_double);
+ __ Fcmp(exponent_double, 0.0);
+ __ B(ge, &done); // Finish now for exponents of 0.5.
+ // Find the inverse for exponents of -0.5.
+ __ Fmov(scratch0_double, 1.0);
+ __ Fdiv(result_double, scratch0_double, result_double);
+ __ B(&done);
+ }
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ B(&done);
+ }
+
+ // Handle SMI exponents.
+ __ Bind(&exponent_is_smi);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // d0 base_double The base as a double.
+ __ SmiUntag(exponent_integer, exponent_tagged);
+ }
+
+ __ Bind(&exponent_is_integer);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // x12 exponent_integer The exponent as an integer.
+ // d0 base_double The base as a double.
+
+ // Find abs(exponent). For negative exponents, we can find the inverse later.
+ Register exponent_abs = x13;
+ __ Cmp(exponent_integer, 0);
+ __ Cneg(exponent_abs, exponent_integer, mi);
+ // x13 exponent_abs The value of abs(exponent_integer).
+
+ // Repeatedly multiply to calculate the power.
+ // result = 1.0;
+ // For each bit n (exponent_integer{n}) {
+ // if (exponent_integer{n}) {
+ // result *= base;
+ // }
+ // base *= base;
+ // if (remaining bits in exponent_integer are all zero) {
+ // break;
+ // }
+ // }
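+ // For example, with exponent_abs == 5 (binary 101), result picks up a
+ // factor of base for bit 0, skips the factor for bit 1, and picks up a
+ // factor of base^4 for bit 2, giving base * base^4 = base^5.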
+ Label power_loop, power_loop_entry, power_loop_exit;
+ __ Fmov(scratch1_double, base_double);
+ __ Fmov(base_double_copy, base_double);
+ __ Fmov(result_double, 1.0);
+ __ B(&power_loop_entry);
+
+ __ Bind(&power_loop);
+ __ Fmul(scratch1_double, scratch1_double, scratch1_double);
+ __ Lsr(exponent_abs, exponent_abs, 1);
+ __ Cbz(exponent_abs, &power_loop_exit);
+
+ __ Bind(&power_loop_entry);
+ __ Tbz(exponent_abs, 0, &power_loop);
+ __ Fmul(result_double, result_double, scratch1_double);
+ __ B(&power_loop);
+
+ __ Bind(&power_loop_exit);
+
+ // If the exponent was positive, result_double holds the result.
+ __ Tbz(exponent_integer, kXSignBit, &done);
+
+ // The exponent was negative, so find the inverse.
+ __ Fmov(scratch0_double, 1.0);
+ __ Fdiv(result_double, scratch0_double, result_double);
+ // ECMA-262 only requires Math.pow to return an 'implementation-dependent
+ // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
+ // to calculate the subnormal value 2^-1074. This method of calculating
+ // negative powers doesn't work because 2^1074 overflows to infinity. To
+ // catch this corner-case, we bail out if the result was 0. (This can only
+ // occur if the divisor is infinity or the base is zero.)
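+ // (2^-1074 is the smallest positive double; its reciprocal 2^1074 exceeds
+ // the largest finite double, roughly 1.8e308, so the division above yields
+ // 1 / infinity == 0 in that case.)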
+ __ Fcmp(result_double, 0.0);
+ __ B(&done, ne);
+
+ if (exponent_type_ == ON_STACK) {
+ // Bail out to runtime code.
+ __ Bind(&call_runtime);
+ // Put the arguments back on the stack.
+ __ Push(base_tagged, exponent_tagged);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+ // Return.
+ __ Bind(&done);
+ __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1);
+ __ Str(result_double,
+ FieldMemOperand(result_tagged, HeapNumber::kValueOffset));
+ ASSERT(result_tagged.is(x0));
+ __ IncrementCounter(
+ masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ __ Ret();
+ } else {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ Fmov(base_double, base_double_copy);
+ __ Scvtf(exponent_double, exponent_integer);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ Bind(&done);
+ __ IncrementCounter(
+ masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ __ Ret();
+ }
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ // It is important that the following stubs are generated in this order
+ // because pregenerated stubs can only call other pregenerated stubs.
+ // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
+ // CEntryStub.
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Floating-point code doesn't get special handling in A64, so there's
+ // nothing to do here.
+ USE(isolate);
+}
+
+
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
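+ // With kFailureTag == 3 in the low tag bits and OUT_OF_MEMORY_EXCEPTION == 3
+ // in the type field just above them, an out-of-memory failure is the only
+ // value whose low four bits are all set, so checking against 0xf suffices.
+ // (This assumes two-bit tag and type fields, which the 0xf mask implies.)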
+ __ And(scratch, value, 0xf);
+ __ Cmp(scratch, 0xf);
+ __ B(eq, oom_label);
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ // CEntryStub stores the return address on the stack before calling into
+ // C++ code. In some cases, the VM accesses this address, but it is not used
+ // when the C++ code returns to the stub because LR holds the return address
+ // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
+ // returning to dead code.
+ // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
+ // find any comment to confirm this, and I don't hit any crashes whatever
+ // this function returns. The analysis should be properly confirmed.
+ return true;
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ stub.GetCode(isolate);
+ CEntryStub stub_fp(1, kSaveFPRegs);
+ stub_fp.GetCode(isolate);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal,
+ Label* throw_termination,
+ Label* throw_out_of_memory,
+ bool do_gc,
+ bool always_allocate) {
+ // x0 : Result parameter for PerformGC, if do_gc is true.
+ // x21 : argv
+ // x22 : argc
+ // x23 : target
+ //
+ // The stack (on entry) holds the arguments and the receiver, with the
+ // receiver at the highest address:
+ //
+ // argv[8]: receiver
+ // argv -> argv[0]: arg[argc-2]
+ // ... ...
+ // argv[...]: arg[1]
+ // argv[...]: arg[0]
+ //
+ // Immediately below (after) this is the exit frame, as constructed by
+ // EnterExitFrame:
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // csp[...]: Saved doubles, if saved_doubles is true.
+ // csp[32]: Alignment padding, if necessary.
+ // csp[24]: Preserved x23 (used for target).
+ // csp[16]: Preserved x22 (used for argc).
+ // csp[8]: Preserved x21 (used for argv).
+ // csp -> csp[0]: Space reserved for the return address.
+ //
+ // After a successful call, the exit frame, preserved registers (x21-x23) and
+ // the arguments (including the receiver) are dropped or popped as
+ // appropriate. The stub then returns.
+ //
+ // After an unsuccessful call, the exit frame and suchlike are left
+ // untouched, and the stub either throws an exception by jumping to one of
+ // the provided throw_ labels, or it falls through. The failure details are
+ // passed through in x0.
+ ASSERT(csp.Is(__ StackPointer()));
+
+ Isolate* isolate = masm->isolate();
+
+ const Register& argv = x21;
+ const Register& argc = x22;
+ const Register& target = x23;
+
+ if (do_gc) {
+ // Call Runtime::PerformGC, passing x0 (the result parameter for
+ // PerformGC) and x1 (the isolate).
+ __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::perform_gc_function(isolate), 2, 0);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth(isolate);
+ if (always_allocate) {
+ __ Mov(x10, Operand(scope_depth));
+ __ Ldr(x11, MemOperand(x10));
+ __ Add(x11, x11, 1);
+ __ Str(x11, MemOperand(x10));
+ }
+
+ // Prepare AAPCS64 arguments to pass to the builtin.
+ __ Mov(x0, argc);
+ __ Mov(x1, argv);
+ __ Mov(x2, Operand(ExternalReference::isolate_address(isolate)));
+
+ // Store the return address on the stack, in the space previously allocated
+ // by EnterExitFrame. The return address is queried by
+ // ExitFrame::GetStateForFramePointer.
+ Label return_location;
+ __ Adr(x12, &return_location);
+ __ Poke(x12, 0);
+ if (__ emit_debug_code()) {
+ // Verify that the slot just below the address held at fp[kSPOffset]
+ // contains the return location (currently in x12).
+ Register temp = masm->Tmp1();
+ __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes)));
+ __ Cmp(temp, x12);
+ __ Check(eq, kReturnAddressNotFoundInFrame);
+ }
+
+ // Call the builtin.
+ __ Blr(target);
+ __ Bind(&return_location);
+ const Register& result = x0;
+
+ if (always_allocate) {
+ __ Mov(x10, Operand(scope_depth));
+ __ Ldr(x11, MemOperand(x10));
+ __ Sub(x11, x11, 1);
+ __ Str(x11, MemOperand(x10));
+ }
+
+ // x0 result The return code from the call.
+ // x21 argv
+ // x22 argc
+ // x23 target
+ //
+ // If all of the result bits matching kFailureTagMask are '1', the result is
+ // a failure. Otherwise, it's an ordinary tagged object and the call was a
+ // success.
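+ // (For reference: ordinary heap object pointers carry a low-bit tag of 01
+ // while failures carry 11, so masking with kFailureTagMask and comparing
+ // against it cleanly separates the two cases.)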
+ Label failure;
+ __ And(x10, result, kFailureTagMask);
+ __ Cmp(x10, kFailureTagMask);
+ __ B(&failure, eq);
+
+ // The call succeeded, so unwind the stack and return.
+
+ // Restore callee-saved registers x21-x23.
+ __ Mov(x11, argc);
+
+ __ Peek(argv, 1 * kPointerSize);
+ __ Peek(argc, 2 * kPointerSize);
+ __ Peek(target, 3 * kPointerSize);
+
+ __ LeaveExitFrame(save_doubles_, x10, true);
+ ASSERT(jssp.Is(__ StackPointer()));
+ // Pop or drop the remaining stack slots and return from the stub.
+ // jssp[24]: Arguments array (of size argc), including receiver.
+ // jssp[16]: Preserved x23 (used for target).
+ // jssp[8]: Preserved x22 (used for argc).
+ // jssp[0]: Preserved x21 (used for argv).
+ __ Drop(x11);
+ __ Ret();
+
+ // The stack pointer is still csp if we aren't returning, and the frame
+ // hasn't changed (except for the return address).
+ __ SetStackPointer(csp);
+
+ __ Bind(&failure);
+ // The call failed, so check if we need to throw an exception, and fall
+ // through (to retry) otherwise.
+
+ Label retry;
+ // x0 result The return code from the call, including the failure
+ // code and details.
+ // x21 argv
+ // x22 argc
+ // x23 target
+ // Refer to the Failure class for details of the bit layout.
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ Tst(result, kFailureTypeTagMask << kFailureTagSize);
+ __ B(eq, &retry); // RETRY_AFTER_GC
+
+ // Special handling of out-of-memory exceptions: Pass the failure result,
+ // rather than the exception descriptor.
+ JumpIfOOM(masm, result, x10, throw_out_of_memory);
+
+ // Retrieve the pending exception.
+ const Register& exception = result;
+ const Register& exception_address = x11;
+ __ Mov(exception_address,
+ Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Ldr(exception, MemOperand(exception_address));
+
+ // See if we just retrieved an OOM exception.
+ JumpIfOOM(masm, exception, x10, throw_out_of_memory);
+
+ // Clear the pending exception.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Str(x10, MemOperand(exception_address));
+
+ // x0 exception The exception descriptor.
+ // x21 argv
+ // x22 argc
+ // x23 target
+
+ // Special handling of termination exceptions, which are uncatchable by
+ // JavaScript code.
+ __ Cmp(exception, Operand(isolate->factory()->termination_exception()));
+ __ B(eq, throw_termination);
+
+ // Handle normal exception.
+ __ B(throw_normal);
+
+ __ Bind(&retry);
+ // The result (x0) is passed through as the next PerformGC parameter.
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // The Abort mechanism relies on CallRuntime, which in turn relies on
+ // CEntryStub, so until this stub has been generated, we have to use a
+ // fall-back Abort mechanism.
+ //
+ // Note that this stub must be generated before any use of Abort.
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+
+ ASM_LOCATION("CEntryStub::Generate entry");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Register parameters:
+ // x0: argc (including receiver, untagged)
+ // x1: target
+ //
+ // The stack on entry holds the arguments and the receiver, with the receiver
+ // at the highest address:
+ //
+ // jssp[argc-1]: receiver
+ // jssp[argc-2]: arg[argc-2]
+ // ... ...
+ // jssp[1]: arg[1]
+ // jssp[0]: arg[0]
+ //
+ // The arguments are in reverse order, so that arg[argc-2] is actually the
+ // first argument to the target function and arg[0] is the last.
+ ASSERT(jssp.Is(__ StackPointer()));
+ const Register& argc_input = x0;
+ const Register& target_input = x1;
+
+ // Calculate argv, argc and the target address, and store them in
+ // callee-saved registers so we can retry the call without having to reload
+ // these arguments.
+ // TODO(jbramley): If the first call attempt succeeds in the common case (as
+ // it should), then we might be better off putting these parameters directly
+ // into their argument registers, rather than using callee-saved registers and
+ // preserving them on the stack.
+ const Register& argv = x21;
+ const Register& argc = x22;
+ const Register& target = x23;
+
+ // Derive argv from the stack pointer so that it points to the first argument
+ // (arg[argc-2]), or just below the receiver in case there are no arguments.
+ // - Adjust for the arg[] array.
+ Register temp_argv = x11;
+ __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ // - Adjust for the receiver.
+ __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+
+ // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
+ // registers.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles_, x10, 3);
+ ASSERT(csp.Is(__ StackPointer()));
+
+ // Poke callee-saved registers into reserved space.
+ __ Poke(argv, 1 * kPointerSize);
+ __ Poke(argc, 2 * kPointerSize);
+ __ Poke(target, 3 * kPointerSize);
+
+ // We normally only keep tagged values in callee-saved registers, as they
+ // could be pushed onto the stack by called stubs and functions, and on the
+ // stack they can confuse the GC. However, we're only calling C functions
+ // which can push arbitrary data onto the stack anyway, and so the GC won't
+ // examine that part of the stack.
+ __ Mov(argc, argc_input);
+ __ Mov(target, target_input);
+ __ Mov(argv, temp_argv);
+
+ Label throw_normal;
+ Label throw_termination;
+ Label throw_out_of_memory;
+
+ // Call the runtime function.
+ GenerateCore(masm,
+ &throw_normal,
+ &throw_termination,
+ &throw_out_of_memory,
+ false,
+ false);
+
+ // If successful, the previous GenerateCore will have returned to the
+ // calling code. Otherwise, we fall through into the following.
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal,
+ &throw_termination,
+ &throw_out_of_memory,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError()));
+ GenerateCore(masm,
+ &throw_normal,
+ &throw_termination,
+ &throw_out_of_memory,
+ true,
+ true);
+
+ // We didn't execute a return case, so the stack frame hasn't been updated
+ // (except for the return address slot). However, we don't need to initialize
+ // jssp because the throw method will immediately overwrite it when it
+ // unwinds the stack.
+ if (__ emit_debug_code()) {
+ __ Mov(jssp, kDebugZapValue);
+ }
+ __ SetStackPointer(jssp);
+
+ // Throw exceptions.
+ // If we throw an exception, we can end up re-entering CEntryStub before we
+ // pop the exit frame, so we need to ensure that x21-x23 contain GC-safe
+ // values here.
+ __ Bind(&throw_out_of_memory);
+ ASM_LOCATION("Throw out of memory");
+ __ Mov(argv, 0);
+ __ Mov(argc, 0);
+ __ Mov(target, 0);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ __ Mov(x2, Operand(ExternalReference(Isolate::kExternalCaughtExceptionAddress,
+ isolate)));
+ __ Str(xzr, MemOperand(x2));
+
+ // Set pending exception and x0 to out of memory exception.
+ Label already_have_failure;
+ JumpIfOOM(masm, x0, x10, &already_have_failure);
+ Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
+ __ Mov(x0, Operand(reinterpret_cast<uint64_t>(out_of_memory)));
+ __ Bind(&already_have_failure);
+ __ Mov(x2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Str(x0, MemOperand(x2));
+ // Fall through to the next label.
+
+ __ Bind(&throw_termination);
+ ASM_LOCATION("Throw termination");
+ __ Mov(argv, 0);
+ __ Mov(argc, 0);
+ __ Mov(target, 0);
+ __ ThrowUncatchable(x0, x10, x11, x12, x13);
+
+ __ Bind(&throw_normal);
+ ASM_LOCATION("Throw normal");
+ __ Mov(argv, 0);
+ __ Mov(argc, 0);
+ __ Mov(target, 0);
+ __ Throw(x0, x10, x11, x12, x13);
+}
+
+
+// This is the entry point from C++. 5 arguments are provided in x0-x4.
+// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
+// Input:
+// x0: code entry.
+// x1: function.
+// x2: receiver.
+// x3: argc.
+// x4: argv.
+// Output:
+// x0: result.
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ ASSERT(jssp.Is(__ StackPointer()));
+ Register code_entry = x0;
+
+ // Enable instruction instrumentation. This only works on the simulator, and
+ // will have no effect on the model or real hardware.
+ __ EnableInstrumentation();
+
+ Label invoke, handler_entry, exit;
+
+ // Push callee-saved registers and synchronize the system stack pointer (csp)
+ // and the JavaScript stack pointer (jssp).
+ //
+ // We must not write to jssp until after the PushCalleeSavedRegisters()
+ // call, since jssp is itself a callee-saved register.
+ __ SetStackPointer(csp);
+ __ PushCalleeSavedRegisters();
+ __ Mov(jssp, csp);
+ __ SetStackPointer(jssp);
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Build an entry frame (see layout below).
+ Isolate* isolate = masm->isolate();
+
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
+ __ Mov(x13, bad_frame_pointer);
+ __ Mov(x12, Operand(Smi::FromInt(marker)));
+ __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+ __ Ldr(x10, MemOperand(x11));
+
+ // TODO(all): Pushing the marker twice seems unnecessary.
+ // In this case perhaps we could push xzr in the slot for the context
+ // (see MAsm::EnterFrame).
+ __ Push(x13, x12, x12, x10);
+ // Set up fp.
+ __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
+
+ // Push the JS entry frame marker. Also set js_entry_sp if this is the
+ // outermost JS call.
+ Label non_outermost_js, done;
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ __ Mov(x10, Operand(ExternalReference(js_entry_sp)));
+ __ Ldr(x11, MemOperand(x10));
+ __ Cbnz(x11, &non_outermost_js);
+ __ Str(fp, MemOperand(x10));
+ __ Mov(x12, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ Push(x12);
+ __ B(&done);
+ __ Bind(&non_outermost_js);
+ // We spare one instruction by pushing xzr since the marker is 0.
+ ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+ __ Push(xzr);
+ __ Bind(&done);
+
+ // The frame set up looks like this:
+ // jssp[0] : JS entry frame marker.
+ // jssp[1] : C entry FP.
+ // jssp[2] : stack frame marker.
+ // jssp[3] : stack frame marker.
+ // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ B(&invoke);
+
+ // Prevent the constant pool from being emitted between the record of the
+ // handler_entry position and the first instruction of the sequence here.
+ // There is no risk because Assembler::Emit() emits the instruction before
+ // checking for constant pool emission, but we do not want to depend on
+ // that.
+ {
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ // TODO(jbramley): Do this in the Assembler.
+ __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ }
+ __ Str(code_entry, MemOperand(x10));
+ __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception())));
+ __ B(&exit);
+
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
+ __ Bind(&invoke);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the B(&invoke) above, which
+ // restores all callee-saved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Str(x10, MemOperand(x11));
+
+ // Invoke the function by calling through the JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // x0: code entry.
+ // x1: function.
+ // x2: receiver.
+ // x3: argc.
+ // x4: argv.
+ // TODO(jbramley): The latest ARM code checks is_construct and conditionally
+ // uses construct_entry. We probably need to do the same here.
+ ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
+ : Builtins::kJSEntryTrampoline,
+ isolate);
+ __ Mov(x10, Operand(entry));
+
+ // Call the JSEntryTrampoline.
+ __ Ldr(x11, MemOperand(x10)); // Dereference the address.
+ __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+ __ Blr(x12);
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+
+ __ Bind(&exit);
+ // x0 holds the result.
+ // The stack pointer points to the top of the entry frame pushed on entry from
+ // C++ (at the beginning of this stub):
+ // jssp[0] : JS entry frame marker.
+ // jssp[1] : C entry FP.
+ // jssp[2] : stack frame marker.
+ // jssp[3] : stack frmae marker.
+ // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ Pop(x10);
+ __ Cmp(x10, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ B(ne, &non_outermost_js_2);
+ __ Mov(x11, Operand(ExternalReference(js_entry_sp)));
+ __ Str(xzr, MemOperand(x11));
+ __ Bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ Pop(x10);
+ __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+ __ Str(x10, MemOperand(x11));
+
+ // Reset the stack to the callee saved registers.
+ __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
+ // Restore the callee-saved registers and return.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+ __ PopCalleeSavedRegisters();
+ // After this point, we must not modify jssp because it is a callee-saved
+ // register which we have just restored.
+ __ Ret();
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x1 : receiver
+ // -- x0 : key
+ // -----------------------------------
+ Register key = x0;
+ receiver = x1;
+ __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string()));
+ __ B(ne, &miss);
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : name
+ // -- x0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = x0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
+
+ __ Bind(&miss);
+ StubCompiler::TailCallBuiltin(masm,
+ BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x1 : receiver
+ // -- x0 : key
+ // -----------------------------------
+ Register key = x0;
+ receiver = x1;
+ __ Cmp(key, Operand(masm->isolate()->factory()->length_string()));
+ __ B(ne, &miss);
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : name
+ // -- x0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = x0;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, x10, x11, &miss);
+
+ __ Bind(&miss);
+ StubCompiler::TailCallBuiltin(masm,
+ BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("StoreArrayLengthStub::Generate");
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted here, as they are the most common case.
+ Label miss;
+
+ Register receiver;
+ Register value;
+ if (kind() == Code::KEYED_STORE_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : receiver
+ // -- x1 : key
+ // -- x0 : value
+ // -----------------------------------
+ Register key = x1;
+ receiver = x2;
+ value = x0;
+ __ Cmp(key, Operand(masm->isolate()->factory()->length_string()));
+ __ B(ne, &miss);
+ } else {
+ ASSERT(kind() == Code::STORE_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : key
+ // -- x1 : receiver
+ // -- x0 : value
+ // -----------------------------------
+ receiver = x1;
+ value = x0;
+ }
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CompareObjectType(receiver, x10, x11, JS_ARRAY_TYPE);
+ __ B(ne, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ Ldr(x10, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ CompareObjectType(x10, x11, x12, FIXED_ARRAY_TYPE);
+ __ B(ne, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ Ldr(x10, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ Ldr(x10, FieldMemOperand(x10, FixedArray::kMapOffset));
+ __ CompareRoot(x10, Heap::kHashTableMapRootIndex);
+ __ B(eq, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ Bind(&miss);
+ StubCompiler::TailCallBuiltin(masm,
+ BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Stack on entry:
+ // jssp[0]: function.
+ // jssp[8]: object.
+ //
+ // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
+ // instanceof.
+
+ Register result = x0;
+ Register function = right();
+ Register object = left();
+ Register scratch1 = x6;
+ Register scratch2 = x7;
+ Register res_true = x8;
+ Register res_false = x9;
+ // Only used if there was an inline map check site. (See
+ // LCodeGen::DoInstanceOfKnownGlobal().)
+ Register map_check_site = x4;
+ // Delta for the instructions generated between the inline map check and the
+ // instruction setting the result.
+ const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
+
+ Label not_js_object, slow;
+
+ if (!HasArgsInRegisters()) {
+ __ Pop(function, object);
+ }
+
+ if (ReturnTrueFalseObject()) {
+ __ LoadTrueFalseRoots(res_true, res_false);
+ } else {
+ // This is counter-intuitive, but correct.
+ __ Mov(res_true, Operand(Smi::FromInt(0)));
+ __ Mov(res_false, Operand(Smi::FromInt(1)));
+ }
+
+ // Check that the left hand side is a JS object and load its map as a side
+ // effect.
+ Register map = x12;
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
+
+ // If there is a call site cache, don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
+ __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
+ __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
+ __ Bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ Register prototype = x13;
+ __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
+ MacroAssembler::kMissOnBoundFunction);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (HasCallSiteInlineCheck()) {
+ // Patch the (relocated) inlined map check.
+ __ GetRelocatedValueLocation(map_check_site, scratch1);
+ // We have a cell, so need another level of dereferencing.
+ __ Ldr(scratch1, MemOperand(scratch1));
+ __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
+ } else {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ }
+
+ Label return_true, return_result;
+ {
+ // Loop through the prototype chain looking for the function prototype.
+ Register chain_map = x1;
+ Register chain_prototype = x14;
+ Register null_value = x15;
+ Label loop;
+ __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ // Speculatively set a result.
+ __ Mov(result, res_false);
+
+ __ Bind(&loop);
+
+ // If the chain prototype is the object prototype, return true.
+ __ Cmp(chain_prototype, prototype);
+ __ B(eq, &return_true);
+
+ // If the chain prototype is null, we've reached the end of the chain, so
+ // return false.
+ __ Cmp(chain_prototype, null_value);
+ __ B(eq, &return_result);
+
+ // Otherwise, load the next prototype in the chain, and loop.
+ __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
+ __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
+ __ B(&loop);
+ }
+
+ // Return sequence when no arguments are on the stack.
+ // We cannot fall through to here.
+ __ Bind(&return_true);
+ __ Mov(result, res_true);
+ __ Bind(&return_result);
+ if (HasCallSiteInlineCheck()) {
+ ASSERT(ReturnTrueFalseObject());
+ __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
+ __ GetRelocatedValueLocation(map_check_site, scratch2);
+ __ Str(result, MemOperand(scratch2));
+ } else {
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ }
+ __ Ret();
+
+ Label object_not_null, object_not_null_or_smi;
+
+ __ Bind(&not_js_object);
+ Register object_type = x14;
+ // x0 result result return register (uninit)
+ // x10 function pointer to function
+ // x11 object pointer to object
+ // x14 object_type type of object (uninit)
+
+ // Before null, smi and string checks, check that the rhs is a function.
+ // For a non-function rhs, an exception must be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ JumpIfNotObjectType(
+ function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
+
+ __ Mov(result, res_false);
+
+ // Null is not instance of anything.
+ __ Cmp(object_type, Operand(masm->isolate()->factory()->null_value()));
+ __ B(ne, &object_not_null);
+ __ Ret();
+
+ __ Bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ Ret();
+
+ __ Bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch2, &slow);
+ __ Ret();
+
+ // Slow-case. Tail call builtin.
+ __ Bind(&slow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments have either been passed into registers or have been previously
+ // popped. We need to push them before calling builtin.
+ __ Push(object, function);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ if (ReturnTrueFalseObject()) {
+ // Reload true/false because they were clobbered in the builtin call.
+ __ LoadTrueFalseRoots(res_true, res_false);
+ __ Cmp(result, 0);
+ __ Csel(result, res_true, res_false, eq);
+ }
+ __ Ret();
+}
+
+
+Register InstanceofStub::left() {
+ // Object to check (instanceof lhs).
+ return x11;
+}
+
+
+Register InstanceofStub::right() {
+ // Constructor function (instanceof rhs).
+ return x10;
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ Register arg_count = x0;
+ Register key = x1;
+
+ // The displacement is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(key, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register local_fp = x11;
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label skip_adaptor;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Csel(local_fp, fp, caller_fp, ne);
+ __ B(ne, &skip_adaptor);
+
+ // Load the actual arguments limit found in the arguments adaptor frame.
+ __ Ldr(arg_count, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Bind(&skip_adaptor);
+
+ // Check index against formal parameters count limit. Use unsigned comparison
+ // to get negative check for free: branch if key < 0 or key >= arg_count.
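+ // (A negative smi key, reinterpreted as an unsigned value, is larger than
+ // any valid arg_count, so the single 'hs' branch below covers both the
+ // key < 0 and the key >= arg_count cases.)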
+ __ Cmp(key, arg_count);
+ __ B(hs, &slow);
+
+ // Read the argument from the stack and return it.
+ __ Sub(x10, arg_count, key);
+ __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
+ __ Ldr(x0, MemOperand(x10, kDisplacement));
+ __ Ret();
+
+ // Slow case: handle non-smi or out-of-bounds access to arguments by calling
+ // the runtime system.
+ __ Bind(&slow);
+ __ Push(key);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Register caller_fp = x10;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ // Load and untag the context.
+ STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
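+ // Since smis live in the upper 32 bits of the tagged word, loading just the
+ // high word (offset + 4 on a little-endian target) yields the untagged
+ // value directly.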
+ __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
+ (kSmiShift / kBitsPerByte)));
+ __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
+ __ B(ne, &runtime);
+
+ // Patch the arguments.length and parameters pointer in the current frame.
+ __ Ldr(x11, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Poke(x11, 0 * kXRegSizeInBytes);
+ __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
+ __ Add(x10, x10, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ Poke(x10, 1 * kXRegSizeInBytes);
+
+ __ Bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Note: arg_count_smi is an alias of param_count_smi.
+ Register arg_count_smi = x3;
+ Register param_count_smi = x3;
+ Register param_count = x7;
+ Register recv_arg = x14;
+ Register function = x4;
+ __ Pop(param_count_smi, recv_arg, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+
+ // x1 mapped_params number of mapped params, min(params, args) (uninit)
+ // x2 arg_count number of function arguments (uninit)
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 caller_fp caller's frame pointer
+ // x14 recv_arg pointer to receiver arguments
+
+ Register arg_count = x2;
+ __ Mov(arg_count, param_count);
+ __ B(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ Bind(&adaptor_frame);
+ __ Ldr(arg_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(arg_count, arg_count_smi);
+ __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Compute the mapped parameter count = min(param_count, arg_count)
+ Register mapped_params = x1;
+ __ Cmp(param_count, arg_count);
+ __ Csel(mapped_params, param_count, arg_count, lt);
+
+ __ Bind(&try_allocate);
+
+ // x0 alloc_obj pointer to allocated objects: param map, backing
+ // store, arguments (uninit)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x10 size size of objects to allocate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ // Compute the size of backing store, parameter map, and arguments object.
+ // 1. Parameter map, which has two extra words containing the context and
+ // backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+
+ // Calculate the parameter map size, assuming it exists.
+ Register size = x10;
+ __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(size, size, kParameterMapHeaderSize);
+
+ // If there are no mapped parameters, set the running size total to zero.
+ // Otherwise, use the parameter map size calculated earlier.
+ __ Cmp(mapped_params, 0);
+ __ CzeroX(size, eq);
+
+ // 2. Add the size of the backing store and arguments object.
+ __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(size, size, FixedArray::kHeaderSize + Heap::kArgumentsObjectSize);
+
+ // Do the allocation of all three objects in one go. Assign this to x0, as it
+ // will be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x11;
+ Register aliased_args_offset = x10;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+
+ __ Ldr(args_offset, ContextMemOperand(global_ctx,
+ Context::ARGUMENTS_BOILERPLATE_INDEX));
+ __ Ldr(aliased_args_offset,
+ ContextMemOperand(global_ctx,
+ Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Cmp(mapped_params, 0);
+ __ CmovX(args_offset, aliased_args_offset, ne);
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
+
+ // Use the length and set that as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, "elements" will point there, otherwise
+ // it will point to the backing store.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x14 recv_arg pointer to receiver arguments
+
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kArgumentsObjectSize);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ Cmp(mapped_params, 0);
+ // Set up backing store address, because it is needed later for filling in
+ // the unmapped arguments.
+ Register backing_store = x6;
+ __ CmovX(backing_store, elements, eq);
+ __ B(eq, &skip_parameter_map);
+
+ __ LoadRoot(x10, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ __ Add(x10, mapped_params, 2);
+ __ SmiTag(x10);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Str(cp, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize);
+ __ Str(x10, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. Then index the context,
+ // where parameters are stored in reverse order, at:
+ //
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
+ //
+ // The mapped parameter thus needs to get indices:
+ //
+ // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
+ // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
+ //
+ // We loop from right to left.
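+ // For example (illustrative values only): with param_count == 4 and
+ // mapped_params == 2, the loop below stores MIN_CONTEXT_SLOTS + 2 into the
+ // second parameter map slot and MIN_CONTEXT_SLOTS + 3 into the first,
+ // working from right to left.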
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x11 loop_count parameter loop counter (uninit)
+ // x12 index parameter index (smi, uninit)
+ // x13 the_hole hole value (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register loop_count = x11;
+ Register index = x12;
+ Register the_hole = x13;
+ Label parameters_loop, parameters_test;
+ __ Mov(loop_count, mapped_params);
+ __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
+ __ Sub(index, index, mapped_params);
+ __ SmiTag(index);
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(backing_store, backing_store, kParameterMapHeaderSize);
+
+ __ B(&parameters_test);
+
+ __ Bind(&parameters_loop);
+ __ Sub(loop_count, loop_count, 1);
+ __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
+ __ Str(index, MemOperand(elements, x10));
+ __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
+ __ Str(the_hole, MemOperand(backing_store, x10));
+ __ Add(index, index, Operand(Smi::FromInt(1)));
+ __ Bind(&parameters_test);
+ __ Cbnz(loop_count, &parameters_loop);
+
+ __ Bind(&skip_parameter_map);
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
+ __ Str(arg_count_smi, FieldMemOperand(backing_store,
+ FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x6 backing_store pointer to backing store
+ // x14 recv_arg pointer to receiver arguments
+
+ Label arguments_loop, arguments_test;
+ __ Mov(x10, mapped_params);
+ __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
+ __ B(&arguments_test);
+
+ __ Bind(&arguments_loop);
+ __ Sub(recv_arg, recv_arg, kPointerSize);
+ __ Ldr(x11, MemOperand(recv_arg));
+ __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ __ Add(x10, x10, 1);
+
+ __ Bind(&arguments_test);
+ __ Cmp(x10, arg_count);
+ __ B(lt, &arguments_loop);
+
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, recv_arg, arg_count_smi);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Get the stub arguments from the frame, and make an untagged copy of the
+ // parameter count.
+ Register param_count_smi = x1;
+ Register params = x2;
+ Register function = x3;
+ Register param_count = x13;
+ __ Pop(param_count_smi, params, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Test if arguments adaptor needed.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label try_allocate, runtime;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &try_allocate);
+
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
+
+ // Patch the argument length and parameters pointer.
+ __ Ldr(param_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(param_count, param_count_smi);
+ __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
+ __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Try the new space allocation. Start by computing the size of the
+ // arguments object and the elements array, in words.
+ Register size = x10;
+ __ Bind(&try_allocate);
+ __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
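+ // If there are no parameters, no elements array is needed: clear the size
+ // computed so far so that only the arguments object itself is allocated.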
+ __ Cmp(param_count, 0);
+ __ CzeroX(size, eq);
+ __ Add(size, size, Heap::kArgumentsObjectSizeStrict / kPointerSize);
+
+ // Do the allocation of both objects in one go. Assign this to x0, as it will
+ // be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (native) context.
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x4;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+ __ Ldr(args_offset,
+ ContextMemOperand(global_ctx,
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 args_offset offset to arguments boilerplate
+ // x13 param_count number of parameters passed to function
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set the smi-tagged length as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ Cbz(param_count, &done);
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kArgumentsObjectSizeStrict);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 array pointer to array slot (uninit)
+ // x5 elements pointer to elements array of alloc_obj
+ // x13 param_count number of parameters passed to function
+
+ // Copy the fixed array slots.
+ Label loop;
+ Register array = x4;
+ // Set up pointer to first array slot.
+ __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+
+ __ Bind(&loop);
+ // Pre-decrement the parameters pointer by kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
+ // Post-increment elements by kPointerSize on each iteration.
+ __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
+ __ Sub(param_count, param_count, 1);
+ __ Cbnz(param_count, &loop);
+
+ // Return from stub.
+ __ Bind(&done);
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, params, param_count_smi);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // jssp[0]: last_match_info (expected JSArray)
+ // jssp[8]: previous index
+ // jssp[16]: subject string
+ // jssp[24]: JSRegExp object
+ Label runtime;
+
+ // Use of registers for this function.
+
+ // Variable registers:
+ // x10-x13 used as scratch registers
+ // w0 string_type type of subject string
+ // x2 jsstring_length subject string length
+ // x3 jsregexp_object JSRegExp object
+ // w4 string_encoding ASCII or UC16
+ // w5 sliced_string_offset if the string is a SlicedString
+ // offset to the underlying string
+ // w6 string_representation groups attributes of the string:
+ // - is a string
+ // - type of the string
+ // - is a short external string
+ Register string_type = w0;
+ Register jsstring_length = x2;
+ Register jsregexp_object = x3;
+ Register string_encoding = w4;
+ Register sliced_string_offset = w5;
+ Register string_representation = w6;
+
+ // These are in callee-saved registers and will be preserved by the call
+ // to the native RegExp code, as this code is called using the normal
+ // C calling convention. When calling directly from generated code the
+ // native RegExp code will not do a GC and therefore the contents of
+ // these registers are safe to use after the call.
+
+ // x19 subject subject string
+ // x20 regexp_data RegExp data (FixedArray)
+ // x21 last_match_info_elements info relative to the last match
+ // (FixedArray)
+ // x22 code_object generated regexp code
+ Register subject = x19;
+ Register regexp_data = x20;
+ Register last_match_info_elements = x21;
+ Register code_object = x22;
+
+ // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
+ CPURegList used_callee_saved_registers(subject,
+ regexp_data,
+ last_match_info_elements,
+ code_object);
+ __ PushCPURegList(used_callee_saved_registers);
+
+ // Stack frame.
+ // jssp[0] : x19
+ // jssp[8] : x20
+ // jssp[16]: x21
+ // jssp[24]: x22
+ // jssp[32]: last_match_info (JSArray)
+ // jssp[40]: previous index
+ // jssp[48]: subject string
+ // jssp[56]: JSRegExp object
+
+ const int kLastMatchInfoOffset = 4 * kPointerSize;
+ const int kPreviousIndexOffset = 5 * kPointerSize;
+ const int kSubjectOffset = 6 * kPointerSize;
+ const int kJSRegExpOffset = 7 * kPointerSize;
+
+ // Ensure that a RegExp stack is allocated.
+ Isolate* isolate = masm->isolate();
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ __ Mov(x10, Operand(address_of_regexp_stack_memory_size));
+ __ Ldr(x10, MemOperand(x10));
+ __ Cbz(x10, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(jsregexp_object, kJSRegExpOffset);
+ __ JumpIfSmi(jsregexp_object, &runtime);
+ __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(regexp_data, kSmiTagMask);
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ }
+
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ Cmp(x10, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ B(ne, &runtime);
+
+ // Check that the number of captures fits in the static offsets vector
+ // buffer. We always have at least one capture for the whole match, plus
+ // additional ones due to capturing parentheses. A capture takes 2 registers.
+ // The number of capture registers then is (number_of_captures + 1) * 2.
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // number_of_captures * 2 <= offsets vector size - 2
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ Add(x10, x10, x10);
+ __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
+ __ B(hi, &runtime);
+
+ // Initialize offset for possibly sliced string.
+ __ Mov(sliced_string_offset, 0);
+
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(subject, kSubjectOffset);
+ __ JumpIfSmi(subject, &runtime);
+
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+
+ __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label check_underlying; // (4)
+ Label seq_string; // (5)
+ Label not_seq_nor_cons; // (6)
+ Label external_string; // (7)
+ Label not_long_external; // (8)
+
+ // (1) Sequential string? If yes, go to (5).
+ __ And(string_representation,
+ string_type,
+ kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask);
+ // We rely on the fact that strings which are sequential (SeqString) and
+ // not short external are identified by the following pattern:
+ // string_type: 0XX0 XX00
+ // ^ ^ ^^
+ // | | ||
+ // | | is a SeqString
+ // | is not a short external String
+ // is a String
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ Cbz(string_representation, &seq_string); // Go to (5).
+
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ __ Cmp(string_representation, kExternalStringTag);
+ __ B(ge, &not_seq_nor_cons); // Go to (6).
+
+ // (3) Cons string. Check that it's flat.
+ __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
+ // Replace subject with first string.
+ __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+
+ // (4) Is subject external? If yes, go to (7).
+ __ Bind(&check_underlying);
+ // Reload the string type.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ TestAndBranchIfAnySet(string_type.X(),
+ kStringRepresentationMask,
+ &external_string); // Go to (7).
+
+ // (5) Sequential string. Load regexp code according to encoding.
+ __ Bind(&seq_string);
+
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will appear greater in the unsigned
+ // comparison.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kPreviousIndexOffset);
+ __ JumpIfNotSmi(x10, &runtime);
+ __ Cmp(jsstring_length, x10);
+ __ B(ls, &runtime);
+
+ // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
+ // before entering the exit frame.
+ __ SmiUntag(x1, x10);
+
+ // The third bit determines the string encoding in string_type.
+ STATIC_ASSERT(kOneByteStringTag == 0x04);
+ STATIC_ASSERT(kTwoByteStringTag == 0x00);
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+
+ // Find the code object based on the assumptions above.
+ // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; adding an
+ // offset of kPointerSize reaches the latter.
+ ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+ JSRegExp::kDataUC16CodeOffset);
+ __ Mov(x10, kPointerSize);
+ // We will need the encoding later: ASCII = 0x04
+ // UC16 = 0x00
+ __ Ands(string_encoding, string_type, kStringEncodingMask);
+ __ CzeroX(x10, ne);
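+ // x10 is now 0 for an ASCII subject and kPointerSize for a UC16 subject, so
+ // the load below picks the matching code object.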
+ __ Add(x10, regexp_data, x10);
+ __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
+
+ // (E) Carry on. String handling is done.
+
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains a smi (code flushing support).
+ __ JumpIfSmi(code_object, &runtime);
+
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1,
+ x10,
+ x11);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ __ EnterExitFrame(false, x10, 1);
+ ASSERT(csp.Is(__ StackPointer()));
+
+ // We have 9 arguments to pass to the regexp code, therefore we have to pass
+ // one on the stack and the rest as registers.
+
+ // Note that the placement of the argument on the stack isn't standard
+ // AAPCS64:
+ // csp[0]: Space for the return address placed by DirectCEntryStub.
+ // csp[8]: Argument 9, the current isolate address.
+
+ __ Mov(x10, Operand(ExternalReference::isolate_address(isolate)));
+ __ Poke(x10, kPointerSize);
+
+ Register length = w11;
+ Register previous_index_in_bytes = w12;
+ Register start = x13;
+
+ // Load start of the subject string.
+ __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
+ // Load the length of the original subject string from the previous stack
+ // frame. To do this we use fp, which points exactly two pointer sizes below
+ // the previous sp. (Creating a new stack frame pushes the previous fp onto
+ // the stack and decrements sp by 2 * kPointerSize.)
+ __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+ __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle UC16 encoding, two bytes make one character.
+ // string_encoding: if ASCII: 0x04
+ // if UC16: 0x00
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+ __ Ubfx(string_encoding, string_encoding, 2, 1);
+ __ Eor(string_encoding, string_encoding, 1);
+ // string_encoding: if ASCII: 0
+ // if UC16: 1
+
+ // Convert string positions from characters to bytes.
+ // Previous index is in x1.
+ __ Lsl(previous_index_in_bytes, w1, string_encoding);
+ __ Lsl(length, length, string_encoding);
+ __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
+
+ // Argument 1 (x0): Subject string.
+ __ Mov(x0, subject);
+
+ // Argument 2 (x1): Previous index, already there.
+
+ // Argument 3 (x2): Get the start of input.
+ // Start of input = start of string + previous index + substring offset
+ //                  (the substring offset is 0 if the string is not sliced).
+ __ Add(w10, previous_index_in_bytes, sliced_string_offset);
+ __ Add(x2, start, Operand(w10, UXTW));
+
+ // Argument 4 (x3):
+ // End of input = start of input + (length of input - previous index)
+ __ Sub(w10, length, previous_index_in_bytes);
+ __ Add(x3, x2, Operand(w10, UXTW));
+
+ // Argument 5 (x4): static offsets vector buffer.
+ __ Mov(x4,
+ Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
+
+ // Argument 6 (x5): Set the number of capture registers to zero to force
+ // global regexps to behave as non-global. This stub is not used for global
+ // regexps.
+ __ Mov(x5, 0);
+
+ // Argument 7 (x6): Start (high end) of backtracking stack memory area.
+ __ Mov(x10, Operand(address_of_regexp_stack_memory_address));
+ __ Ldr(x10, MemOperand(x10));
+ __ Mov(x11, Operand(address_of_regexp_stack_memory_size));
+ __ Ldr(x11, MemOperand(x11));
+ __ Add(x6, x10, x11);
+
+ // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
+ __ Mov(x7, 1);
+
+ // Locate the code entry and call it.
+ __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm, code_object);
+
+ __ LeaveExitFrame(false, x10, true);
+
+ // The generated regexp code returns an int32 in w0.
+ Label failure, exception;
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
+ __ CompareAndBranch(w0,
+ NativeRegExpMacroAssembler::EXCEPTION,
+ eq,
+ &exception);
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
+
+ // Success: process the result from the native regexp code.
+ Register number_of_capture_registers = x12;
+
+ // Calculate number of capture registers (number_of_captures + 1) * 2
+ // and store it in the last match info.
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ __ Add(x10, x10, x10);
+ __ Add(number_of_capture_registers, x10, 2);
+
+ // Check that the fourth object is a JSArray object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kLastMatchInfoOffset);
+ __ JumpIfSmi(x10, &runtime);
+ __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
+
+ // Check that the JSArray is the fast case.
+ __ Ldr(last_match_info_elements,
+ FieldMemOperand(x10, JSArray::kElementsOffset));
+ __ Ldr(x10,
+ FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
+
+ // Check that the last match info has space for the capture registers and the
+ // additional information (overhead).
+ // (number_of_captures + 1) * 2 + overhead <= last match info size
+ // (number_of_captures * 2) + 2 + overhead <= last match info size
+ // number_of_capture_registers + overhead <= last match info size
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(last_match_info_elements,
+ FixedArray::kLengthOffset));
+ __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
+ __ Cmp(x11, x10);
+ __ B(gt, &runtime);
+
+ // Store the capture count.
+ __ SmiTag(x10, number_of_capture_registers);
+ __ Str(x10,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ // Use x10 as the subject string in order to only need
+ // one RecordWriteStub.
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ Register last_match_offsets = x13;
+ Register offsets_vector_index = x14;
+ Register current_offset = x15;
+
+ // Get the static offsets vector filled by the native regexp code
+ // and fill the last match info.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate);
+ __ Mov(offsets_vector_index, Operand(address_of_static_offsets_vector));
+
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // iterates down to zero (inclusive).
+ __ Add(last_match_offsets,
+ last_match_info_elements,
+ RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
+ __ Bind(&next_capture);
+ __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
+ __ B(mi, &done);
+ // Read two 32-bit values from the static offsets vector buffer into
+ // an X register.
+ __ Ldr(current_offset,
+ MemOperand(offsets_vector_index, kWRegSizeInBytes * 2, PostIndex));
+ // Store the smi values in the last match info.
+ __ SmiTag(x10, current_offset);
+ // Clearing the bottom 32 bits gives us a Smi.
+ STATIC_ASSERT(kSmiShift == 32);
+ __ And(x11, current_offset, ~kWRegMask);
+ __ Stp(x10,
+ x11,
+ MemOperand(last_match_offsets, kXRegSizeInBytes * 2, PostIndex));
+ __ B(&next_capture);
+ __ Bind(&done);
+
+ // Return last match info.
+ __ Peek(x0, kLastMatchInfoOffset);
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&exception);
+ Register exception_value = x0;
+ // A stack overflow (on the backtrack stack) may have occurred
+ // in the RegExp code, but no exception has been created yet.
+ // If there is no pending exception, handle that case in the runtime system.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x11,
+ Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Ldr(exception_value, MemOperand(x11));
+ __ Cmp(x10, exception_value);
+ __ B(eq, &runtime);
+
+ __ Str(x10, MemOperand(x11)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ Label termination_exception;
+ __ JumpIfRoot(exception_value,
+ Heap::kTerminationExceptionRootIndex,
+ &termination_exception);
+
+ __ Throw(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&termination_exception);
+ __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&failure);
+ __ Mov(x0, Operand(masm->isolate()->factory()->null_value()));
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&runtime);
+ __ PopCPURegList(used_callee_saved_registers);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ Bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ B(ne, &not_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ __ Bind(&external_string);
+ if (masm->emit_debug_code()) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Tst(x10, kIsIndirectStringMask);
+ __ Check(eq, kExternalStringExpectedButNotFound);
+ __ And(x10, x10, kStringRepresentationMask);
+ __ Cmp(x10, 0);
+ __ Check(ne, kExternalStringExpectedButNotFound);
+ }
+ __ Ldr(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ B(&seq_string); // Go to (5).
+
+ // (8) If this is a short external string or not a string, bail out to
+ // runtime.
+ __ Bind(&not_long_external);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ TestAndBranchIfAnySet(string_representation,
+ kShortExternalStringMask | kIsNotStringMask,
+ &runtime);
+
+ // (9) Sliced string. Replace subject with parent.
+ __ Ldr(sliced_string_offset,
+ UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ B(&check_underlying); // Go to (4).
+#endif
+}
+
+
+// TODO(jbramley): Don't use static registers here, but take them as arguments.
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ ASM_LOCATION("GenerateRecordCallTarget");
+ // Cache the called function in a feedback vector slot. Cache states are
+ // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
+ // x0 : number of arguments to the construct function
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi)
+ Label check_array, initialize_array, initialize_non_array, megamorphic, done;
+
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ Heap::RootListIndex kMegamorphicRootIndex = Heap::kUndefinedValueRootIndex;
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->the_hole_value());
+ Heap::RootListIndex kUninitializedRootIndex = Heap::kTheHoleValueRootIndex;
+ ASSERT_EQ(*TypeFeedbackInfo::PremonomorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->null_value());
+ Heap::RootListIndex kPremonomorphicRootIndex = Heap::kNullValueRootIndex;
+
+ // Load the cache state.
+ __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ Cmp(x4, x1);
+ __ B(eq, &done);
+ __ JumpIfRoot(x4, kMegamorphicRootIndex, &done);
+
+ // Check if we're dealing with the Array function or not.
+ __ LoadArrayFunction(x5);
+ __ Cmp(x1, x5);
+ __ B(eq, &check_array);
+
+ // Non-array cache: Check the cache state.
+ __ JumpIfRoot(x4, kPremonomorphicRootIndex, &initialize_non_array);
+ __ JumpIfNotRoot(x4, kUninitializedRootIndex, &megamorphic);
+
+ // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
+ // immortal immovable object (null) so no write-barrier is needed.
+ __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ __ LoadRoot(x10, kPremonomorphicRootIndex);
+ __ Str(x10, FieldMemOperand(x4, FixedArray::kHeaderSize));
+ __ B(&done);
+
+ // Array cache: Check the cache state to see if we're in a monomorphic
+ // state where the state object is an AllocationSite object.
+ __ Bind(&check_array);
+ __ Ldr(x5, FieldMemOperand(x4, AllocationSite::kMapOffset));
+ __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex, &done);
+
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ JumpIfRoot(x4, kUninitializedRootIndex, &initialize_array);
+ __ JumpIfRoot(x4, kPremonomorphicRootIndex, &initialize_array);
+
+ // Both caches: Monomorphic -> megamorphic. The sentinel is an
+ // immortal immovable object (undefined) so no write-barrier is needed.
+ __ Bind(&megamorphic);
+ __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ __ LoadRoot(x10, kMegamorphicRootIndex);
+ __ Str(x10, FieldMemOperand(x4, FixedArray::kHeaderSize));
+ __ B(&done);
+
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ Bind(&initialize_array);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateAllocationSiteStub create_stub;
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(x0);
+ __ Push(x0, x1, x2, x3);
+
+ __ CallStub(&create_stub);
+
+ __ Pop(x3, x2, x1, x0);
+ __ SmiUntag(x0);
+ }
+ __ B(&done);
+
+ // Non-array cache: Premonomorphic -> monomorphic.
+ __ Bind(&initialize_non_array);
+ __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ // TODO(all): Does the value need to be left in x4? If not, FieldMemOperand
+ // could be used to avoid this add.
+ __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(x1, MemOperand(x4, 0));
+
+ __ Push(x4, x2, x1);
+ __ RecordWrite(x2, x4, x1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(x1, x2, x4);
+
+ // TODO(all): Are x4, x2 and x1 outputs? This isn't clear.
+ __ Bind(&done);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallFunctionStub::Generate");
+ // x1 function the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if x2 is not undefined)
+ Register function = x1;
+ Register cache_cell = x2;
+ Register slot = x3;
+ Register type = x4;
+ Label slow, non_function, wrap, cont;
+
+ // TODO(jbramley): This function has a lot of unnamed registers. Name them,
+ // and tidy things up a bit.
+
+ if (NeedsChecks()) {
+ // Check that the function is really a JavaScript function.
+ __ JumpIfSmi(function, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
+ }
+
+ // Fast-case: Invoke the function now.
+ // x1 function pushed function
+ ParameterCount actual(argc_);
+
+ if (CallAsMethod()) {
+ if (NeedsChecks()) {
+ // Do not transform the receiver for strict mode functions.
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont);
+
+ // Do not transform the receiver for native functions (the compiler hints
+ // are already in w4).
+ __ Tbnz(w4, SharedFunctionInfo::kNative, &cont);
+ }
+
+ // Compute the receiver in non-strict mode.
+ __ Peek(x3, argc_ * kPointerSize);
+
+ if (NeedsChecks()) {
+ __ JumpIfSmi(x3, &wrap);
+ __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
+ } else {
+ __ B(&wrap);
+ }
+
+ __ Bind(&cont);
+ }
+ __ InvokeFunction(function,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper());
+
+ if (NeedsChecks()) {
+ // Slow-case: Non-function called.
+ __ Bind(&slow);
+ if (RecordCallTarget()) {
+ // If there is a call target cache, mark it megamorphic in the
+ // non-function case. MegamorphicSentinel is an immortal immovable object
+ // (undefined) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot,
+ kPointerSizeLog2));
+ __ LoadRoot(x11, Heap::kUndefinedValueRootIndex);
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ }
+ // Check for function proxy.
+ // x4 (type) : function type.
+ __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function);
+ __ Push(function); // put proxy as additional argument
+ __ Mov(x0, argc_ + 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ Bind(&non_function);
+ __ Poke(function, argc_ * kXRegSizeInBytes);
+ __ Mov(x0, argc_); // Set up the number of arguments.
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ if (CallAsMethod()) {
+ __ Bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Pop(x1);
+ }
+ __ Poke(x0, argc_ * kPointerSize);
+ __ B(&cont);
+ }
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallConstructStub::Generate");
+ // x0 : number of arguments
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if r2 is not undefined)
+ Register function = x1;
+ Label slow, non_function_call;
+
+ // Check that the function is not a smi.
+ __ JumpIfSmi(function, &non_function_call);
+ // Check that the function is a JSFunction.
+ Register object_type = x10;
+ __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
+ &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
+
+ // Jump to the function-specific construct stub.
+ Register jump_reg = x4;
+ Register shared_func_info = jump_reg;
+ Register cons_stub = jump_reg;
+ Register cons_stub_code = jump_reg;
+ __ Ldr(shared_func_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(cons_stub,
+ FieldMemOperand(shared_func_info,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(cons_stub_code);
+
+ Label do_call;
+ __ Bind(&slow);
+ __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(ne, &non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ B(&do_call);
+
+ __ Bind(&non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+
+ __ Bind(&do_call);
+ // Set expected number of arguments to zero (not changing x0).
+ __ Mov(x2, 0);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+
+ // If the receiver is not a string trigger the non-string case.
+ __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ __ Bind(&got_smi_index_);
+ // Check for index out of range.
+ __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
+ __ Cmp(result_, Operand::UntagSmi(index_));
+ __ B(ls, index_out_of_range_);
+
+ __ SmiUntag(index_);
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_,
+ result_,
+ &call_runtime_);
+ __ SmiTag(result_);
+ __ Bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ __ Bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ result_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ // Save object_ on the stack and pass index_ as argument for runtime call.
+ __ Push(object_, index_);
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Mov(index_, x0);
+ __ Pop(object_);
+ // Reload the instance type.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ B(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code for getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ Bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ SmiTag(index_);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ Cmp(code_, Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
+ __ B(hi, &slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point the code register contains a smi-tagged ASCII char code.
+ STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
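+ // Shifting right by (kSmiShift - kPointerSizeLog2) turns the smi-tagged char
+ // code directly into a byte offset (code * kPointerSize) into the cache.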
+ __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+ __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
+ __ Bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ Bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ Push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ // Inputs are in x1 (lhs) and x0 (rhs).
+ ASSERT(state_ == CompareIC::SMI);
+ ASM_LOCATION("ICCompareStub[Smis]");
+ Label miss;
+ // Bail out (to 'miss') unless both x0 and x1 are smis.
+ __ JumpIfEitherNotSmi(x0, x1, &miss);
+
+ // TODO(jbramley): Why do we only set the flags for EQ?
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ __ Subs(x0, x0, x1);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(x1);
+ __ Sub(x0, x1, Operand::UntagSmi(x0));
+ }
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
+ ASM_LOCATION("ICCompareStub[HeapNumbers]");
+
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss, handle_lhs, values_in_d_regs;
+ Label untag_rhs, untag_lhs;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(lhs, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rhs, &miss);
+ }
+
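+ // Speculatively convert both operands to doubles, assuming they are smis. If
+ // an operand turns out to be a heap number, its value is loaded below and the
+ // speculative result is ignored.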
+ __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
+ __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
+
+ // Load rhs if it's a heap number.
+ __ JumpIfSmi(rhs, &handle_lhs);
+ __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+ // Load lhs if it's a heap number.
+ __ Bind(&handle_lhs);
+ __ JumpIfSmi(lhs, &values_in_d_regs);
+ __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ __ Bind(&values_in_d_regs);
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &unordered); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&unordered);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+
+ __ Bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
+ __ JumpIfSmi(lhs, &unordered);
+ __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
+ __ B(&unordered);
+ }
+
+ __ Bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ ASM_LOCATION("ICCompareStub[InternalizedStrings]");
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+ // Check that both operands are internalized strings.
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ Register rhs_type = x10;
+ Register lhs_type = x11;
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ __ Orr(x12, lhs_type, rhs_type);
+ __ TestAndBranchIfAnySet(
+ x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
+
+ // Internalized strings are compared by identity.
+ STATIC_ASSERT(EQUAL == 0);
+ __ Cmp(lhs, rhs);
+ __ Cset(result, ne);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASM_LOCATION("ICCompareStub[UniqueNames]");
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ Register lhs_instance_type = w2;
+ Register rhs_instance_type = w3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in lhs_instance_type and rhs_instance_type.
+ __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
+
+ // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
+ // should have kInternalizedTag set.
+ __ JumpIfNotUniqueName(lhs_instance_type, &miss);
+ __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+
+ // Unique names are compared by identity.
+ STATIC_ASSERT(EQUAL == 0);
+ __ Cmp(lhs, rhs);
+ __ Cset(result, ne);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRING);
+ ASM_LOCATION("ICCompareStub[Strings]");
+
+ Label miss;
+
+ bool equality = Token::IsEqualityOp(op_);
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ // Check that both operands are strings.
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ Register rhs_type = x10;
+ Register lhs_type = x11;
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ Orr(x12, lhs_type, rhs_type);
+ __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
+
+ // Fast check for identical strings.
+ Label not_equal;
+ __ Cmp(lhs, rhs);
+ __ B(ne, &not_equal);
+ __ Mov(result, EQUAL);
+ __ Ret();
+
+ __ Bind(&not_equal);
+ // Handle non-identical strings.
+
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
+ if (equality) {
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ Label not_internalized_strings;
+ __ Orr(x12, lhs_type, rhs_type);
+ __ TestAndBranchIfAnySet(
+ x12, kIsNotInternalizedMask, &not_internalized_strings);
+ // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
+ __ Ret();
+ __ Bind(&not_internalized_strings);
+ }
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+ lhs_type, rhs_type, x12, x13, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, lhs, rhs, x10, x11, x12);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, lhs, rhs, x10, x11, x12, x13);
+ }
+
+ // Handle more complex cases in runtime.
+ __ Bind(&runtime);
+ __ Push(lhs, rhs);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::OBJECT);
+ ASM_LOCATION("ICCompareStub[Objects]");
+
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
+ __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
+
+ ASSERT(GetCondition() == eq);
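+ // rhs - lhs is zero (EQUAL) iff both operands are the same object; this stub
+ // only handles equality.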
+ __ Sub(result, rhs, lhs);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ ASM_LOCATION("ICCompareStub[KnownObjects]");
+
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Cmp(rhs_map, Operand(known_map_));
+ __ B(ne, &miss);
+ __ Cmp(lhs_map, Operand(known_map_));
+ __ B(ne, &miss);
+
+ __ Sub(result, rhs, lhs);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+// This method handles the case where a compare stub had the wrong
+// implementation. It calls a miss handler, which re-writes the stub. All other
+// ICCompareStub::Generate* methods should fall back into this one if their
+// operands were not the expected types.
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("ICCompareStub[Miss]");
+
+ Register stub_entry = x11;
+ {
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ Register op = x10;
+ Register left = x1;
+ Register right = x0;
+ // Preserve some caller-saved registers.
+ __ Push(x1, x0, lr);
+ // Push the arguments.
+ __ Mov(op, Operand(Smi::FromInt(op_)));
+ __ Push(left, right, op);
+
+ // Call the miss handler. This also pops the arguments.
+ __ CallExternalReference(miss, 3);
+
+ // Compute the entry point of the rewritten stub.
+ __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
+ // Restore caller-saved registers.
+ __ Pop(lr, x0, x1);
+ }
+
+ // Tail-call to the new stub.
+ __ Jump(stub_entry);
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ ASSERT(!AreAliased(hash, character));
+
+ // hash = character + (character << 10);
+ __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+ // Untag smi seed and add the character.
+ __ Add(hash, character, Operand(hash, LSR, kSmiShift));
+
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+
+ // hash += hash << 10;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+ // hash ^= hash >> 6;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ ASSERT(!AreAliased(hash, character));
+
+ // hash += character;
+ __ Add(hash, hash, character);
+
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+
+ // hash += hash << 10;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+ // hash ^= hash >> 6;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+ Register scratch_w = scratch.W();
+ ASSERT(!AreAliased(hash_w, scratch_w));
+
+ // hash += hash << 3;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
+ // hash ^= hash >> 11;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
+ // hash += hash << 15;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
+
+ __ Ands(hash_w, hash_w, String::kHashBitMask);
+
+ // if (hash == 0) hash = 27;
+ __ Mov(scratch_w, StringHasher::kZeroHash);
+ __ Csel(hash_w, scratch_w, hash_w, eq);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("SubStringStub::Generate");
+ Label runtime;
+
+ // Stack frame on entry.
+ // lr: return address
+ // jssp[0]: substring "to" offset
+ // jssp[8]: substring "from" offset
+ // jssp[16]: pointer to string object
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length (in debug mode).
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+ Register to = x0;
+ Register from = x15;
+ Register input_string = x10;
+ Register input_length = x11;
+ Register input_type = x12;
+ Register result_string = x0;
+ Register result_length = x1;
+ Register temp = x3;
+
+ __ Peek(to, kToOffset);
+ __ Peek(from, kFromOffset);
+
+ // Check that both from and to are smis. If not, jump to runtime.
+ __ JumpIfEitherNotSmi(from, to, &runtime);
+ __ SmiUntag(from);
+ __ SmiUntag(to);
+
+ // Calculate difference between from and to. If to < from, branch to runtime.
+ __ Subs(result_length, to, from);
+ __ B(mi, &runtime);
+
+ // Check from is positive.
+ __ Tbnz(from, kWSignBit, &runtime);
+
+ // Make sure first argument is a string.
+ __ Peek(input_string, kStringOffset);
+ __ JumpIfSmi(input_string, &runtime);
+ __ IsObjectJSStringType(input_string, input_type, &runtime);
+
+ Label single_char;
+ __ Cmp(result_length, 1);
+ __ B(eq, &single_char);
+
+ // Short-cut for the case of trivial substring.
+ Label return_x0;
+ __ Ldrsw(input_length,
+ UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
+
+ __ Cmp(result_length, input_length);
+ __ CmovX(x0, input_string, eq);
+ // Return original string.
+ __ B(eq, &return_x0);
+
+ // Longer than original string's length or negative: unsafe arguments.
+ __ B(hi, &runtime);
+
+ // Shorter than original string's length: an actual substring.
+
+ // x0 to substring end character offset
+ // x1 result_length length of substring result
+ // x10 input_string pointer to input string object
+ // x10 unpacked_string pointer to unpacked string object
+ // x11 input_length length of input string
+ // x12 input_type instance type of input string
+ // x15 from substring start character offset
+
+ // Deal with different string types: update the index if necessary and put
+ // the underlying string into register unpacked_string.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ Label update_instance_type;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+
+ // Test for string types, and branch/fall through to appropriate unpacking
+ // code.
+ __ Tst(input_type, kIsIndirectStringMask);
+ __ B(eq, &seq_or_external_string);
+ __ Tst(input_type, kSlicedNotConsMask);
+ __ B(ne, &sliced_string);
+
+ Register unpacked_string = input_string;
+
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
+ __ Ldr(unpacked_string,
+ FieldMemOperand(input_string, ConsString::kFirstOffset));
+ __ B(&update_instance_type);
+
+ __ Bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ Ldrsw(temp,
+ UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
+ __ Add(from, from, temp);
+ __ Ldr(unpacked_string,
+ FieldMemOperand(input_string, SlicedString::kParentOffset));
+
+ __ Bind(&update_instance_type);
+ __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
+ __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+ // TODO(all): This generates "b #+0x4". Can these be optimised out?
+ __ B(&underlying_unpacked);
+
+ __ Bind(&seq_or_external_string);
+ // Sequential or external string. Registers unpacked_string and input_string
+ // alias, so there's nothing to do here.
+
+ // x0 result_string pointer to result string object (uninit)
+ // x1 result_length length of substring result
+ // x10 unpacked_string pointer to unpacked string object
+ // x11 input_length length of input string
+ // x12 input_type instance type of input string
+ // x15 from substring start character offset
+ __ Bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ __ Cmp(result_length, SlicedString::kMinLength);
+ // Short slice. Copy instead of slicing.
+ __ B(lt, &copy_routine);
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong, because we always have to recheck the encoding
+ // of the newly created string's parent anyway, due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
+ __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
+ &runtime);
+ __ B(&set_slice_header);
+
+ __ Bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
+ &runtime);
+
+ __ Bind(&set_slice_header);
+ __ SmiTag(from);
+ __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
+ __ Str(unpacked_string,
+ FieldMemOperand(result_string, SlicedString::kParentOffset));
+ __ B(&return_x0);
+
+ __ Bind(&copy_routine);
+ }
+
+ // x0 result_string pointer to result string object (uninit)
+ // x1 result_length length of substring result
+ // x10 unpacked_string pointer to unpacked string object
+ // x11 input_length length of input string
+ // x12 input_type instance type of input string
+ // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
+ // x13 substring_char0 pointer to first char of substring (uninit)
+ // x14 result_char0 pointer to first char of result (uninit)
+ // x15 from substring start character offset
+ Register unpacked_char0 = x13;
+ Register substring_char0 = x13;
+ Register result_char0 = x14;
+ Label two_byte_sequential, sequential_string, allocate_result;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+
+ __ Tst(input_type, kExternalStringTag);
+ __ B(eq, &sequential_string);
+
+ __ Tst(input_type, kShortExternalStringTag);
+ __ B(ne, &runtime);
+ __ Ldr(unpacked_char0,
+ FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
+ // unpacked_char0 points to the first character of the underlying string.
+ __ B(&allocate_result);
+
+ __ Bind(&sequential_string);
+ // Locate first character of underlying subject string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Add(unpacked_char0, unpacked_string,
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ __ Bind(&allocate_result);
+ // Sequential ASCII string. Allocate the result.
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
+
+ // Allocate and copy the resulting ASCII string.
+ __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Add(substring_char0, unpacked_char0, from);
+
+ // Locate first character of result.
+ __ Add(result_char0, result_string,
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+ __ B(&return_x0);
+
+ // Allocate and copy the resulting two-byte string.
+ __ Bind(&two_byte_sequential);
+ __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
+
+ // Locate first character of result.
+ __ Add(result_char0, result_string,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ __ Add(result_length, result_length, result_length);
+ __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+
+ __ Bind(&return_x0);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
+ __ Drop(3);
+ __ Ret();
+
+ __ Bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // x1: result_length
+ // x10: input_string
+ // x12: input_type
+ // x15: from (untagged)
+ __ SmiTag(from);
+ StringCharAtGenerator generator(
+ input_string, from, result_length, x0,
+ &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ // TODO(jbramley): Why doesn't this jump to return_x0?
+ __ Drop(3);
+ __ Ret();
+ generator.SkipSlow(masm, &runtime);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
+ Register result = x0;
+ Register left_length = scratch1;
+ Register right_length = scratch2;
+
+ // Compare lengths. If lengths differ, strings can't be equal. Lengths are
+ // smis, and don't need to be untagged.
+ Label strings_not_equal, check_zero_length;
+ __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
+ __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
+ __ Cmp(left_length, right_length);
+ __ B(eq, &check_zero_length);
+
+ __ Bind(&strings_not_equal);
+ __ Mov(result, Operand(Smi::FromInt(NOT_EQUAL)));
+ __ Ret();
+
+ // Check if the length is zero. If so, the strings must be equal (and empty.)
+ Label compare_chars;
+ __ Bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Cbnz(left_length, &compare_chars);
+ __ Mov(result, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+
+ // Compare characters. Falls through if all characters are equal.
+ __ Bind(&compare_chars);
+ GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
+ scratch3, &strings_not_equal);
+
+ // Characters in strings are equal.
+ __ Mov(result, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+ Label result_not_equal, compare_lengths;
+
+ // Find minimum length and length difference.
+ Register length_delta = scratch3;
+ __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Subs(length_delta, scratch1, scratch2);
+
+ Register min_length = scratch1;
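+  // The flags are still set from the Subs above: if the left string is longer
+  // (gt), take the right string's length, so min_length = min(left, right).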
+ __ Csel(min_length, scratch2, scratch1, gt);
+ __ Cbz(min_length, &compare_lengths);
+
+ // Compare loop.
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, min_length, scratch2, scratch4,
+ &result_not_equal);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ Bind(&compare_lengths);
+
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+
+ // Use length_delta as result if it's zero.
+ Register result = x0;
+ __ Subs(result, length_delta, 0);
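+  // On this fall-through path the flags now reflect the length difference, so
+  // the conditional moves below yield GREATER or LESS for a non-zero delta,
+  // while a zero delta leaves EQUAL (0) in the result.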
+
+ __ Bind(&result_not_equal);
+ Register greater = x10;
+ Register less = x11;
+ __ Mov(greater, Operand(Smi::FromInt(GREATER)));
+ __ Mov(less, Operand(Smi::FromInt(LESS)));
+ __ CmovX(result, greater, gt);
+ __ CmovX(result, less, lt);
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal) {
+ ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
+
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Add(left, left, scratch1);
+ __ Add(right, right, scratch1);
+
+ Register index = length;
+ __ Neg(index, length); // index = -length;
+
+ // Compare loop
+ Label loop;
+ __ Bind(&loop);
+ __ Ldrb(scratch1, MemOperand(left, index));
+ __ Ldrb(scratch2, MemOperand(right, index));
+ __ Cmp(scratch1, scratch2);
+ __ B(ne, chars_not_equal);
+ __ Add(index, index, 1);
+ __ Cbnz(index, &loop);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ Counters* counters = masm->isolate()->counters();
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[8]: left string
+ Register right = x10;
+ Register left = x11;
+ Register result = x0;
+ __ Pop(right, left);
+
+ Label not_same;
+ __ Subs(result, right, left);
+ __ B(ne, &not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ __ Ret();
+
+ __ Bind(&not_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
+
+ // Compare flat ASCII strings natively. Remove arguments from stack first,
+ // as this function will generate a return.
+ __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
+
+ __ Bind(&runtime);
+
+ // Push arguments back on to the stack.
+ // sp[0] = right string
+ // sp[8] = left string.
+ __ Push(left, right);
+
+ // Call the runtime.
+ // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void ArrayPushStub::Generate(MacroAssembler* masm) {
+ Register receiver = x0;
+
+ int argc = arguments_count();
+
+ if (argc == 0) {
+ // Nothing to do, just return the length.
+ __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+ return;
+ }
+
+ Isolate* isolate = masm->isolate();
+
+ if (argc != 1) {
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ Label call_builtin, attempt_to_grow_elements, with_write_barrier;
+
+ Register elements_length = x8;
+ Register length = x7;
+ Register elements = x6;
+ Register end_elements = x5;
+ Register value = x4;
+ // Get the elements array of the object.
+ __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ x10,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+ }
+
+ // Get the array's length and calculate new length.
+ __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Add(length, length, Operand(Smi::FromInt(argc)));
+
+ // Check if we could survive without allocation.
+ __ Ldr(elements_length,
+ FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(length, elements_length);
+
+ const int kEndElementsOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
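+  // Combined with an end_elements pointer computed from the new (post-push)
+  // length, this offset addresses the slot of the first pushed element.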
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ __ B(gt, &attempt_to_grow_elements);
+
+ // Check if value is a smi.
+ __ Peek(value, (argc - 1) * kPointerSize);
+ __ JumpIfNotSmi(value, &with_write_barrier);
+
+ // Store the value.
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ Add(end_elements, elements,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+ } else {
+ // TODO(all): ARM has a redundant cmp here.
+ __ B(gt, &call_builtin);
+
+ __ Peek(value, (argc - 1) * kPointerSize);
+ __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
+ &call_builtin, argc * kDoubleSize);
+ }
+
+ // Save new length.
+ __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Return length.
+ __ Drop(argc + 1);
+ __ Mov(x0, length);
+ __ Ret();
+
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ __ Bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ __ Bind(&with_write_barrier);
+
+ if (IsFastSmiElementsKind(elements_kind())) {
+ if (FLAG_trace_elements_transitions) {
+ __ B(&call_builtin);
+ }
+
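+    // A heap number would require a transition to double elements, which this
+    // stub does not perform, so delegate to the builtin.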
+ __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ JumpIfHeapNumber(x10, &call_builtin);
+
+ ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
+ ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
+ __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
+ const int header_size = FixedArrayBase::kHeaderSize;
+ // Verify that the object can be transitioned in place.
+ const int origin_offset = header_size + elements_kind() * kPointerSize;
+ __ ldr(x11, FieldMemOperand(receiver, origin_offset));
+ __ ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
+ __ cmp(x11, x12);
+ __ B(ne, &call_builtin);
+
+ const int target_offset = header_size + target_kind * kPointerSize;
+ __ Ldr(x10, FieldMemOperand(x10, target_offset));
+ __ Mov(x11, receiver);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, DONT_TRACK_ALLOCATION_SITE, NULL);
+ }
+
+ // Save new length.
+ __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Store the value.
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ Add(end_elements, elements,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+ __ RecordWrite(elements,
+ end_elements,
+ value,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Drop(argc + 1);
+ __ Mov(x0, length);
+ __ Ret();
+
+ __ Bind(&attempt_to_grow_elements);
+
+ if (!FLAG_inline_new) {
+ __ B(&call_builtin);
+ }
+
+ Register argument = x2;
+ __ Peek(argument, (argc - 1) * kPointerSize);
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ if (IsFastSmiElementsKind(elements_kind())) {
+ __ JumpIfNotSmi(argument, &call_builtin);
+ }
+
+ // We could be lucky and the elements array could be at the top of new-space.
+ // In this case we can just grow it in place by moving the allocation pointer
+ // up.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate);
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate);
+
+ const int kAllocationDelta = 4;
+ ASSERT(kAllocationDelta >= argc);
+ Register allocation_top_addr = x5;
+ Register allocation_top = x9;
+ // Load top and check if it is the end of elements.
+ __ Add(end_elements, elements,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Add(end_elements, end_elements, kEndElementsOffset);
+ __ Mov(allocation_top_addr, Operand(new_space_allocation_top));
+ __ Ldr(allocation_top, MemOperand(allocation_top_addr));
+ __ Cmp(end_elements, allocation_top);
+ __ B(ne, &call_builtin);
+
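+  // Check that bumping the allocation top by kAllocationDelta words stays
+  // within the new-space allocation limit.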
+ __ Mov(x10, Operand(new_space_allocation_limit));
+ __ Ldr(x10, MemOperand(x10));
+ __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
+ __ Cmp(allocation_top, x10);
+ __ B(hi, &call_builtin);
+
+ // We fit and could grow elements.
+ // Update new_space_allocation_top.
+ __ Str(allocation_top, MemOperand(allocation_top_addr));
+ // Push the argument.
+ __ Str(argument, MemOperand(end_elements));
+ // Fill the rest with holes.
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < kAllocationDelta; i++) {
+ // TODO(all): Try to use stp here.
+ __ Str(x10, MemOperand(end_elements, i * kPointerSize));
+ }
+
+ // Update elements' and array's sizes.
+ __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Add(elements_length,
+ elements_length,
+ Operand(Smi::FromInt(kAllocationDelta)));
+ __ Str(elements_length,
+ FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Elements are in new space, so write barrier is not required.
+ __ Drop(argc + 1);
+ __ Mov(x0, length);
+ __ Ret();
+
+ __ Bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x1 : left
+ // -- x0 : right
+ // -- lr : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ // Load x2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ LoadObject(x2, handle(isolate->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(x2, kExpectedAllocationSite);
+ __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
+ kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(state_);
+ __ TailCallStub(&stub);
+}
+
+
+bool CodeStub::CanUseFPRegisters() {
+  // FP registers are always available on A64.
+ return true;
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  // We need some extra registers for this stub. They have been allocated,
+  // but we need to save them before using them.
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ Register value = regs_.scratch0();
+ __ Ldr(value, MemOperand(regs_.address()));
+ __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
+
+ __ CheckPageFlagSet(regs_.object(),
+ value,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ Bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ Register address =
+ x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.Is(regs_.object()));
+ ASSERT(!address.Is(x0));
+ __ Mov(address, regs_.address());
+ __ Mov(x0, regs_.object());
+ __ Mov(x1, address);
+ __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ ExternalReference function = (mode == INCREMENTAL_COMPACTION)
+ ? ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate())
+ : ExternalReference::incremental_marking_record_write_function(
+ masm->isolate());
+ __ CallCFunction(function, 3, 0);
+
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ Register mem_chunk = regs_.scratch0();
+ Register counter = regs_.scratch1();
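+  // Clear the page offset bits of the object's address to reach the
+  // MemoryChunk header, then decrement its write barrier counter. Once the
+  // counter goes negative we have to inform the incremental marker.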
+ __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
+ __ Ldr(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ Subs(counter, counter, 1);
+ __ Str(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ B(mi, &need_incremental);
+
+ // If the object is not black we don't have to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&on_black);
+ // Get the value from the slot.
+ Register value = regs_.scratch0();
+ __ Ldr(value, MemOperand(regs_.address()));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlagClear(value,
+ regs_.scratch1(),
+ MemoryChunk::kEvacuationCandidateMask,
+ &ensure_not_white);
+
+ __ CheckPageFlagClear(regs_.object(),
+ regs_.scratch1(),
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ &need_incremental);
+
+ __ Bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.address(), regs_.object());
+ __ EnsureNotWhite(value,
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ regs_.scratch2(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ Bind(&need_incremental);
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+  // We patch the first two instructions back and forth between a nop and a
+  // real branch when we start and stop incremental heap marking.
+ // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
+ // are generated.
+ // See RecordWriteStub::Patch for details.
+ {
+ InstructionAccurateScope scope(masm, 2);
+ __ adr(xzr, &skip_to_incremental_noncompacting);
+ __ adr(xzr, &skip_to_incremental_compacting);
+ }
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ Bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ Bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // TODO(all): Possible optimisations in this function:
+ // 1. Merge CheckFastElements and CheckFastSmiElements, so that the map
+ // bitfield is loaded only once.
+ // 2. Refactor the Ldr/Add sequence at the start of fast_elements and
+ // smi_element.
+
+ // x0 value element value to store
+ // x3 index_smi element index as smi
+ // sp[0] array_index_smi array literal index in function as smi
+ // sp[1] array array literal
+
+ Register value = x0;
+ Register index_smi = x3;
+
+ Register array = x1;
+ Register array_map = x2;
+ Register array_index_smi = x4;
+ __ PeekPair(array_index_smi, array, 0);
+ __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
+
+ Label double_elements, smi_element, fast_elements, slow_elements;
+ __ CheckFastElements(array_map, x10, &double_elements);
+ __ JumpIfSmi(value, &smi_element);
+ __ CheckFastSmiElements(array_map, x10, &fast_elements);
+
+ // Store into the array literal requires an elements transition. Call into
+ // the runtime.
+ __ Bind(&slow_elements);
+ __ Push(array, index_smi, value);
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ __ Push(x11, array_index_smi);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ __ Bind(&fast_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
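+  // Compute the untagged address of the slot for index_smi, then store the
+  // value through it.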
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(value, MemOperand(x11));
+ // Update the write barrier for the array store.
+ __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
+ __ Bind(&smi_element);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
+ __ Ret();
+
+ __ Bind(&double_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
+ &slow_elements);
+ __ Ret();
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ // TODO(jbramley): The ARM code leaves the (shifted) offset in r1. Why?
+ CEntryStub ces(1, kSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ Ldr(x1, MemOperand(fp, parameter_count_offset));
+ if (function_mode_ == JS_FUNCTION_STUB_MODE) {
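+    // Account for the receiver, which is not included in the parameter count.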
+ __ Add(x1, x1, 1);
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ Drop(x1);
+ // Return to IC Miss stub, continuation still on stack.
+ __ Ret();
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ // TODO(all): This needs to be reliably consistent with
+ // kReturnAddressDistanceFromFunctionStart in ::Generate.
+ Assembler::BlockConstPoolScope no_const_pools(masm);
+ ProfileEntryHookStub stub;
+ __ Push(lr);
+ __ CallStub(&stub);
+ __ Pop(lr);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
+ // a "Push lr" instruction, followed by a call.
+ // TODO(jbramley): Verify that this call is always made with relocation.
+ static const int kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+
+ // Save all kCallerSaved registers (including lr), since this can be called
+ // from anywhere.
+ // TODO(jbramley): What about FP registers?
+ __ PushCPURegList(kCallerSaved);
+ ASSERT(kCallerSaved.IncludesAliasOf(lr));
+ const int kNumSavedRegs = kCallerSaved.Count();
+
+ // Compute the function's address as the first argument.
+ __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart);
+
+#if V8_HOST_ARCH_A64
+ uintptr_t entry_hook =
+ reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
+ __ Mov(x10, entry_hook);
+#else
+ // Under the simulator we need to indirect the entry hook through a trampoline
+ // function at a known address.
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ Mov(x10, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+  // The trampoline additionally takes the isolate as a third parameter.
+ __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
+#endif
+
+ // The caller's return address is above the saved temporaries.
+ // Grab its location for the second argument to the hook.
+ __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
+
+ {
+ // Create a dummy frame, as CallCFunction requires this.
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ CallCFunction(x10, 2, 0);
+ }
+
+ __ PopCPURegList(kCallerSaved);
+ __ Ret();
+}
+
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // When calling into C++ code the stack pointer must be csp.
+ // Therefore this code must use csp for peek/poke operations when the
+ // stub is generated. When the stub is called
+  // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame
+ // and configure the stack pointer *before* doing the call.
+ const Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+
+ // Put return address on the stack (accessible to GC through exit frame pc).
+ __ Poke(lr, 0);
+ // Call the C++ function.
+ __ Blr(x10);
+ // Return to calling code.
+ __ Peek(lr, 0);
+ __ Ret();
+
+ __ SetStackPointer(old_stack_pointer);
+}
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ // Make sure the caller configured the stack pointer (see comment in
+ // DirectCEntryStub::Generate).
+ ASSERT(csp.Is(__ StackPointer()));
+
+ intptr_t code =
+ reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
+ __ Mov(x10, target);
+ // Branch to the stub.
+ __ Blr(lr);
+}
+
+
+// Probe the name dictionary in the 'elements' register.
+// Jump to the 'done' label if a property with the given name is found.
+// Jump to the 'miss' label otherwise.
+//
+// If the lookup was successful, 'scratch2' will be equal to
+// elements + kPointerSize * index.
+// 'elements' and 'name' registers are preserved on miss.
+void NameDictionaryLookupStub::GeneratePositiveLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+
+ // Assert that name contains a string.
+ __ AssertName(name);
+
+ // Compute the capacity mask.
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
+ __ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following and instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(scratch2, scratch2, Operand(
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ }
+ __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ // TODO(jbramley): We need another scratch here, but some callers can't
+ // provide a scratch3 so we have to use Tmp1(). We should find a clean way
+ // to make it unavailable to the MacroAssembler for a short time.
+ __ Ldr(__ Tmp1(), FieldMemOperand(scratch2, kElementsStartOffset));
+ __ Cmp(name, __ Tmp1());
+ __ B(eq, done);
+ }
+
+ // The inlined probes didn't find the entry.
+ // Call the complete stub to scan the whole dictionary.
+
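+  // Preserve x0-x6 and lr around the call, except the scratch registers:
+  // scratch2 receives the entry index and both may be clobbered anyway.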
+ CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch1);
+ spill_list.Remove(scratch2);
+
+ __ PushCPURegList(spill_list);
+
+ if (name.is(x0)) {
+ ASSERT(!elements.is(x1));
+ __ Mov(x1, name);
+ __ Mov(x0, elements);
+ } else {
+ __ Mov(x0, elements);
+ __ Mov(x1, name);
+ }
+
+ Label not_found;
+ NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ Cbz(x0, &not_found);
+ __ Mov(scratch2, x2); // Move entry index into scratch2.
+ __ PopCPURegList(spill_list);
+ __ B(done);
+
+ __ Bind(&not_found);
+ __ PopCPURegList(spill_list);
+ __ B(miss);
+}
+
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ ASSERT(!AreAliased(receiver, properties, scratch0));
+ ASSERT(name->IsUniqueName());
+  // If the names of the slots probed for the hash value (probes 1 to
+  // kProbes - 1) are not equal to the name, and the kProbes-th slot is unused
+  // (its name is the undefined value), then the hash table is guaranteed not
+  // to contain the property. This holds even if some slots hold deleted
+  // properties (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
+ __ Sub(index, index, 1);
+ __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ Register tmp = index;
+ __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
+
+ // Stop if found the property.
+ __ Cmp(entity_name, Operand(name));
+ __ B(eq, miss);
+
+ Label good;
+ __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
+
+ // Check if the entry name is not a unique name.
+ __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ Ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entity_name, miss);
+ __ Bind(&good);
+ }
+
+ CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
+
+ __ PushCPURegList(spill_list);
+
+ __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Mov(x1, Operand(name));
+ NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ __ CallStub(&stub);
+ // Move stub return value to scratch0. Note that scratch0 is not included in
+ // spill_list and won't be clobbered by PopCPURegList.
+ __ Mov(scratch0, x0);
+ __ PopCPURegList(spill_list);
+
+ __ Cbz(scratch0, done);
+ __ B(miss);
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ //
+ // Arguments are in x0 and x1:
+ // x0: property dictionary.
+ // x1: the name of the property we are looking for.
+ //
+  // The return value is in x0: zero if the lookup failed, non-zero otherwise.
+  // If the lookup is successful, x2 will contain the index of the entry.
+
+ Register result = x0;
+ Register dictionary = x0;
+ Register key = x1;
+ Register index = x2;
+ Register mask = x3;
+ Register hash = x4;
+ Register undefined = x5;
+ Register entry_key = x6;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
+ __ Sub(mask, mask, 1);
+
+ __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ // Capacity is smi 2^n.
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following and instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(index, hash,
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift);
+ } else {
+ __ Mov(index, hash);
+ }
+ __ And(index, mask, Operand(index, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+ __ Cmp(entry_key, undefined);
+ __ B(eq, &not_in_dictionary);
+
+ // Stop if found the property.
+ __ Cmp(entry_key, key);
+ __ B(eq, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a unique name.
+ __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ }
+ }
+
+ __ Bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup, probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ Mov(result, 0);
+ __ Ret();
+ }
+
+ __ Bind(&in_dictionary);
+ __ Mov(result, 1);
+ __ Ret();
+
+ __ Bind(&not_in_dictionary);
+ __ Mov(result, 0);
+ __ Ret();
+}
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatch");
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(), mode);
+ __ TailCallStub(&stub);
+
+ } else if (mode == DONT_OVERRIDE) {
+ Register kind = x3;
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ // TODO(jbramley): Is this the best way to handle this? Can we make the
+ // tail calls conditional, rather than hopping over each one?
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ T stub(candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+// TODO(jbramley): If this needs to be a special case, make it a proper template
+// specialization, and not a separate function.
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatchOneArgument");
+ // x0 - argc
+ // x1 - constructor?
+ // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // sp[0] - last argument
+
+ Register allocation_site = x2;
+ Register kind = x3;
+
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // Is the low bit set? If so, the array is holey.
+ __ Tbnz(kind, 0, &normal_sequence);
+ }
+
+ // Look at the last argument.
+ // TODO(jbramley): What does a 0 argument represent?
+ __ Peek(x10, 0);
+ __ Cbz(x10, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ Bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the slot).
+ __ Orr(kind, kind, 1);
+
+ if (FLAG_debug_code) {
+ __ Ldr(x10, FieldMemOperand(allocation_site, 0));
+ __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
+ &normal_sequence);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Save the resulting elements kind in type info. We can't just store 'kind'
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field; upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ Ldr(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ Add(x11, x11, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ Str(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+
+ __ Bind(&normal_sequence);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ // TODO(jbramley): Is this the best way to handle this? Can we make the
+ // tail calls conditional, rather than hopping over each one?
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ ArraySingleArgumentConstructorStub stub(candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(kind);
+ stub.GetCode(isolate);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate);
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate);
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ Register argc = x0;
+ if (argument_count_ == ANY) {
+ Label zero_case, n_case;
+ __ Cbz(argc, &zero_case);
+ __ Cmp(argc, 1);
+ __ B(ne, &n_case);
+
+ // One argument.
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ Bind(&n_case);
+ // N arguments.
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("ArrayConstructorStub::Generate");
+ // ----------- S t a t e -------------
+ // -- x0 : argc (only if argument_count_ == ANY)
+ // -- x1 : constructor
+ // -- x2 : feedback vector (fixed array or undefined)
+ // -- x3 : slot index (if x2 is fixed array)
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ Register constructor = x1;
+ Register feedback_vector = x2;
+ Register slot_index = x3;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+    // The Smi check below catches both a NULL pointer and a Smi.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+
+ // In feedback_vector, we expect either undefined or a valid fixed array.
+ Label okay_here;
+ Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &okay_here);
+ __ Ldr(x10, FieldMemOperand(feedback_vector, FixedArray::kMapOffset));
+ __ Cmp(x10, Operand(fixed_array_map));
+ __ Assert(eq, kExpectedFixedArrayInFeedbackVector);
+
+ // slot_index should be a smi if we don't have undefined in feedback_vector.
+ __ AssertSmi(slot_index);
+
+ __ Bind(&okay_here);
+ }
+
+ Register allocation_site = x2; // Overwrites feedback_vector.
+ Register kind = x3;
+ Label no_info;
+ // Get the elements kind and case on that.
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &no_info);
+ __ Add(feedback_vector, feedback_vector,
+ Operand::UntagSmiAndScale(slot_index, kPointerSizeLog2));
+ __ Ldr(allocation_site, FieldMemOperand(feedback_vector,
+ FixedArray::kHeaderSize));
+
+ // If the feedback vector is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
+ __ Ldr(x10, FieldMemOperand(allocation_site, AllocationSite::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, &no_info);
+
+ __ Ldrsw(kind,
+ UntagSmiFieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ Bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label zero_case, n_case;
+ Register argc = x0;
+
+ __ Cbz(argc, &zero_case);
+ __ CompareAndBranch(argc, 1, ne, &n_case);
+
+ // One argument.
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
+ __ Cbz(x10, &packed_case);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+
+ __ Bind(&packed_case);
+ }
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ Bind(&n_case);
+ // N arguments.
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- x1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+
+ Register constructor = x1;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+    // The Smi check below catches both a NULL pointer and a Smi.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+ }
+
+ Register kind = w3;
+ // Figure out the right elements kind
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // TODO(jbramley): Add a helper function to read elements kind from an
+ // existing map.
+ // Load the map's "bit field 2" into result.
+ __ Ldr(kind, FieldMemOperand(x10, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ubfx(kind, kind, Map::kElementsKindShift, Map::kElementsKindBitCount);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ Cmp(x3, FAST_ELEMENTS);
+ __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
+ __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ }
+
+ Label fast_elements_case;
+ __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ Bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : callee
+ // -- x4 : call_data
+ // -- x2 : holder
+ // -- x1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 8] : first argument
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ Isolate* isolate = masm->isolate();
+
+ // FunctionCallbackArguments: context, callee and call data.
+ __ Push(context, callee, call_data);
+
+ // Load context from callee
+ __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
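+  // call_data has already been pushed, so the register is reused to hold
+  // undefined for the return value and return value default slots. Skip the
+  // load if it already holds undefined.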
+ if (!call_data_undefined) {
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ }
+ Register isolate_reg = x5;
+ __ Mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate)));
+
+ // FunctionCallbackArguments:
+ // return value, return value default, isolate, holder.
+ __ Push(call_data, call_data, isolate_reg, holder);
+
+ // Prepare arguments.
+ Register args = x6;
+ __ Mov(args, masm->StackPointer());
+
+ // Allocate the v8::Arguments structure in the arguments' space, since it's
+ // not controlled by GC.
+ const int kApiStackSpace = 4;
+
+  // Allocate space so that CallApiFunctionAndReturn can store some scratch
+  // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ // TODO(all): Optimize this with stp and suchlike.
+ ASSERT(!AreAliased(x0, api_function_address));
+ // x0 = FunctionCallbackInfo&
+ // Arguments is after the return address.
+ __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
+ // FunctionCallbackInfo::implicit_args_
+ __ Str(args, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ Str(x10, MemOperand(x0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ Mov(x10, argc);
+ __ Str(x10, MemOperand(x0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ Str(xzr, MemOperand(x0, 3 * kPointerSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+  // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ const int spill_offset = 1 + kApiStackSpace;
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- ...
+ // -- x2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = x2;
+
+ __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
+ __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
+
+ const int kApiStackSpace = 1;
+
+  // Allocate space so that CallApiFunctionAndReturn can store some scratch
+  // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // x1 (internal::Object** args_) as the data.
+ __ Poke(x1, 1 * kPointerSize);
+ __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ const int spill_offset = 1 + kApiStackSpace;
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/code-stubs-a64.h b/deps/v8/src/a64/code-stubs-a64.h
new file mode 100644
index 0000000000..0709bfc511
--- /dev/null
+++ b/deps/v8/src/a64/code-stubs-a64.h
@@ -0,0 +1,469 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_CODE_STUBS_A64_H_
+#define V8_A64_CODE_STUBS_A64_H_
+
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
+class StoreBufferOverflowStub: public PlatformCodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // TODO(all): These don't seem to be used any more. Delete them.
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class RecordWriteStub: public PlatformCodeStub {
+ public:
+ // Stub to record the write of 'value' at 'address' in 'object'.
+ // Typically 'address' = 'object' + <some offset>.
+ // See MacroAssembler::RecordWriteField() for example.
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static Mode GetMode(Code* stub) {
+ // Find the mode depending on the first two instructions.
+ Instruction* instr1 =
+ reinterpret_cast<Instruction*>(stub->instruction_start());
+ Instruction* instr2 = instr1->following();
+
+ if (instr1->IsUncondBranchImm()) {
+ ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
+ return INCREMENTAL;
+ }
+
+ ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
+
+ if (instr2->IsUncondBranchImm()) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(instr2->IsPCRelAddressing());
+
+ return STORE_BUFFER_ONLY;
+ }
+
+  // We patch the first two instructions of the stub back and forth between an
+  // adr and a branch when we start and stop incremental heap marking.
+ // The branch is
+ // b label
+ // The adr is
+ // adr xzr label
+ // so effectively a nop.
+ static void Patch(Code* stub, Mode mode) {
+    // We are going to patch the first two instructions of the stub.
+ PatchingAssembler patcher(
+ reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
+ Instruction* instr1 = patcher.InstructionAt(0);
+ Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
+ // Instructions must be either 'adr' or 'b'.
+ ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+ ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+ // Retrieve the offsets to the labels.
+ int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
+ int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
+
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ }
+
+ private:
+ // This is a helper class to manage the registers associated with the stub.
+ // The 'object' and 'address' registers must be preserved.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch),
+ saved_regs_(kCallerSaved) {
+ ASSERT(!AreAliased(scratch, object, address));
+
+      // We would like to require more scratch registers for this stub,
+      // but the number of registers available is limited by what
+      // FullCodeGen::SetVar() uses, which is architecture independent.
+      // We therefore allocate two extra scratch registers that we save on
+      // the stack.
+ CPURegList pool_available = GetValidRegistersForAllocation();
+ CPURegList used_regs(object, address, scratch);
+ pool_available.Remove(used_regs);
+ scratch1_ = Register(pool_available.PopLowestIndex());
+ scratch2_ = Register(pool_available.PopLowestIndex());
+
+      // The SaveCallerSaveRegisters method needs to save caller-saved
+      // registers. However, we don't bother saving ip0 and ip1 because they
+      // are used as scratch registers by the MacroAssembler.
+ saved_regs_.Remove(ip0);
+ saved_regs_.Remove(ip1);
+
+ // The scratch registers will be restored by other means so we don't need
+ // to save them with the other caller saved registers.
+ saved_regs_.Remove(scratch0_);
+ saved_regs_.Remove(scratch1_);
+ saved_regs_.Remove(scratch2_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->Push(scratch1_, scratch2_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->Pop(scratch2_, scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ masm->PushCPURegList(saved_regs_);
+ if (mode == kSaveFPRegs) {
+ masm->PushCPURegList(kCallerSavedFP);
+ }
+ }
+
+    void RestoreCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ if (mode == kSaveFPRegs) {
+ masm->PopCPURegList(kCallerSavedFP);
+ }
+ masm->PopCPURegList(saved_regs_);
+ }
+
+ Register object() { return object_; }
+ Register address() { return address_; }
+ Register scratch0() { return scratch0_; }
+ Register scratch1() { return scratch1_; }
+ Register scratch2() { return scratch2_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ Register scratch2_;
+ CPURegList saved_regs_;
+
+ // TODO(all): We should consider moving this somewhere else.
+ static CPURegList GetValidRegistersForAllocation() {
+ // The list of valid registers for allocation is defined as all the
+ // registers without those with a special meaning.
+ //
+ // The default list excludes registers x26 to x31 because they are
+ // reserved for the following purpose:
+ // - x26 root register
+ // - x27 context pointer register
+ // - x28 jssp
+ // - x29 frame pointer
+ // - x30 link register(lr)
+ // - x31 xzr/stack pointer
+ CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25);
+
+ // We also remove MacroAssembler's scratch registers.
+ list.Remove(ip0);
+ list.Remove(ip1);
+ list.Remove(x8);
+ list.Remove(x9);
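+      // With these registers removed, the allocation pool is x0-x7, x10-x15
+      // and x18-x25 (assuming ip0 and ip1 alias x16 and x17, as they do in
+      // this port's assembler).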
+
+ return list;
+ }
+
+ friend class RecordWriteStub;
+ };
+
+ // A list of stub variants which are pregenerated.
+ // The variants are stored in the same format as the minor key, so
+ // MinorKeyFor() can be used to populate and check this list.
+ static const int kAheadOfTime[];
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return MinorKeyFor(object_, value_, address_, remembered_set_action_,
+ save_fp_regs_mode_);
+ }
+
+ static int MinorKeyFor(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction action,
+ SaveFPRegsMode fp_mode) {
+ ASSERT(object.Is64Bits());
+ ASSERT(value.Is64Bits());
+ ASSERT(address.Is64Bits());
+ return ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(action) |
+ SaveFPRegsModeBits::encode(fp_mode);
+ }
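+
+  // Minor key layout sketch (derived from the BitField classes below): bits
+  // 0-4 hold the object register code, bits 5-9 the value register code,
+  // bits 10-14 the address register code, bit 15 the RememberedSetAction and
+  // bit 16 the SaveFPRegsMode. For example, object = x0, value = x1 and
+  // address = x2 contribute
+  //   ObjectBits::encode(0) | ValueBits::encode(1) | AddressBits::encode(2)
+  //     = (1 << 5) | (2 << 10) = 0x820.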
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 5> {};
+ class ValueBits: public BitField<int, 5, 5> {};
+ class AddressBits: public BitField<int, 10, 5> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
+
+
+// Helper to call C++ functions from generated code. The caller must prepare
+// the exit frame before making the call with GenerateCall().
+class DirectCEntryStub: public PlatformCodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+};
+
+
+class NameDictionaryLookupStub: public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2);
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ Major MajorKey() { return NameDictionaryLookup; }
+
+ int MinorKey() {
+ return LookupModeBits::encode(mode_);
+ }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
+
+
+class SubStringStub: public PlatformCodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public PlatformCodeStub {
+ public:
+ StringCompareStub() { }
+
+  // Compares two flat ASCII strings and returns the result in x0.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+  // Compares two flat ASCII strings for equality and returns the result
+  // in x0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal);
+};
+
+
+struct PlatformCallInterfaceDescriptor {
+ explicit PlatformCallInterfaceDescriptor(
+ TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) { }
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_CODE_STUBS_A64_H_
diff --git a/deps/v8/src/a64/codegen-a64.cc b/deps/v8/src/a64/codegen-a64.cc
new file mode 100644
index 0000000000..3f0e2295df
--- /dev/null
+++ b/deps/v8/src/a64/codegen-a64.cc
@@ -0,0 +1,616 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "codegen.h"
+#include "macro-assembler.h"
+#include "simulator-a64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_a64_machine_code = NULL;
+double fast_exp_simulator(double x) {
+  Simulator* simulator = Simulator::current(Isolate::Current());
+ Simulator::CallArgument args[] = {
+ Simulator::CallArgument(x),
+ Simulator::CallArgument::End()
+ };
+ return simulator->CallDouble(fast_exp_a64_machine_code, args);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &std::exp;
+
+  // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to
+  // create an AAPCS64-compliant exp() function. This will be faster than the
+  // C library's exp() function, but probably less accurate.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::exp;
+
+ ExternalReference::InitializeMathExpData();
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ masm.SetStackPointer(csp);
+
+ // The argument will be in d0 on entry.
+ DoubleRegister input = d0;
+ // Use other caller-saved registers for all other values.
+ DoubleRegister result = d1;
+ DoubleRegister double_temp1 = d2;
+ DoubleRegister double_temp2 = d3;
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Register temp3 = x12;
+
+ MathExpGenerator::EmitMathExp(&masm, input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+ // Move the result to the return register.
+ masm.Fmov(d0, result);
+ masm.Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_a64_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
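+
+
+// Usage sketch (illustrative only, assuming the UnaryMathFunction typedef
+// from codegen.h is double (*)(double x)): the generated stub is called like
+// the library function it replaces, e.g.
+//   UnaryMathFunction fast_exp = CreateExpFunction();
+//   double y = fast_exp(1.0);  // ~2.71828, possibly less accurate than
+//                              // std::exp(1.0).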
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ return &std::sqrt;
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ // ----------- S t a t e -------------
+ // -- x2 : receiver
+ // -- x3 : target map
+ // -----------------------------------
+ Register receiver = x2;
+ Register map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
+ allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver,
+ HeapObject::kMapOffset,
+ map,
+ x10,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- x3 : target map, scratch for subsequent call
+ // -----------------------------------
+ Register receiver = x2;
+ Register target_map = x3;
+
+ Label gc_required, only_change_map;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedDoubleArray.
+ Register array_size = x6;
+ Register array = x7;
+ __ Lsl(array_size, length, kDoubleSizeLog2);
+ __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
+ __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
+ // Register array is non-tagged heap object.
+
+ // Set the destination FixedDoubleArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ Add(x10, array, kHeapObjectTag);
+ __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
+ x6, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
+
+ FPRegister nan_d = d1;
+ __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
+
+ Label entry, done;
+ __ B(&entry);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ B(&done);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(lr);
+ __ B(fail);
+
+  // Iterate over the array, copying and converting smis to doubles. If an
+ // element is non-smi, write a hole to the destination.
+ {
+ Label loop;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
+ __ Tst(x13, kSmiTagMask);
+ __ Fcsel(d0, d0, nan_d, eq);
+ __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(lr);
+ __ Bind(&done);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -- x3 : target map, scratch for subsequent call
+ // -- x4 : scratch (elements)
+ // -----------------------------------
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register target_map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Label only_change_map;
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ // TODO(all): These registers may not need to be pushed. Examine
+ // RecordWriteStub and check whether it's needed.
+ __ Push(target_map, receiver, key, value);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedArray.
+ Register array_size = x6;
+ Register array = x7;
+ Label gc_required;
+ __ Mov(array_size, FixedDoubleArray::kHeaderSize);
+ __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
+ __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
+
+  // Set the destination FixedArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedArray::kHeaderSize);
+ __ Add(array, array, kHeapObjectTag);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
+
+ Register the_hole = x14;
+ Register heap_num_map = x15;
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
+
+ Label entry;
+ __ B(&entry);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(value, key, receiver, target_map);
+ __ Pop(lr);
+ __ B(fail);
+
+ {
+ Label loop, convert_hole;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ __ Cmp(x13, kHoleNanInt64);
+ __ B(eq, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ Register heap_num = x5;
+ __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
+ __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
+ __ Mov(x13, dst_elements);
+ __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
+ __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ __ B(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ Bind(&convert_hole);
+ __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(value, key, receiver, target_map);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
+ kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Pop(lr);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ return MacroAssembler::IsYoungSequence(sequence);
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ byte* target = sequence + kCodeAgeStubEntryOffset;
+ Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
+ if (age == kNoAgeCodeAge) {
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+ } else {
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
+ MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
+ }
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ Ldrsw(result,
+ UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ Add(index, index, result);
+ __ B(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ Bind(&cons_string);
+ __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
+ // Get the first of the two strings and load its instance type.
+ __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ Bind(&indirect_string_loaded);
+ __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ Bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
+
+ // Prepare sequential strings
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ B(&check_encoding);
+
+ // Handle external strings.
+ __ Bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ Tst(result, kIsIndirectStringMask);
+ __ Assert(eq, kExternalStringExpectedButNotFound);
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
+ // can be bound far away in deferred code.
+ __ Tst(result, kShortExternalStringMask);
+ __ B(ne, call_runtime);
+ __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label ascii, done;
+ __ Bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
+ // Two-byte string.
+ __ Ldrh(result, MemOperand(string, index, LSL, 1));
+ __ B(&done);
+ __ Bind(&ascii);
+ // Ascii string.
+ __ Ldrb(result, MemOperand(string, index));
+ __ Bind(&done);
+}
+
+
+static MemOperand ExpConstant(Register base, int index) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_temp1,
+ DoubleRegister double_temp2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ // TODO(jbramley): There are several instances where fnmsub could be used
+ // instead of fmul and fsub. Doing this changes the result, but since this is
+ // an estimation anyway, does it matter?
+
+ ASSERT(!AreAliased(input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+ DoubleRegister double_temp3 = result;
+ Register constants = temp3;
+
+ // The algorithm used relies on some magic constants which are initialized in
+ // ExternalReference::InitializeMathExpData().
+
+ // Load the address of the start of the array.
+ __ Mov(constants, Operand(ExternalReference::math_exp_constants(0)));
+
+ // We have to do a four-way split here:
+ // - If input <= about -708.4, the output always rounds to zero.
+ // - If input >= about 709.8, the output always rounds to +infinity.
+ // - If the input is NaN, the output is NaN.
+ // - Otherwise, the result needs to be calculated.
+ Label result_is_finite_non_zero;
+ // Assert that we can load offset 0 (the small input threshold) and offset 1
+ // (the large input threshold) with a single ldp.
+ ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 1).offset() -
+ ExpConstant(constants, 0).offset()));
+ __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
+
+ __ Fcmp(input, double_temp1);
+ __ Fccmp(input, double_temp2, NoFlag, hi);
+ // At this point, the condition flags can be in one of five states:
+ // NZCV
+ // 1000 -708.4 < input < 709.8 result = exp(input)
+ // 0110 input == 709.8 result = +infinity
+ // 0010 input > 709.8 result = +infinity
+ // 0011 input is NaN result = input
+ // 0000 input <= -708.4 result = +0.0
+
+ // Continue the common case first. 'mi' tests N == 1.
+ __ B(&result_is_finite_non_zero, mi);
+
+ // TODO(jbramley): Add (and use) a zero D register for A64.
+ // TODO(jbramley): Consider adding a +infinity register for A64.
+ __ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
+ __ Fsub(double_temp1, double_temp1, double_temp1); // Synthesize +0.0.
+
+ // Select between +0.0 and +infinity. 'lo' tests C == 0.
+ __ Fcsel(result, double_temp1, double_temp2, lo);
+ // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
+ __ Fcsel(result, result, input, vc);
+ __ B(&done);
+
+ // The rest is magic, as described in InitializeMathExpData().
+ __ Bind(&result_is_finite_non_zero);
+
+ // Assert that we can load offset 3 and offset 4 with a single ldp.
+ ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 4).offset() -
+ ExpConstant(constants, 3).offset()));
+ __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
+ __ Fmadd(double_temp1, double_temp1, input, double_temp3);
+ __ Fmov(temp2.W(), double_temp1.S());
+ __ Fsub(double_temp1, double_temp1, double_temp3);
+
+ // Assert that we can load offset 5 and offset 6 with a single ldp.
+ ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 6).offset() -
+ ExpConstant(constants, 5).offset()));
+ __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
+ // TODO(jbramley): Consider using Fnmsub here.
+ __ Fmul(double_temp1, double_temp1, double_temp2);
+ __ Fsub(double_temp1, double_temp1, input);
+
+ __ Fmul(double_temp2, double_temp1, double_temp1);
+ __ Fsub(double_temp3, double_temp3, double_temp1);
+ __ Fmul(double_temp3, double_temp3, double_temp2);
+
+ __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
+
+ __ Ldr(double_temp2, ExpConstant(constants, 7));
+ // TODO(jbramley): Consider using Fnmsub here.
+ __ Fmul(double_temp3, double_temp3, double_temp2);
+ __ Fsub(double_temp3, double_temp3, double_temp1);
+
+ // The 8th constant is 1.0, so use an immediate move rather than a load.
+ // We can't generate a runtime assertion here as we would need to call Abort
+ // in the runtime and we don't have an Isolate when we generate this code.
+ __ Fmov(double_temp2, 1.0);
+ __ Fadd(double_temp3, double_temp3, double_temp2);
+
+ __ And(temp2, temp2, 0x7ff);
+ __ Add(temp1, temp1, 0x3ff);
+
+ // Do the final table lookup.
+ __ Mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+
+ __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeInBytesLog2));
+ __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
+ __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
+ __ Bfi(temp2, temp1, 32, 32);
+ __ Fmov(double_temp1, temp2);
+
+ __ Fmul(result, double_temp3, double_temp1);
+
+ __ Bind(&done);
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/codegen-a64.h b/deps/v8/src/a64/codegen-a64.h
new file mode 100644
index 0000000000..d66bd34a93
--- /dev/null
+++ b/deps/v8/src/a64/codegen-a64.h
@@ -0,0 +1,70 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_CODEGEN_A64_H_
+#define V8_A64_CODEGEN_A64_H_
+
+#include "ast.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_CODEGEN_A64_H_
diff --git a/deps/v8/src/a64/constants-a64.h b/deps/v8/src/a64/constants-a64.h
new file mode 100644
index 0000000000..4f43f13537
--- /dev/null
+++ b/deps/v8/src/a64/constants-a64.h
@@ -0,0 +1,1262 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_CONSTANTS_A64_H_
+#define V8_A64_CONSTANTS_A64_H_
+
+
+// Assert that this is an LP64 system.
+STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
+STATIC_ASSERT(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+
+
+// Get the standard printf format macros for C99 stdint types.
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+
+namespace v8 {
+namespace internal {
+
+
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLiteralEntrySize = 4;
+const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MB;
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfFPRegisters = 32;
+// Callee saved registers are x19-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 11;
+const int kFirstCalleeSavedRegisterIndex = 19;
+// Callee saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+// Callee saved registers with no specific purpose in JS are x19-x25.
+const unsigned kJSCalleeSavedRegList = 0x03f80000;
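+// (Bit i of the list corresponds to register xi, so x19-x25 set bits 19-25,
+// i.e. 0x03f80000.)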
+// TODO(all): k<Y>RegSize should probably be k<Y>RegSizeInBits.
+const unsigned kWRegSize = 32;
+const unsigned kWRegSizeLog2 = 5;
+const unsigned kWRegSizeInBytes = kWRegSize >> 3;
+const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
+const unsigned kXRegSize = 64;
+const unsigned kXRegSizeLog2 = 6;
+const unsigned kXRegSizeInBytes = kXRegSize >> 3;
+const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
+const unsigned kSRegSize = 32;
+const unsigned kSRegSizeLog2 = 5;
+const unsigned kSRegSizeInBytes = kSRegSize >> 3;
+const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
+const unsigned kDRegSize = 64;
+const unsigned kDRegSizeLog2 = 6;
+const unsigned kDRegSizeInBytes = kDRegSize >> 3;
+const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
+const int64_t kWRegMask = 0x00000000ffffffffL;
+const int64_t kXRegMask = 0xffffffffffffffffL;
+const int64_t kSRegMask = 0x00000000ffffffffL;
+const int64_t kDRegMask = 0xffffffffffffffffL;
+// TODO(all) check if the expression below works on all compilers or if it
+// triggers an overflow error.
+const int64_t kDSignMask = 0x1L << 63;
+const int64_t kDSignBit = 63;
+const int64_t kXSignMask = 0x1L << 63;
+const int64_t kXSignBit = 63;
+const int64_t kWSignMask = 0x1L << 31;
+const int64_t kWSignBit = 31;
+const int64_t kByteMask = 0xffL;
+const int64_t kHalfWordMask = 0xffffL;
+const int64_t kWordMask = 0xffffffffL;
+const uint64_t kXMaxUInt = 0xffffffffffffffffUL;
+const uint64_t kWMaxUInt = 0xffffffffUL;
+const int64_t kXMaxInt = 0x7fffffffffffffffL;
+const int64_t kXMinInt = 0x8000000000000000L;
+const int32_t kWMaxInt = 0x7fffffff;
+const int32_t kWMinInt = 0x80000000;
+const unsigned kFramePointerRegCode = 29;
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kJSSPCode = 28;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+// Standard machine types defined by AAPCS64.
+const unsigned kByteSize = 8;
+const unsigned kByteSizeInBytes = kByteSize >> 3;
+const unsigned kHalfWordSize = 16;
+const unsigned kHalfWordSizeLog2 = 4;
+const unsigned kHalfWordSizeInBytes = kHalfWordSize >> 3;
+const unsigned kHalfWordSizeInBytesLog2 = kHalfWordSizeLog2 - 3;
+const unsigned kWordSize = 32;
+const unsigned kWordSizeLog2 = 5;
+const unsigned kWordSizeInBytes = kWordSize >> 3;
+const unsigned kWordSizeInBytesLog2 = kWordSizeLog2 - 3;
+const unsigned kDoubleWordSize = 64;
+const unsigned kDoubleWordSizeInBytes = kDoubleWordSize >> 3;
+const unsigned kQuadWordSize = 128;
+const unsigned kQuadWordSizeInBytes = kQuadWordSize >> 3;
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
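+
+// For illustration, REGISTER_CODE_LIST can be expanded with a macro that
+// emits case labels:
+//   #define CASE(N) case N:
+//   REGISTER_CODE_LIST(CASE)   // "case 0: case 1: ... case 31:"
+//   #undef CASE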
+
+#define INSTRUCTION_FIELDS_LIST(V_) \
+/* Register fields */ \
+V_(Rd, 4, 0, Bits) /* Destination register. */ \
+V_(Rn, 9, 5, Bits) /* First source register. */ \
+V_(Rm, 20, 16, Bits) /* Second source register. */ \
+V_(Ra, 14, 10, Bits) /* Third source register. */ \
+V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
+V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
+ /* store second source. */ \
+V_(PrefetchMode, 4, 0, Bits) \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, Bits) \
+V_(FlagsUpdate, 29, 29, Bits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, SignedBits) \
+V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, Bits) \
+V_(ImmDPShift, 15, 10, Bits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, Bits) \
+V_(ShiftAddSub, 23, 22, Bits) \
+ \
+/* Add/subtract extend */                                                    \
+V_(ImmExtendShift, 12, 10, Bits) \
+V_(ExtendMode, 15, 13, Bits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, Bits) \
+V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, Bits) \
+V_(ImmRotate, 21, 16, Bits) \
+V_(ImmSetBits, 15, 10, Bits) \
+V_(ImmR, 21, 16, Bits) \
+V_(ImmS, 15, 10, Bits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, SignedBits) \
+V_(ImmTestBranchBit40, 23, 19, Bits) \
+V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, Bits) \
+V_(ConditionBranch, 3, 0, Bits) \
+V_(Nzcv, 3, 0, Bits) \
+V_(ImmCondCmp, 20, 16, Bits) \
+V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, Bits) \
+V_(ImmFP, 20, 13, Bits) \
+V_(FPScale, 15, 10, Bits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, SignedBits) \
+V_(ImmLSUnsigned, 21, 10, Bits) \
+V_(ImmLSPair, 21, 15, SignedBits) \
+V_(SizeLS, 31, 30, Bits) \
+V_(ImmShiftLS, 12, 12, Bits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, SignedBits) \
+V_(ImmCmpBranch, 23, 5, SignedBits) \
+V_(ImmLLiteral, 23, 5, SignedBits) \
+V_(ImmException, 20, 5, Bits) \
+V_(ImmHint, 11, 5, Bits) \
+V_(ImmBarrierDomain, 11, 10, Bits) \
+V_(ImmBarrierType, 9, 8, Bits) \
+ \
+/* System (MRS, MSR) */ \
+V_(ImmSystemRegister, 19, 5, Bits) \
+V_(SysO0, 19, 19, Bits) \
+V_(SysOp1, 18, 16, Bits) \
+V_(SysOp2, 7, 5, Bits) \
+V_(CRn, 15, 12, Bits) \
+V_(CRm, 11, 8, Bits) \
+
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+/* NZCV */ \
+V_(Flags, 31, 28, Bits) \
+V_(N, 31, 31, Bits) \
+V_(Z, 30, 30, Bits) \
+V_(C, 29, 29, Bits) \
+V_(V, 28, 28, Bits) \
+M_(NZCV, Flags_mask) \
+ \
+/* FPCR */ \
+V_(AHP, 26, 26, Bits) \
+V_(DN, 25, 25, Bits) \
+V_(FZ, 24, 24, Bits) \
+V_(RMode, 23, 22, Bits) \
+M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+
+// Fields offsets.
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \
+const int Name##_offset = LowBit; \
+const int Name##_width = HighBit - LowBit + 1; \
+const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
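+
+// As a worked example of the field macros above, Rd is declared as bits 4:0,
+// so its expansion is
+//   const int Rd_offset = 0;
+//   const int Rd_width = 5;
+//   const uint32_t Rd_mask = ((1 << 5) - 1) << 0;  // == 0x0000001f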
+
+// Condition codes.
+enum Condition {
+ eq = 0,
+ ne = 1,
+ hs = 2,
+ lo = 3,
+ mi = 4,
+ pl = 5,
+ vs = 6,
+ vc = 7,
+ hi = 8,
+ ls = 9,
+ ge = 10,
+ lt = 11,
+ gt = 12,
+ le = 13,
+ al = 14,
+ nv = 15 // Behaves as always/al.
+};
+
+inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no never condition.
+ ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+}
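+
+// In this encoding each condition and its inverse differ only in bit 0, so,
+// for example, InvertCondition(eq) == ne (0 ^ 1 == 1) and
+// InvertCondition(lt) == ge (11 ^ 1 == 10).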
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseConditionForCmp(Condition cond) {
+ switch (cond) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ case eq:
+ return eq;
+ default:
+ // In practice this function is only used with a condition coming from
+ // TokenToCondition in lithium-codegen-a64.cc. Any other condition is
+      // invalid as it doesn't necessarily make sense to reverse it (consider
+ // 'mi' for instance).
+ UNREACHABLE();
+ return nv;
+  }
+}
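+
+// For example, reversing 'lt' gives 'gt' because (a < b) is equivalent to
+// (b > a); the unsigned pairs lo/hi and hs/ls swap in the same way.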
+
+enum FlagsUpdate {
+ SetFlags = 1,
+ LeaveFlags = 0
+};
+
+enum StatusFlags {
+ NoFlag = 0,
+
+ // Derive the flag combinations from the system register bit descriptions.
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+ // Floating-point comparison results.
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
+ FPGreaterThanFlag = CFlag,
+ FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+ NO_SHIFT = -1,
+ LSL = 0x0,
+ LSR = 0x1,
+ ASR = 0x2,
+ ROR = 0x3
+};
+
+enum Extend {
+ NO_EXTEND = -1,
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
+};
+
+enum SystemHint {
+ NOP = 0,
+ YIELD = 1,
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5
+};
+
+enum BarrierDomain {
+ OuterShareable = 0,
+ NonShareable = 1,
+ InnerShareable = 2,
+ FullSystem = 3
+};
+
+enum BarrierType {
+ BarrierOther = 0,
+ BarrierReads = 1,
+ BarrierWrites = 2,
+ BarrierAll = 3
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+ NZCV = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x2 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset,
+ FPCR = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x4 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
+};
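+
+// Worked example (for illustration): with the field offsets above, NZCV
+// resolves to
+//   ((0x1 << 19) | (0x3 << 16) | (0x4 << 12) | (0x2 << 8)) >> 5 == 0x5A10,
+// which, combined with the MRS opcode defined below and a destination
+// register of x0, gives the usual "mrs x0, nzcv" encoding 0xD53B4200.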
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField {
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
+};
+
+// PC relative addressing.
+enum PCRelAddressingOp {
+ PCRelAddressingFixed = 0x10000000,
+ PCRelAddressingFMask = 0x1F000000,
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
+};
+
+// Add/sub (immediate, shifted and extended.)
+const int kSFOffset = 31;
+enum AddSubOp {
+ AddSubOpMask = 0x60000000,
+ AddSubSetFlagsBit = 0x20000000,
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
+};
+
+#define ADD_SUB_OP_LIST(V) \
+ V(ADD), \
+ V(ADDS), \
+ V(SUB), \
+ V(SUBS)
+
+enum AddSubImmediateOp {
+ AddSubImmediateFixed = 0x11000000,
+ AddSubImmediateFMask = 0x1F000000,
+ AddSubImmediateMask = 0xFF000000,
+ #define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
+ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
+ #undef ADD_SUB_IMMEDIATE
+};
+
+enum AddSubShiftedOp {
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+ #define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
+ A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
+ #undef ADD_SUB_SHIFTED
+};
+
+enum AddSubExtendedOp {
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+ #define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
+ A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
+ #undef ADD_SUB_EXTENDED
+};
+
+// Add/sub with carry.
+enum AddSubWithCarryOp {
+ AddSubWithCarryFixed = 0x1A000000,
+ AddSubWithCarryFMask = 0x1FE00000,
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+};
+
+
+// Logical (immediate and shifted register).
+enum LogicalOp {
+ LogicalOpMask = 0x60200000,
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
+};
+
+// Logical immediate.
+enum LogicalImmediateOp {
+ LogicalImmediateFixed = 0x12000000,
+ LogicalImmediateFMask = 0x1F800000,
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+};
+
+// Logical shifted register.
+enum LogicalShiftedOp {
+ LogicalShiftedFixed = 0x0A000000,
+ LogicalShiftedFMask = 0x1F000000,
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
+};
+
+// Move wide immediate.
+enum MoveWideImmediateOp {
+ MoveWideImmediateFixed = 0x12800000,
+ MoveWideImmediateFMask = 0x1F800000,
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+};
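+
+// Worked example (derived from the instruction fields above, for
+// illustration): "movz x0, #0x1234" encodes as
+//   MOVZ_x | (0x1234 << ImmMoveWide_offset) | 0  /* Rd = x0 */
+//     = 0xD2800000 | 0x00024680
+//     = 0xD2824680,
+// with ShiftMoveWide (the 'hw' field) left at zero.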
+
+// Bitfield.
+const int kBitfieldNOffset = 22;
+enum BitfieldOp {
+ BitfieldFixed = 0x13000000,
+ BitfieldFMask = 0x1F800000,
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
+ // Bitfield N field.
+};
+
+// Extract.
+enum ExtractOp {
+ ExtractFixed = 0x13800000,
+ ExtractFMask = 0x1F800000,
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
+};
+
+// Unconditional branch.
+enum UnconditionalBranchOp {
+ UnconditionalBranchFixed = 0x14000000,
+ UnconditionalBranchFMask = 0x7C000000,
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
+};
+
+// Unconditional branch to register.
+enum UnconditionalBranchToRegisterOp {
+ UnconditionalBranchToRegisterFixed = 0xD6000000,
+ UnconditionalBranchToRegisterFMask = 0xFE000000,
+ UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000
+};
+
+// Compare and branch.
+enum CompareBranchOp {
+ CompareBranchFixed = 0x34000000,
+ CompareBranchFMask = 0x7E000000,
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
+};
+
+// Test and branch.
+enum TestBranchOp {
+ TestBranchFixed = 0x36000000,
+ TestBranchFMask = 0x7E000000,
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
+};
+
+// Conditional branch.
+enum ConditionalBranchOp {
+ ConditionalBranchFixed = 0x54000000,
+ ConditionalBranchFMask = 0xFE000000,
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
+};
+
+// System.
+// System instruction encoding is complicated because some instructions use op
+// and CR fields to encode parameters. To handle this cleanly, the system
+// instructions are split into more than one enum.
+
+enum SystemOp {
+ SystemFixed = 0xD5000000,
+ SystemFMask = 0xFFC00000
+};
+
+enum SystemSysRegOp {
+ SystemSysRegFixed = 0xD5100000,
+ SystemSysRegFMask = 0xFFD00000,
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
+};
+
+enum SystemHintOp {
+ SystemHintFixed = 0xD503201F,
+ SystemHintFMask = 0xFFFFF01F,
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
+};
+
+// Exception.
+enum ExceptionOp {
+ ExceptionFixed = 0xD4000000,
+ ExceptionFMask = 0xFF000000,
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
+};
+// Code used to spot hlt instructions that should not be hit.
+const int kHltBadCode = 0xbad;
+
+enum MemBarrierOp {
+ MemBarrierFixed = 0xD503309F,
+ MemBarrierFMask = 0xFFFFF09F,
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
+};
+
+// Any load or store (including pair).
+enum LoadStoreAnyOp {
+ LoadStoreAnyFMask = 0x0a000000,
+ LoadStoreAnyFixed = 0x08000000
+};
+
+// Any load pair or store pair.
+enum LoadStorePairAnyOp {
+ LoadStorePairAnyFMask = 0x3a000000,
+ LoadStorePairAnyFixed = 0x28000000
+};
+
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000), \
+ V(LDP, w, 0x00400000), \
+ V(LDPSW, x, 0x40400000), \
+ V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), \
+ V(STP, s, 0x04000000), \
+ V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), \
+ V(LDP, d, 0x44400000)
+
+// Load/store pair (post, pre and offset.)
+enum LoadStorePairOp {
+ LoadStorePairMask = 0xC4400000,
+ LoadStorePairLBit = 1 << 22,
+ #define LOAD_STORE_PAIR(A, B, C) \
+ A##_##B = C
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
+ #undef LOAD_STORE_PAIR
+};
+
+enum LoadStorePairPostIndexOp {
+ LoadStorePairPostIndexFixed = 0x28800000,
+ LoadStorePairPostIndexFMask = 0x3B800000,
+ LoadStorePairPostIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
+ #undef LOAD_STORE_PAIR_POST_INDEX
+};
+
+enum LoadStorePairPreIndexOp {
+ LoadStorePairPreIndexFixed = 0x29800000,
+ LoadStorePairPreIndexFMask = 0x3B800000,
+ LoadStorePairPreIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
+ #undef LOAD_STORE_PAIR_PRE_INDEX
+};
+
+enum LoadStorePairOffsetOp {
+ LoadStorePairOffsetFixed = 0x29000000,
+ LoadStorePairOffsetFMask = 0x3B800000,
+ LoadStorePairOffsetMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
+ #undef LOAD_STORE_PAIR_OFFSET
+};
+
+enum LoadStorePairNonTemporalOp {
+ LoadStorePairNonTemporalFixed = 0x28000000,
+ LoadStorePairNonTemporalFMask = 0x3B800000,
+ LoadStorePairNonTemporalMask = 0xFFC00000,
+ STNP_w = LoadStorePairNonTemporalFixed | STP_w,
+ LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
+ STNP_x = LoadStorePairNonTemporalFixed | STP_x,
+ LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
+ STNP_s = LoadStorePairNonTemporalFixed | STP_s,
+ LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
+ STNP_d = LoadStorePairNonTemporalFixed | STP_d,
+ LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
+};
+
+// Load literal.
+enum LoadLiteralOp {
+ LoadLiteralFixed = 0x18000000,
+ LoadLiteralFMask = 0x3B000000,
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000
+};
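+
+// Illustrative sketch only, not part of the upstream patch: the Fixed/FMask
+// pairs in this file are used to classify a raw instruction word. An
+// instruction belongs to a class when the bits selected by the class FMask
+// equal the class Fixed pattern; the narrower Mask values then identify the
+// specific operation within the class. The helper name below is assumed for
+// this example.
+inline bool IsLoadLiteralExample(uint32_t raw) {
+  // E.g. LDR_w_lit (0x18000000) satisfies this check, since its bits under
+  // LoadLiteralFMask (0x3B000000) equal LoadLiteralFixed (0x18000000).
+  return (raw & LoadLiteralFMask) == LoadLiteralFixed;
+}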
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000)
+
+
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp {
+ LoadStoreUnscaledOffsetFixed = 0x38000000,
+ LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+ #undef LOAD_STORE_UNSCALED
+};
+
+// Load/store (post, pre, offset and unsigned).
+enum LoadStoreOp {
+ LoadStoreOpMask = 0xC4C00000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
+
+// Load/store post index.
+enum LoadStorePostIndex {
+ LoadStorePostIndexFixed = 0x38000400,
+ LoadStorePostIndexFMask = 0x3B200C00,
+ LoadStorePostIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ A##B##_##C##_post = LoadStorePostIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
+ #undef LOAD_STORE_POST_INDEX
+};
+
+// Load/store pre index.
+enum LoadStorePreIndex {
+ LoadStorePreIndexFixed = 0x38000C00,
+ LoadStorePreIndexFMask = 0x3B200C00,
+ LoadStorePreIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ A##B##_##C##_pre = LoadStorePreIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
+ #undef LOAD_STORE_PRE_INDEX
+};
+
+// Load/store unsigned offset.
+enum LoadStoreUnsignedOffset {
+ LoadStoreUnsignedOffsetFixed = 0x39000000,
+ LoadStoreUnsignedOffsetFMask = 0x3B000000,
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
+ #undef LOAD_STORE_UNSIGNED_OFFSET
+};
+
+// Load/store register offset.
+enum LoadStoreRegisterOffset {
+ LoadStoreRegisterOffsetFixed = 0x38200800,
+ LoadStoreRegisterOffsetFMask = 0x3B200C00,
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+ #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
+ #undef LOAD_STORE_REGISTER_OFFSET
+};
+
+// Conditional compare.
+enum ConditionalCompareOp {
+ ConditionalCompareMask = 0x60000000,
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
+};
+
+// Conditional compare register.
+enum ConditionalCompareRegisterOp {
+ ConditionalCompareRegisterFixed = 0x1A400000,
+ ConditionalCompareRegisterFMask = 0x1FE00800,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
+ CCMN_w = ConditionalCompareRegisterFixed | CCMN,
+ CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
+ CCMP_w = ConditionalCompareRegisterFixed | CCMP,
+ CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
+};
+
+// Conditional compare immediate.
+enum ConditionalCompareImmediateOp {
+ ConditionalCompareImmediateFixed = 0x1A400800,
+ ConditionalCompareImmediateFMask = 0x1FE00800,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
+ CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
+ CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
+ CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
+ CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
+};
+
+// Conditional select.
+enum ConditionalSelectOp {
+ ConditionalSelectFixed = 0x1A800000,
+ ConditionalSelectFMask = 0x1FE00000,
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
+};
+
+// Data processing 1 source.
+enum DataProcessing1SourceOp {
+ DataProcessing1SourceFixed = 0x5AC00000,
+ DataProcessing1SourceFMask = 0x5FE00000,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
+ REV16_w = REV16,
+ REV16_x = REV16 | SixtyFourBits,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
+ REV32_x = REV | SixtyFourBits,
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits
+};
+
+// Data processing 2 source.
+enum DataProcessing2SourceOp {
+ DataProcessing2SourceFixed = 0x1AC00000,
+ DataProcessing2SourceFMask = 0x5FE00000,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ CRC32CB = DataProcessing2SourceFixed | 0x00005000,
+ CRC32CH = DataProcessing2SourceFixed | 0x00005400,
+ CRC32CW = DataProcessing2SourceFixed | 0x00005800,
+ CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
+};
+
+// Data processing 3 source.
+enum DataProcessing3SourceOp {
+ DataProcessing3SourceFixed = 0x1B000000,
+ DataProcessing3SourceFMask = 0x1F000000,
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+};
+
+// Floating point compare.
+enum FPCompareOp {
+ FPCompareFixed = 0x1E202000,
+ FPCompareFMask = 0x5F203C00,
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018
+};
+
+// Floating point conditional compare.
+enum FPConditionalCompareOp {
+ FPConditionalCompareFixed = 0x1E200400,
+ FPConditionalCompareFMask = 0x5F200C00,
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
+};
+
+// Floating point conditional select.
+enum FPConditionalSelectOp {
+ FPConditionalSelectFixed = 0x1E200C00,
+ FPConditionalSelectFMask = 0x5F200C00,
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
+};
+
+// Floating point immediate.
+enum FPImmediateOp {
+ FPImmediateFixed = 0x1E201000,
+ FPImmediateFMask = 0x5F201C00,
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+};
+
+// Floating point data processing 1 source.
+enum FPDataProcessing1SourceOp {
+ FPDataProcessing1SourceFixed = 0x1E204000,
+ FPDataProcessing1SourceFMask = 0x5F207C00,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
+ FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
+ FRINTN = FRINTN_s,
+ FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
+ FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTP = FRINTP_s,
+ FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
+ FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTM = FRINTM_s,
+ FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
+ FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
+ FRINTZ = FRINTZ_s,
+ FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
+ FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTA = FRINTA_s,
+ FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
+ FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTX = FRINTX_s,
+ FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
+ FRINTI = FRINTI_s
+};
+
+// Floating point data processing 2 source.
+enum FPDataProcessing2SourceOp {
+ FPDataProcessing2SourceFixed = 0x1E200800,
+ FPDataProcessing2SourceFMask = 0x5F200C00,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FMAXNM_s = FMAXNM,
+ FMAXNM_d = FMAXNM | FP64,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM_s = FMINNM,
+ FMINNM_d = FMINNM | FP64,
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
+};
+
+// Floating point data processing 3 source.
+enum FPDataProcessing3SourceOp {
+ FPDataProcessing3SourceFixed = 0x1F000000,
+ FPDataProcessing3SourceFMask = 0x5F000000,
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+};
+
+// Conversion between floating point and integer.
+enum FPIntegerConvertOp {
+ FPIntegerConvertFixed = 0x1E200000,
+ FPIntegerConvertFMask = 0x5F20FC00,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FCVTNS_ws = FCVTNS,
+ FCVTNS_xs = FCVTNS | SixtyFourBits,
+ FCVTNS_wd = FCVTNS | FP64,
+ FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU_ws = FCVTNU,
+ FCVTNU_xs = FCVTNU | SixtyFourBits,
+ FCVTNU_wd = FCVTNU | FP64,
+ FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS_ws = FCVTPS,
+ FCVTPS_xs = FCVTPS | SixtyFourBits,
+ FCVTPS_wd = FCVTPS | FP64,
+ FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU_ws = FCVTPU,
+ FCVTPU_xs = FCVTPU | SixtyFourBits,
+ FCVTPU_wd = FCVTPU | FP64,
+ FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS_ws = FCVTMS,
+ FCVTMS_xs = FCVTMS | SixtyFourBits,
+ FCVTMS_wd = FCVTMS | FP64,
+ FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU_ws = FCVTMU,
+ FCVTMU_xs = FCVTMU | SixtyFourBits,
+ FCVTMU_wd = FCVTMU | FP64,
+ FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS_ws = FCVTZS,
+ FCVTZS_xs = FCVTZS | SixtyFourBits,
+ FCVTZS_wd = FCVTZS | FP64,
+ FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU_ws = FCVTZU,
+ FCVTZU_xs = FCVTZU | SixtyFourBits,
+ FCVTZU_wd = FCVTZU | FP64,
+ FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ FCVTAS_ws = FCVTAS,
+ FCVTAS_xs = FCVTAS | SixtyFourBits,
+ FCVTAS_wd = FCVTAS | FP64,
+ FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU_ws = FCVTAU,
+ FCVTAU_xs = FCVTAU | SixtyFourBits,
+ FCVTAU_wd = FCVTAU | FP64,
+ FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64
+};
+
+// Conversion between fixed point and floating point.
+enum FPFixedPointConvertOp {
+ FPFixedPointConvertFixed = 0x1E000000,
+ FPFixedPointConvertFMask = 0x5F200000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FCVTZS_ws_fixed = FCVTZS_fixed,
+ FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
+ FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
+ FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_ws_fixed = FCVTZU_fixed,
+ FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
+ FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
+ FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+};
+
+// Unimplemented and unallocated instructions. These are defined to make fixed
+// bit assertion easier.
+enum UnimplementedOp {
+ UnimplementedFixed = 0x00000000,
+ UnimplementedFMask = 0x00000000
+};
+
+enum UnallocatedOp {
+ UnallocatedFixed = 0x00000000,
+ UnallocatedFMask = 0x00000000
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_CONSTANTS_A64_H_
diff --git a/deps/v8/src/a64/cpu-a64.cc b/deps/v8/src/a64/cpu-a64.cc
new file mode 100644
index 0000000000..6dd5e52ae2
--- /dev/null
+++ b/deps/v8/src/a64/cpu-a64.cc
@@ -0,0 +1,199 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for A64, independent of the OS, goes here.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "a64/cpu-a64.h"
+#include "a64/utils-a64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
+
+// Initialise to smallest possible cache size.
+unsigned CpuFeatures::dcache_line_size_ = 1;
+unsigned CpuFeatures::icache_line_size_ = 1;
+
+
+void CPU::SetUp() {
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return true;
+}
+
+
+void CPU::FlushICache(void* address, size_t length) {
+ if (length == 0) {
+ return;
+ }
+
+#ifdef USE_SIMULATOR
+  // TODO(all): consider doing some cache simulation to ensure that every
+  // executed address range has been synced.
+ USE(address);
+ USE(length);
+#else
+ // The code below assumes user space cache operations are allowed. The goal
+ // of this routine is to make sure the code generated is visible to the I
+ // side of the CPU.
+
+ uintptr_t start = reinterpret_cast<uintptr_t>(address);
+ // Sizes will be used to generate a mask big enough to cover a pointer.
+ uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
+ uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
+ // Cache line sizes are always a power of 2.
+ ASSERT(CountSetBits(dsize, 64) == 1);
+ ASSERT(CountSetBits(isize, 64) == 1);
+ uintptr_t dstart = start & ~(dsize - 1);
+ uintptr_t istart = start & ~(isize - 1);
+ uintptr_t end = start + length;
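+
+  // Worked example (illustrative): with a 64-byte D-cache line, dsize is 64
+  // and dstart = start & ~63, i.e. the start address rounded down to its
+  // cache line; the loops below then advance one line at a time until they
+  // pass 'end'.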
+
+ __asm__ __volatile__ ( // NOLINT
+ // Clean every line of the D cache containing the target data.
+ "0: \n\t"
+ // dc : Data Cache maintenance
+ // c : Clean
+ // va : by (Virtual) Address
+ // u : to the point of Unification
+ // The point of unification for a processor is the point by which the
+ // instruction and data caches are guaranteed to see the same copy of a
+ // memory location. See ARM DDI 0406B page B2-12 for more information.
+ "dc cvau, %[dline] \n\t"
+ "add %[dline], %[dline], %[dsize] \n\t"
+ "cmp %[dline], %[end] \n\t"
+ "b.lt 0b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ // dsb : Data Synchronisation Barrier
+ // ish : Inner SHareable domain
+ // The point of unification for an Inner Shareable shareability domain is
+ // the point by which the instruction and data caches of all the processors
+ // in that Inner Shareable shareability domain are guaranteed to see the
+ // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+ // information.
+ "dsb ish \n\t"
+ // Invalidate every line of the I cache containing the target data.
+ "1: \n\t"
+ // ic : instruction cache maintenance
+ // i : invalidate
+ // va : by address
+ // u : to the point of unification
+ "ic ivau, %[iline] \n\t"
+ "add %[iline], %[iline], %[isize] \n\t"
+ "cmp %[iline], %[end] \n\t"
+ "b.lt 1b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ "dsb ish \n\t"
+ // Barrier to ensure any prefetching which happened before this code is
+ // discarded.
+ // isb : Instruction Synchronisation Barrier
+ "isb \n\t"
+ : [dline] "+r" (dstart),
+ [iline] "+r" (istart)
+ : [dsize] "r" (dsize),
+ [isize] "r" (isize),
+ [end] "r" (end)
+  // This code does not write to memory, but without the "memory" clobber
+  // below gcc might move this code before the code to be flushed has been
+  // written.
+ : "cc", "memory"
+ ); // NOLINT
+#endif
+}
+
+
+void CpuFeatures::Probe() {
+ // Compute I and D cache line size. The cache type register holds
+ // information about the caches.
+ uint32_t cache_type_register = GetCacheType();
+
+ static const int kDCacheLineSizeShift = 16;
+ static const int kICacheLineSizeShift = 0;
+ static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
+ static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
+
+  // The cache type register holds the I and D cache line sizes, each encoded
+  // as a power of two.
+ uint32_t dcache_line_size_power_of_two =
+ (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
+ uint32_t icache_line_size_power_of_two =
+ (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
+
+ dcache_line_size_ = 1 << dcache_line_size_power_of_two;
+ icache_line_size_ = 1 << icache_line_size_power_of_two;
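+
+  // For example (illustrative), if bits 19:16 of the cache type register read
+  // 0x6, dcache_line_size_ becomes 1 << 6 = 64.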
+
+  // AArch64 has no configuration options, so no further probing is required.
+ supported_ = 0;
+
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+}
+
+
+unsigned CpuFeatures::dcache_line_size() {
+ ASSERT(initialized_);
+ return dcache_line_size_;
+}
+
+
+unsigned CpuFeatures::icache_line_size() {
+ ASSERT(initialized_);
+ return icache_line_size_;
+}
+
+
+uint32_t CpuFeatures::GetCacheType() {
+#ifdef USE_SIMULATOR
+ // This will lead to a cache with 1 byte long lines, which is fine since the
+ // simulator will not need this information.
+ return 0;
+#else
+ uint32_t cache_type_register;
+ // Copy the content of the cache type register to a core register.
+ __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r" (cache_type_register));
+ return cache_type_register;
+#endif
+}
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/cpu-a64.h b/deps/v8/src/a64/cpu-a64.h
new file mode 100644
index 0000000000..969312b3c4
--- /dev/null
+++ b/deps/v8/src/a64/cpu-a64.h
@@ -0,0 +1,107 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_CPU_A64_H_
+#define V8_A64_CPU_A64_H_
+
+#include <stdio.h>
+#include "serialize.h"
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a CpuFeatureScope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
+ // There are no optional features for A64.
+ return false;
+  }
+
+ static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
+ ASSERT(initialized_);
+ // There are no optional features for A64.
+ return false;
+ }
+
+ static bool IsSafeForSnapshot(CpuFeature f) {
+ return (IsSupported(f) &&
+ (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ }
+
+ // I and D cache line size in bytes.
+ static unsigned dcache_line_size();
+ static unsigned icache_line_size();
+
+ static unsigned supported_;
+
+ static bool VerifyCrossCompiling() {
+ // There are no optional features for A64.
+ ASSERT(cross_compile_ == 0);
+ return true;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ // There are no optional features for A64.
+ USE(f);
+ ASSERT(cross_compile_ == 0);
+ return true;
+ }
+
+ private:
+ // Return the content of the cache type register.
+ static uint32_t GetCacheType();
+
+ // I and D cache line size in bytes.
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+
+ // This isn't used (and is always 0), but it is required by V8.
+ static unsigned found_by_runtime_probing_only_;
+
+ static unsigned cross_compile_;
+
+ friend class PlatformFeatureScope;
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_CPU_A64_H_
diff --git a/deps/v8/src/a64/debug-a64.cc b/deps/v8/src/a64/debug-a64.cc
new file mode 100644
index 0000000000..d8711650c1
--- /dev/null
+++ b/deps/v8/src/a64/debug-a64.cc
@@ -0,0 +1,394 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "codegen.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ // Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
+ // the return from JS function sequence from
+ // mov sp, fp
+ // ldp fp, lr, [sp] #16
+  //   ldr ip0, [pc, #(3 * kInstructionSize)]
+ // add sp, sp, ip0
+ // ret
+  //   <number of parameters ...
+ // ... plus one (64 bits)>
+ // to a call to the debug break return code.
+ // ldr ip0, [pc, #(3 * kInstructionSize)]
+ // blr ip0
+ // hlt kHltBadCode @ code should not return, catch if it does.
+ // <debug break return code ...
+ // ... entry point address (64 bits)>
+
+ // The patching code must not overflow the space occupied by the return
+ // sequence.
+ STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
+ byte* entry =
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry();
+
+ // The first instruction of a patched return sequence must be a load literal
+ // loading the address of the debug break return code.
+ patcher.LoadLiteral(ip0, 3 * kInstructionSize);
+ // TODO(all): check the following is correct.
+ // The debug break return code will push a frame and call statically compiled
+ // code. By using blr, even though control will not return after the branch,
+ // this call site will be registered in the frame (lr being saved as the pc
+ // of the next instruction to execute for this frame). The debugger can now
+  // iterate over the frames to find the call to the debug break return code.
+ patcher.blr(ip0);
+ patcher.hlt(kHltBadCode);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
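+
+  // Note (illustrative): the LoadLiteral above reads the 64-bit value emitted
+  // by dc64() three instructions later (ldr, blr, hlt, then the literal), so
+  // ip0 holds the entry address of the debug break return code when the blr
+  // executes.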
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ // Reset the code emitted by EmitReturnSequence to its original state.
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSRetSequenceInstructions);
+}
+
+
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ // Patch the code emitted by Debug::GenerateSlots, changing the debug break
+ // slot code from
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // to a call to the debug slot code.
+ // ldr ip0, [pc, #(2 * kInstructionSize)]
+ // blr ip0
+ // <debug break slot code ...
+ // ... entry point address (64 bits)>
+
+ // TODO(all): consider adding a hlt instruction after the blr as we don't
+ // expect control to return here. This implies increasing
+ // kDebugBreakSlotInstructions to 5 instructions.
+
+ // The patching code must not overflow the space occupied by the return
+ // sequence.
+ STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
+ byte* entry =
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry();
+
+ // The first instruction of a patched debug break slot must be a load literal
+ // loading the address of the debug break slot code.
+ patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ // TODO(all): check the following is correct.
+ // The debug break slot code will push a frame and call statically compiled
+  // code. By using blr, even though control will not return after the branch,
+ // this call site will be registered in the frame (lr being saved as the pc
+ // of the next instruction to execute for this frame). The debugger can now
+  // iterate over the frames to find the call to the debug break slot code.
+ patcher.blr(ip0);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs,
+ Register scratch) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Any live values (object_regs and non_object_regs) in caller-saved
+ // registers (or lr) need to be stored on the stack so that their values are
+ // safely preserved for a call into C code.
+ //
+ // Also:
+    //  * object_regs may be modified by the garbage collector during the C
+    //    call. Every object register must hold a valid tagged pointer or a
+    //    SMI.
+ //
+ // * non_object_regs will be converted to SMIs so that the garbage
+ // collector doesn't try to interpret them as pointers.
+ //
+ // TODO(jbramley): Why can't this handle callee-saved registers?
+ ASSERT((~kCallerSaved.list() & object_regs) == 0);
+ ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ ASSERT((scratch.Bit() & object_regs) == 0);
+ ASSERT((scratch.Bit() & non_object_regs) == 0);
+ ASSERT((ip0.Bit() & (object_regs | non_object_regs)) == 0);
+ ASSERT((ip1.Bit() & (object_regs | non_object_regs)) == 0);
+ STATIC_ASSERT(kSmiValueSize == 32);
+
+ CPURegList non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Store each non-object register as two SMIs.
+ Register reg = Register(non_object_list.PopLowestIndex());
+ __ Push(reg);
+ __ Poke(wzr, 0);
+ __ Push(reg.W(), wzr);
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
+ }
+
+ if (object_regs != 0) {
+ __ PushXRegList(object_regs);
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ Mov(x0, 0); // No arguments.
+ __ Mov(x1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Restore the register values from the expression stack.
+ if (object_regs != 0) {
+ __ PopXRegList(object_regs);
+ }
+
+ non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Load each non-object register from two SMIs.
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ Register reg = Register(non_object_list.PopHighestIndex());
+ __ Pop(scratch, reg);
+ __ Bfxil(reg, scratch, 32, 32);
+ }
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
+ masm->isolate());
+ __ Mov(scratch, Operand(after_break_target));
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Br(scratch);
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC load (from ic-arm.cc).
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers x0 and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC store (from ic-arm.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ // Registers x0, x1, and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+  //  -- x0 : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC call (from ic-arm.cc)
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // In places other than IC call sites it is expected that x0 is TOS, which
+  // is an object. This is not generally the case, so this should be used with
+  // care.
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-a64.cc).
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-a64.cc).
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -- x2 : feedback array
+ // -- x3 : slot in feedback array
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-a64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-a64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -- x2 : feedback array
+ // -- x3 : feedback slot (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(
+ masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
+}
+
+
+void Debug::GenerateSlot(MacroAssembler* masm) {
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+ // the constant pool in the debug break slot code.
+ InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
+
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(Assembler::DEBUG_BREAK_NOP);
+ }
+}
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+  // In the places where a debug break slot is inserted, no registers may
+  // contain object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0, x10);
+}
+
+
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
+}
+
+
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
+}
+
+const bool Debug::kFrameDropperSupported = false;
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/debugger-a64.cc b/deps/v8/src/a64/debugger-a64.cc
new file mode 100644
index 0000000000..5bccc39776
--- /dev/null
+++ b/deps/v8/src/a64/debugger-a64.cc
@@ -0,0 +1,111 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#if V8_TARGET_ARCH_A64
+
+#if defined(USE_SIMULATOR)
+
+#include "a64/debugger-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+void Debugger::VisitException(Instruction* instr) {
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: {
+ if (instr->ImmException() == kImmExceptionIsDebug) {
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t code;
+ uint32_t parameters;
+ char const * message;
+
+ ASSERT(sizeof(*pc_) == 1);
+ memcpy(&code, pc_ + kDebugCodeOffset, sizeof(code));
+ memcpy(&parameters, pc_ + kDebugParamsOffset, sizeof(parameters));
+ message = reinterpret_cast<char const *>(pc_ + kDebugMessageOffset);
+
+ if (message[0] == '\0') {
+ fprintf(stream_, "Debugger hit %" PRIu32 ".\n", code);
+ } else {
+ fprintf(stream_, "Debugger hit %" PRIu32 ": %s\n", code, message);
+ }
+
+ // Other options.
+ switch (parameters & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ set_log_parameters(log_parameters() | parameters);
+ break;
+ case TRACE_DISABLE:
+ set_log_parameters(log_parameters() & ~parameters);
+ break;
+ case TRACE_OVERRIDE:
+ set_log_parameters(parameters);
+ break;
+ default:
+ // We don't support a one-shot LOG_DISASM.
+ ASSERT((parameters & LOG_DISASM) == 0);
+ // Don't print information that is already being traced.
+ parameters &= ~log_parameters();
+ // Print the requested information.
+ if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
+ if (parameters & LOG_REGS) PrintRegisters(true);
+ if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+ }
+
+ // Check if the debugger should break.
+ if (parameters & BREAK) OS::DebugBreak();
+
+ // The stop parameters are inlined in the code. Skip them:
+ // - Skip to the end of the message string.
+ pc_ += kDebugMessageOffset + strlen(message) + 1;
+ // - Advance to the next aligned location.
+ pc_ = AlignUp(pc_, kInstructionSize);
+ // - Verify that the unreachable marker is present.
+ ASSERT(reinterpret_cast<Instruction*>(pc_)->Mask(ExceptionMask) == HLT);
+ ASSERT(reinterpret_cast<Instruction*>(pc_)->ImmException() ==
+ kImmExceptionIsUnreachable);
+ // - Skip past the unreachable marker.
+ pc_ += kInstructionSize;
+ pc_modified_ = true;
+ } else {
+ Simulator::VisitException(instr);
+ }
+ break;
+ }
+
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // USE_SIMULATOR
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/debugger-a64.h b/deps/v8/src/a64/debugger-a64.h
new file mode 100644
index 0000000000..1317b5f37d
--- /dev/null
+++ b/deps/v8/src/a64/debugger-a64.h
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_DEBUGGER_A64_H_
+#define V8_A64_DEBUGGER_A64_H_
+
+#if defined(USE_SIMULATOR)
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/constants-a64.h"
+#include "a64/simulator-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+class Debugger : public Simulator {
+ public:
+ Debugger(Decoder* decoder, FILE* stream = stderr)
+ : Simulator(decoder, NULL, stream) {}
+
+  // Overridden functions.
+ void VisitException(Instruction* instr);
+};
+
+
+} } // namespace v8::internal
+
+#endif // USE_SIMULATOR
+
+#endif // V8_A64_DEBUGGER_A64_H_
diff --git a/deps/v8/src/a64/decoder-a64.cc b/deps/v8/src/a64/decoder-a64.cc
new file mode 100644
index 0000000000..e7383d446a
--- /dev/null
+++ b/deps/v8/src/a64/decoder-a64.cc
@@ -0,0 +1,726 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/decoder-a64.h"
+
+
+namespace v8 {
+namespace internal {
+
+// Top-level instruction decode function.
+void Decoder::Decode(Instruction *instr) {
+ if (instr->Bits(28, 27) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(27, 24)) {
+ // 0: PC relative addressing.
+ case 0x0: DecodePCRelAddressing(instr); break;
+
+ // 1: Add/sub immediate.
+ case 0x1: DecodeAddSubImmediate(instr); break;
+
+ // A: Logical shifted register.
+ // Add/sub with carry.
+ // Conditional compare register.
+ // Conditional compare immediate.
+ // Conditional select.
+ // Data processing 1 source.
+ // Data processing 2 source.
+ // B: Add/sub shifted register.
+ // Add/sub extended register.
+ // Data processing 3 source.
+ case 0xA:
+ case 0xB: DecodeDataProcessing(instr); break;
+
+ // 2: Logical immediate.
+ // Move wide immediate.
+ case 0x2: DecodeLogical(instr); break;
+
+ // 3: Bitfield.
+ // Extract.
+ case 0x3: DecodeBitfieldExtract(instr); break;
+
+ // 4: Unconditional branch immediate.
+ // Exception generation.
+ // Compare and branch immediate.
+ // 5: Compare and branch immediate.
+ // Conditional branch.
+ // System.
+ // 6,7: Unconditional branch.
+ // Test and branch immediate.
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7: DecodeBranchSystemException(instr); break;
+
+ // 8,9: Load/store register pair post-index.
+ // Load register literal.
+ // Load/store register unscaled immediate.
+ // Load/store register immediate post-index.
+ // Load/store register immediate pre-index.
+ // Load/store register offset.
+ // C,D: Load/store register pair offset.
+ // Load/store register pair pre-index.
+ // Load/store register unsigned immediate.
+ // Advanced SIMD.
+ case 0x8:
+ case 0x9:
+ case 0xC:
+ case 0xD: DecodeLoadStore(instr); break;
+
+ // E: FP fixed point conversion.
+ // FP integer conversion.
+ // FP data processing 1 source.
+ // FP compare.
+ // FP immediate.
+ // FP data processing 2 source.
+ // FP conditional compare.
+ // FP conditional select.
+ // Advanced SIMD.
+ // F: FP data processing 3 source.
+ // Advanced SIMD.
+ case 0xE:
+ case 0xF: DecodeFP(instr); break;
+ }
+ }
+}
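+
+// Worked example (illustrative, not part of the upstream patch): a B
+// instruction (unconditional branch immediate, 0x14000000) has bits<28:27>
+// != 0 and bits<27:24> == 0x4, so Decode() routes it to
+// DecodeBranchSystemException(); an LDR literal (0x18000000) has
+// bits<27:24> == 0x8 and is routed to DecodeLoadStore().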
+
+
+void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
+ visitors_.remove(new_visitor);
+  visitors_.push_back(new_visitor);
+}
+
+
+void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
+ visitors_.remove(new_visitor);
+  visitors_.push_front(new_visitor);
+}
+
+
+void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.insert(it, new_visitor);
+}
+
+
+void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ it++;
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.push_back(new_visitor);
+}
+
+
+void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
+ visitors_.remove(visitor);
+}
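+
+// Usage sketch (illustrative, not part of the upstream patch): visitors are
+// dispatched in list order, so a typical setup registers a disassembler or
+// simulator as a DecoderVisitor and then feeds instructions through Decode().
+// PrintDisassembler is assumed here to be the DecoderVisitor subclass declared
+// in disasm-a64.h.
+//
+//   Decoder decoder;
+//   PrintDisassembler disasm(stdout);
+//   decoder.AppendVisitor(&disasm);
+//   decoder.Decode(instr);  // Calls disasm's Visit<Type>(instr) callback.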
+
+
+void Decoder::DecodePCRelAddressing(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x0);
+ // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+ // decode.
+ ASSERT(instr->Bit(28) == 0x1);
+ VisitPCRelAddressing(instr);
+}
+
+
+void Decoder::DecodeBranchSystemException(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x4) ||
+ (instr->Bits(27, 24) == 0x5) ||
+ (instr->Bits(27, 24) == 0x6) ||
+ (instr->Bits(27, 24) == 0x7) );
+
+ switch (instr->Bits(31, 29)) {
+ case 0:
+ case 4: {
+ VisitUnconditionalBranch(instr);
+ break;
+ }
+ case 1:
+ case 5: {
+ if (instr->Bit(25) == 0) {
+ VisitCompareBranch(instr);
+ } else {
+ VisitTestBranch(instr);
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(25) == 0) {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Mask(0x01000010) == 0x00000010)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalBranch(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(24) == 0) {
+ if ((instr->Bits(4, 2) != 0) ||
+ (instr->Mask(0x00E0001D) == 0x00200001) ||
+ (instr->Mask(0x00E0001D) == 0x00400001) ||
+ (instr->Mask(0x00E0001E) == 0x00200002) ||
+ (instr->Mask(0x00E0001E) == 0x00400002) ||
+ (instr->Mask(0x00E0001C) == 0x00600000) ||
+ (instr->Mask(0x00E0001C) == 0x00800000) ||
+ (instr->Mask(0x00E0001F) == 0x00A00000) ||
+ (instr->Mask(0x00C0001C) == 0x00C00000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitException(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+ if ((instr->Bits(21, 19) == 0x4) ||
+ (masked_003FF0E0 == 0x00033000) ||
+ (masked_003FF0E0 == 0x003FF020) ||
+ (masked_003FF0E0 == 0x003FF060) ||
+ (masked_003FF0E0 == 0x003FF0E0) ||
+ (instr->Mask(0x00388000) == 0x00008000) ||
+ (instr->Mask(0x0038E000) == 0x00000000) ||
+ (instr->Mask(0x0039E000) == 0x00002000) ||
+ (instr->Mask(0x003AE000) == 0x00002000) ||
+ (instr->Mask(0x003CE000) == 0x00042000) ||
+ (instr->Mask(0x003FFFC0) == 0x000320C0) ||
+ (instr->Mask(0x003FF100) == 0x00032100) ||
+ (instr->Mask(0x003FF200) == 0x00032200) ||
+ (instr->Mask(0x003FF400) == 0x00032400) ||
+ (instr->Mask(0x003FF800) == 0x00032800) ||
+ (instr->Mask(0x0038F000) == 0x00005000) ||
+ (instr->Mask(0x0038E000) == 0x00006000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitSystem(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) ||
+ (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) ||
+ (instr->Bits(24, 22) == 0x3)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitUnconditionalBranchToRegister(instr);
+ }
+ }
+ break;
+ }
+ case 3:
+ case 7: {
+ VisitUnallocated(instr);
+ break;
+ }
+ }
+}
+
+
+void Decoder::DecodeLoadStore(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x8) ||
+ (instr->Bits(27, 24) == 0x9) ||
+ (instr->Bits(27, 24) == 0xC) ||
+ (instr->Bits(27, 24) == 0xD) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(26) == 0) {
+ // TODO(all): VisitLoadStoreExclusive.
+ VisitUnimplemented(instr);
+ } else {
+ DecodeAdvSIMDLoadStore(instr);
+ }
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Mask(0xC4400000) == 0xC0400000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePairNonTemporal(instr);
+ }
+ } else {
+ VisitLoadStorePairPostIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Mask(0xC4000000) == 0xC4000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadLiteral(instr);
+ }
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(21) == 0) {
+ switch (instr->Bits(11, 10)) {
+ case 0: {
+ VisitLoadStoreUnscaledOffset(instr);
+ break;
+ }
+ case 1: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePostIndex(instr);
+ }
+ break;
+ }
+ case 2: {
+ // TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
+ VisitUnimplemented(instr);
+ break;
+ }
+ case 3: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePreIndex(instr);
+ }
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x2) {
+ if (instr->Bit(14) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreRegisterOffset(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLoadStorePairOffset(instr);
+ } else {
+ VisitLoadStorePairPreIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreUnsignedOffset(instr);
+ }
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeLogical(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x2);
+
+ if (instr->Mask(0x80400000) == 0x00400000) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLogicalImmediate(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitMoveWideImmediate(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeBitfieldExtract(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x3);
+
+ if ((instr->Mask(0x80400000) == 0x80000000) ||
+ (instr->Mask(0x80400000) == 0x00400000) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else if (instr->Bit(23) == 0) {
+ if ((instr->Mask(0x80200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) == 0x60000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitBitfield(instr);
+ }
+ } else {
+ if ((instr->Mask(0x60200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitExtract(instr);
+ }
+ }
+}
+
+
+void Decoder::DecodeAddSubImmediate(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x1);
+ if (instr->Bit(23) == 1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubImmediate(instr);
+ }
+}
+
+
+void Decoder::DecodeDataProcessing(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Mask(0x80008000) == 0x00008000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLogicalShifted(instr);
+ }
+ } else {
+ switch (instr->Bits(23, 21)) {
+ case 0: {
+ if (instr->Mask(0x0000FC00) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubWithCarry(instr);
+ }
+ break;
+ }
+ case 2: {
+ if ((instr->Bit(29) == 0) ||
+ (instr->Mask(0x00000410) != 0)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(11) == 0) {
+ VisitConditionalCompareRegister(instr);
+ } else {
+ VisitConditionalCompareImmediate(instr);
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (instr->Mask(0x20000800) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalSelect(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(29) == 0x1) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(30) == 0) {
+ if ((instr->Bit(15) == 0x1) ||
+ (instr->Bits(15, 11) == 0) ||
+ (instr->Bits(15, 12) == 0x1) ||
+ (instr->Bits(15, 12) == 0x3) ||
+ (instr->Bits(15, 13) == 0x3) ||
+ (instr->Mask(0x8000EC00) == 0x00004C00) ||
+ (instr->Mask(0x8000E800) == 0x80004000) ||
+ (instr->Mask(0x8000E400) == 0x80004000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing2Source(instr);
+ }
+ } else {
+ if ((instr->Bit(13) == 1) ||
+ (instr->Bits(20, 16) != 0) ||
+ (instr->Bits(15, 14) != 0) ||
+ (instr->Mask(0xA01FFC00) == 0x00000C00) ||
+ (instr->Mask(0x201FF800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing1Source(instr);
+ }
+ }
+ break;
+ }
+ }
+ case 1:
+ case 3:
+ case 5:
+ case 7: VisitUnallocated(instr); break;
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubShifted(instr);
+ }
+ } else {
+ if ((instr->Mask(0x00C00000) != 0x00000000) ||
+ (instr->Mask(0x00001400) == 0x00001400) ||
+ (instr->Mask(0x00001800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubExtended(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(30) == 0x1) ||
+ (instr->Bits(30, 29) == 0x1) ||
+ (instr->Mask(0xE0600000) == 0x00200000) ||
+ (instr->Mask(0xE0608000) == 0x00400000) ||
+ (instr->Mask(0x60608000) == 0x00408000) ||
+ (instr->Mask(0x60E00000) == 0x00E00000) ||
+ (instr->Mask(0x60E00000) == 0x00800000) ||
+ (instr->Mask(0x60E00000) == 0x00600000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing3Source(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeFP(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xE) ||
+ (instr->Bits(27, 24) == 0xF) );
+
+ if (instr->Bit(28) == 0) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(29) == 1) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bits(31, 30) == 0x3) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(31, 30) == 0x1) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bit(23) == 1) ||
+ (instr->Bit(18) == 1) ||
+ (instr->Mask(0x80008000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x00160000) == 0x00000000) ||
+ (instr->Mask(0x00160000) == 0x00120000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPFixedPointConvert(instr);
+ }
+ } else {
+ if (instr->Bits(15, 10) == 32) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(15, 10) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x000E0000) == 0x000C0000) ||
+ (instr->Mask(0x00160000) == 0x00120000) ||
+ (instr->Mask(0x00160000) == 0x00140000) ||
+ (instr->Mask(0x20C40000) == 0x00800000) ||
+ (instr->Mask(0x20C60000) == 0x00840000) ||
+ (instr->Mask(0xA0C60000) == 0x80060000) ||
+ (instr->Mask(0xA0C60000) == 0x00860000) ||
+ (instr->Mask(0xA0C60000) == 0x00460000) ||
+ (instr->Mask(0xA0CE0000) == 0x80860000) ||
+ (instr->Mask(0xA0CE0000) == 0x804E0000) ||
+ (instr->Mask(0xA0CE0000) == 0x000E0000) ||
+ (instr->Mask(0xA0D60000) == 0x00160000) ||
+ (instr->Mask(0xA0D60000) == 0x80560000) ||
+ (instr->Mask(0xA0D60000) == 0x80960000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPIntegerConvert(instr);
+ }
+ } else if (instr->Bits(14, 10) == 16) {
+ const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
+ if ((instr->Mask(0x80180000) != 0) ||
+ (masked_A0DF8000 == 0x00020000) ||
+ (masked_A0DF8000 == 0x00030000) ||
+ (masked_A0DF8000 == 0x00068000) ||
+ (masked_A0DF8000 == 0x00428000) ||
+ (masked_A0DF8000 == 0x00430000) ||
+ (masked_A0DF8000 == 0x00468000) ||
+ (instr->Mask(0xA0D80000) == 0x00800000) ||
+ (instr->Mask(0xA0DE0000) == 0x00C00000) ||
+ (instr->Mask(0xA0DF0000) == 0x00C30000) ||
+ (instr->Mask(0xA0DC0000) == 0x00C40000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing1Source(instr);
+ }
+ } else if (instr->Bits(13, 10) == 8) {
+ if ((instr->Bits(15, 14) != 0) ||
+ (instr->Bits(2, 0) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPCompare(instr);
+ }
+ } else if (instr->Bits(12, 10) == 4) {
+ if ((instr->Bits(9, 5) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPImmediate(instr);
+ }
+ } else {
+ if (instr->Mask(0x80800000) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 1: {
+ VisitFPConditionalCompare(instr);
+ break;
+ }
+ case 2: {
+ if ((instr->Bits(15, 14) == 0x3) ||
+ (instr->Mask(0x00009000) == 0x00009000) ||
+ (instr->Mask(0x0000A000) == 0x0000A000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing2Source(instr);
+ }
+ break;
+ }
+ case 3: {
+ VisitFPConditionalSelect(instr);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ }
+ }
+ }
+ } else {
+ // Bit 30 == 1 has been handled earlier.
+ ASSERT(instr->Bit(30) == 0);
+ if (instr->Mask(0xA0800000) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing3Source(instr);
+ }
+ }
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) {
+ // TODO(all): Implement Advanced SIMD load/store instruction decode.
+ ASSERT(instr->Bits(29, 25) == 0x6);
+ VisitUnimplemented(instr);
+}
+
+
+void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) {
+ // TODO(all): Implement Advanced SIMD data processing instruction decode.
+ ASSERT(instr->Bits(27, 25) == 0x7);
+ VisitUnimplemented(instr);
+}
+
+
+#define DEFINE_VISITOR_CALLERS(A) \
+ void Decoder::Visit##A(Instruction *instr) { \
+ if (!(instr->Mask(A##FMask) == A##Fixed)) { \
+ ASSERT(instr->Mask(A##FMask) == A##Fixed); \
+ } \
+ std::list<DecoderVisitor*>::iterator it; \
+ for (it = visitors_.begin(); it != visitors_.end(); it++) { \
+ (*it)->Visit##A(instr); \
+ } \
+ }
+VISITOR_LIST(DEFINE_VISITOR_CALLERS)
+#undef DEFINE_VISITOR_CALLERS
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/decoder-a64.h b/deps/v8/src/a64/decoder-a64.h
new file mode 100644
index 0000000000..0f53c34e88
--- /dev/null
+++ b/deps/v8/src/a64/decoder-a64.h
@@ -0,0 +1,202 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_DECODER_A64_H_
+#define V8_A64_DECODER_A64_H_
+
+#include <list>
+
+#include "globals.h"
+#include "a64/instructions-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadStorePairNonTemporal) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(Unallocated) \
+ V(Unimplemented)
+
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+ #define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ virtual ~DecoderVisitor() {}
+
+ private:
+ // Visitors are registered in a list.
+ std::list<DecoderVisitor*> visitors_;
+
+ friend class Decoder;
+};
+
+
+class Decoder: public DecoderVisitor {
+ public:
+ explicit Decoder() {}
+
+ // Top-level instruction decoder function. Decodes an instruction and calls
+ // the visitor functions registered with the Decoder class.
+ void Decode(Instruction *instr);
+
+ // Register a new visitor class with the decoder.
+ // Decode() will call the corresponding visitor method from all registered
+ // visitor classes when decoding reaches the leaf node of the instruction
+ // decode tree.
+ // Visitors are called in the order in which they appear in the list.
+ // A visitor can only be registered once.
+ // Registering an already registered visitor will update its position.
+ //
+ // d.AppendVisitor(V1);
+ // d.AppendVisitor(V2);
+ // d.PrependVisitor(V2); // Move V2 to the start of the list.
+ // d.InsertVisitorBefore(V3, V2);
+ // d.AppendVisitor(V4);
+ // d.AppendVisitor(V4); // No effect.
+ //
+ // d.Decode(i);
+ //
+ // will call the visitor methods of V3, V2, V1 and V4, in that order.
+ void AppendVisitor(DecoderVisitor* visitor);
+ void PrependVisitor(DecoderVisitor* visitor);
+ void InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+ void InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+
+ // Remove a previously registered visitor class from the list of visitors
+ // stored by the decoder.
+ void RemoveVisitor(DecoderVisitor* visitor);
+
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ // Decode the PC relative addressing instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x0.
+ void DecodePCRelAddressing(Instruction* instr);
+
+ // Decode the add/subtract immediate instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x1.
+ void DecodeAddSubImmediate(Instruction* instr);
+
+ // Decode the branch, system command, and exception generation parts of
+ // the instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+ void DecodeBranchSystemException(Instruction* instr);
+
+ // Decode the load and store parts of the instruction tree, and call
+ // the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+ void DecodeLoadStore(Instruction* instr);
+
+ // Decode the logical immediate and move wide immediate parts of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x2.
+ void DecodeLogical(Instruction* instr);
+
+ // Decode the bitfield and extraction parts of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x3.
+ void DecodeBitfieldExtract(Instruction* instr);
+
+ // Decode the data processing parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0xA, 0xB}.
+ void DecodeDataProcessing(Instruction* instr);
+
+ // Decode the floating point parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0xE, 0xF}.
+ void DecodeFP(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 29:25 = 0x6.
+ void DecodeAdvSIMDLoadStore(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) data processing part of the instruction
+ // tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:25 = 0x7.
+ void DecodeAdvSIMDDataProcessing(Instruction* instr);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_DECODER_A64_H_
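As a quick illustration of the visitor interface declared above (a minimal sketch, not part of the patch; the CountingVisitor name and its counter are hypothetical, while DecoderVisitor, Instruction, VISITOR_LIST and the Decoder registration methods are the ones defined in decoder-a64.h):

    // Hypothetical visitor that counts decoded instructions. VISITOR_LIST
    // generates an override for every pure virtual Visit* method.
    class CountingVisitor : public v8::internal::DecoderVisitor {
     public:
      CountingVisitor() : count_(0) {}

      #define DEFINE_VISITOR(A) \
        virtual void Visit##A(v8::internal::Instruction*) { count_++; }
      VISITOR_LIST(DEFINE_VISITOR)
      #undef DEFINE_VISITOR

      int count() const { return count_; }

     private:
      int count_;
    };

    // Usage sketch: AppendVisitor() puts the visitor at the end of the list,
    // so it runs after any previously registered visitors.
    //   v8::internal::Decoder decoder;
    //   CountingVisitor counter;
    //   decoder.AppendVisitor(&counter);
    //   decoder.Decode(instr);  // Calls Visit* on each registered visitor.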
diff --git a/deps/v8/src/a64/deoptimizer-a64.cc b/deps/v8/src/a64/deoptimizer-a64.cc
new file mode 100644
index 0000000000..660feb2394
--- /dev/null
+++ b/deps/v8/src/a64/deoptimizer-a64.cc
@@ -0,0 +1,376 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::patch_size() {
+ // Size of the code used to patch lazy bailout points.
+ // Patching is done by Deoptimizer::DeoptimizeFunction.
+ return 4 * kInstructionSize;
+}
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ // Invalidate the relocation information, as the code patching below will
+ // make it stale and it is not needed any more.
+ code->InvalidateRelocation();
+
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ Address code_start_address = code->instruction_start();
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+
+ PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
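+ // The patched sequence is an ldr of the literal stored below, a blr, and
+ // the 64-bit deoptimization entry address itself (two instruction slots),
+ // which together fill patch_size() (4 * kInstructionSize).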
+ patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ patcher.blr(ip0);
+ patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
+
+ ASSERT((prev_call_address == NULL) ||
+ (call_address >= prev_call_address + patch_size()));
+ ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values though.
+ for (int i = 0; i < Register::NumRegisters(); i++) {
+ input_->SetRegister(i, 0);
+ }
+
+ // TODO(all): Do we also need to set a value to csp?
+ input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on A64 in the input frame.
+ return false;
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler_);
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(x0.code(), params);
+ output_frame->SetRegister(x1.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
+#define __ masm()->
+
+void Deoptimizer::EntryGenerator::Generate() {
+ GeneratePrologue();
+
+ // TODO(all): This code needs to be revisited. We probably only need to save
+ // caller-saved registers here. Callee-saved registers can be stored directly
+ // in the input frame.
+
+ // Save all allocatable floating point registers.
+ CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSize,
+ 0, FPRegister::NumAllocatableRegisters() - 1);
+ __ PushCPURegList(saved_fp_registers);
+
+ // We save all the registers except jssp, sp and lr.
+ CPURegList saved_registers(CPURegister::kRegister, kXRegSize, 0, 27);
+ saved_registers.Combine(fp);
+ __ PushCPURegList(saved_registers);
+
+ const int kSavedRegistersAreaSize =
+ (saved_registers.Count() * kXRegSizeInBytes) +
+ (saved_fp_registers.Count() * kDRegSizeInBytes);
+
+ // Floating point registers are saved on the stack above core registers.
+ const int kFPRegistersOffset = saved_registers.Count() * kXRegSizeInBytes;
+
+ // Get the bailout id from the stack.
+ Register bailout_id = x2;
+ __ Peek(bailout_id, kSavedRegistersAreaSize);
+
+ Register code_object = x3;
+ Register fp_to_sp = x4;
+ // Get the address of the location in the code object. This is the return
+ // address for lazy deoptimization.
+ __ Mov(code_object, lr);
+ // Compute the fp-to-sp delta, adjusting by one word for the bailout id.
+ __ Add(fp_to_sp, masm()->StackPointer(),
+ kSavedRegistersAreaSize + (1 * kPointerSize));
+ __ Sub(fp_to_sp, fp, fp_to_sp);
+
+ // Allocate a new deoptimizer object.
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Mov(x1, type());
+ // Following arguments are already loaded:
+ // - x2: bailout id
+ // - x3: code object address
+ // - x4: fp-to-sp delta
+ __ Mov(x5, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ // Call Deoptimizer::New().
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+ }
+
+ // Preserve "deoptimizer" object in register x0.
+ Register deoptimizer = x0;
+
+ // Get the input frame descriptor pointer.
+ __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
+
+ // Copy core registers into the input frame.
+ CPURegList copy_to_input = saved_registers;
+ for (int i = 0; i < saved_registers.Count(); i++) {
+ // TODO(all): Look for opportunities to optimize this by using ldp/stp.
+ __ Peek(x2, i * kPointerSize);
+ CPURegister current_reg = copy_to_input.PopLowestIndex();
+ int offset = (current_reg.code() * kPointerSize) +
+ FrameDescription::registers_offset();
+ __ Str(x2, MemOperand(x1, offset));
+ }
+
+ // Copy FP registers to the input frame.
+ for (int i = 0; i < saved_fp_registers.Count(); i++) {
+ // TODO(all): Look for opportunities to optimize this by using ldp/stp.
+ int dst_offset = FrameDescription::double_registers_offset() +
+ (i * kDoubleSize);
+ int src_offset = kFPRegistersOffset + (i * kDoubleSize);
+ __ Peek(x2, src_offset);
+ __ Str(x2, MemOperand(x1, dst_offset));
+ }
+
+ // Remove the bailout id and the saved registers from the stack.
+ __ Drop(1 + (kSavedRegistersAreaSize / kXRegSizeInBytes));
+
+ // Compute a pointer to the unwinding limit in register x2; that is
+ // the first stack slot not part of the input frame.
+ Register unwind_limit = x2;
+ __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
+ __ Add(unwind_limit, unwind_limit, __ StackPointer());
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add(x3, x1, FrameDescription::frame_content_offset());
+ Label pop_loop;
+ Label pop_loop_header;
+ __ B(&pop_loop_header);
+ __ Bind(&pop_loop);
+ __ Pop(x4);
+ __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
+ __ Bind(&pop_loop_header);
+ __ Cmp(unwind_limit, __ StackPointer());
+ __ B(ne, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ Push(x0); // Preserve deoptimizer object across call.
+
+ {
+ // Call Deoptimizer::ComputeOutputFrames().
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate()), 1);
+ }
+ __ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
+ __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
+ __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
+ __ B(&outer_loop_header);
+
+ __ Bind(&outer_push_loop);
+ Register current_frame = x2;
+ __ Ldr(current_frame, MemOperand(x0, 0));
+ __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
+ __ B(&inner_loop_header);
+
+ __ Bind(&inner_push_loop);
+ __ Sub(x3, x3, kPointerSize);
+ __ Add(x6, current_frame, x3);
+ __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
+ __ Push(x7);
+ __ Bind(&inner_loop_header);
+ __ Cbnz(x3, &inner_push_loop);
+
+ __ Add(x0, x0, kPointerSize);
+ __ Bind(&outer_loop_header);
+ __ Cmp(x0, x1);
+ __ B(lt, &outer_push_loop);
+
+ __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
+ ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
+ !saved_fp_registers.IncludesAliasOf(fp_zero) &&
+ !saved_fp_registers.IncludesAliasOf(fp_scratch));
+ int src_offset = FrameDescription::double_registers_offset();
+ while (!saved_fp_registers.IsEmpty()) {
+ const CPURegister reg = saved_fp_registers.PopLowestIndex();
+ __ Ldr(reg, MemOperand(x1, src_offset));
+ src_offset += kDoubleSize;
+ }
+
+ // Push state from the last output frame.
+ __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
+ __ Push(x6);
+
+ // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
+ // stack, then pops it all into registers. Here, we try to load it directly
+ // into the relevant registers. Is this correct? If so, we should improve the
+ // ARM code.
+
+ // TODO(all): This code needs to be revisited. We probably don't need to
+ // restore all the registers as fullcodegen does not keep live values in
+ // registers (note that at least fp must be restored though).
+
+ // Restore registers from the last output frame.
+ // Note that lr is not in the list of saved_registers and will be restored
+ // later. We can use it to hold the address of the last output frame while
+ // reloading the other registers.
+ ASSERT(!saved_registers.IncludesAliasOf(lr));
+ Register last_output_frame = lr;
+ __ Mov(last_output_frame, current_frame);
+
+ // We don't need to restore x7 as it will be clobbered later to hold the
+ // continuation address.
+ Register continuation = x7;
+ saved_registers.Remove(continuation);
+
+ while (!saved_registers.IsEmpty()) {
+ // TODO(all): Look for opportunities to optimize this by using ldp.
+ CPURegister current_reg = saved_registers.PopLowestIndex();
+ int offset = (current_reg.code() * kPointerSize) +
+ FrameDescription::registers_offset();
+ __ Ldr(current_reg, MemOperand(last_output_frame, offset));
+ }
+
+ __ Ldr(continuation, MemOperand(last_output_frame,
+ FrameDescription::continuation_offset()));
+ __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
+ __ InitializeRootRegister();
+ __ Br(continuation);
+}
+
+
+// Size of an entry of the second level deopt table.
+// This is the code size generated by GeneratePrologue for one entry.
+const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
+ Label done;
+ {
+ InstructionAccurateScope scope(masm());
+
+ // The number of entries will never exceed kMaxNumberOfEntries.
+ // As long as kMaxNumberOfEntries is a valid 16-bit immediate, a movz
+ // instruction can be used to load the entry id.
+ ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
+
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ movz(masm()->Tmp0(), i);
+ __ b(&done);
+ ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ }
+ }
+ __ Bind(&done);
+ // TODO(all): We need to add some kind of assertion to verify that Tmp0()
+ // is not clobbered by Push.
+ __ Push(masm()->Tmp0());
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/a64/disasm-a64.cc b/deps/v8/src/a64/disasm-a64.cc
new file mode 100644
index 0000000000..5ef75d55e2
--- /dev/null
+++ b/deps/v8/src/a64/disasm-a64.cc
@@ -0,0 +1,1854 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "disasm.h"
+#include "a64/disasm-a64.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+
+Disassembler::Disassembler() {
+ buffer_size_ = 256;
+ buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+ buffer_pos_ = 0;
+ own_buffer_ = true;
+}
+
+
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+ buffer_size_ = buffer_size;
+ buffer_ = text_buffer;
+ buffer_pos_ = 0;
+ own_buffer_ = false;
+}
+
+
+Disassembler::~Disassembler() {
+ if (own_buffer_) {
+ free(buffer_);
+ }
+}
+
+
+char* Disassembler::GetOutput() {
+ return buffer_;
+}
+
+
+void Disassembler::VisitAddSubImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+ (instr->ImmAddSub() == 0);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rns, 'IAddSub";
+ const char *form_cmp = "'Rns, 'IAddSub";
+ const char *form_mov = "'Rds, 'Rns";
+
+ switch (instr->Mask(AddSubImmediateMask)) {
+ case ADD_w_imm:
+ case ADD_x_imm: {
+ mnemonic = "add";
+ if (stack_op) {
+ mnemonic = "mov";
+ form = form_mov;
+ }
+ break;
+ }
+ case ADDS_w_imm:
+ case ADDS_x_imm: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_imm:
+ case SUB_x_imm: mnemonic = "sub"; break;
+ case SUBS_w_imm:
+ case SUBS_x_imm: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HDP";
+ const char *form_cmp = "'Rn, 'Rm'HDP";
+ const char *form_neg = "'Rd, 'Rm'HDP";
+
+ switch (instr->Mask(AddSubShiftedMask)) {
+ case ADD_w_shift:
+ case ADD_x_shift: mnemonic = "add"; break;
+ case ADDS_w_shift:
+ case ADDS_x_shift: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_shift:
+ case SUB_x_shift: {
+ mnemonic = "sub";
+ if (rn_is_zr) {
+ mnemonic = "neg";
+ form = form_neg;
+ }
+ break;
+ }
+ case SUBS_w_shift:
+ case SUBS_x_shift: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ } else if (rn_is_zr) {
+ mnemonic = "negs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubExtended(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ const char *mnemonic = "";
+ Extend mode = static_cast<Extend>(instr->ExtendMode());
+ const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+ const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+ switch (instr->Mask(AddSubExtendedMask)) {
+ case ADD_w_ext:
+ case ADD_x_ext: mnemonic = "add"; break;
+ case ADDS_w_ext:
+ case ADDS_x_ext: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_ext:
+ case SUB_x_ext: mnemonic = "sub"; break;
+ case SUBS_w_ext:
+ case SUBS_x_ext: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_neg = "'Rd, 'Rm";
+
+ switch (instr->Mask(AddSubWithCarryMask)) {
+ case ADC_w:
+ case ADC_x: mnemonic = "adc"; break;
+ case ADCS_w:
+ case ADCS_x: mnemonic = "adcs"; break;
+ case SBC_w:
+ case SBC_x: {
+ mnemonic = "sbc";
+ if (rn_is_zr) {
+ mnemonic = "ngc";
+ form = form_neg;
+ }
+ break;
+ }
+ case SBCS_w:
+ case SBCS_x: {
+ mnemonic = "sbcs";
+ if (rn_is_zr) {
+ mnemonic = "ngcs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLogicalImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rn, 'ITri";
+
+ if (instr->ImmLogical() == 0) {
+ // The immediate encoded in the instruction is not in the expected format.
+ Format(instr, "unallocated", "(LogicalImmediate)");
+ return;
+ }
+
+ switch (instr->Mask(LogicalImmediateMask)) {
+ case AND_w_imm:
+ case AND_x_imm: mnemonic = "and"; break;
+ case ORR_w_imm:
+ case ORR_x_imm: {
+ mnemonic = "orr";
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+ mnemonic = "mov";
+ form = "'Rds, 'ITri";
+ }
+ break;
+ }
+ case EOR_w_imm:
+ case EOR_x_imm: mnemonic = "eor"; break;
+ case ANDS_w_imm:
+ case ANDS_x_imm: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'ITri";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+ ASSERT((reg_size == kXRegSize) ||
+ ((reg_size == kWRegSize) && (value <= 0xffffffff)));
+
+ // Test for movz: 16 bits set at positions 0, 16, 32 or 48.
+ if (((value & 0xffffffffffff0000UL) == 0UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0UL) ||
+ ((value & 0xffff0000ffffffffUL) == 0UL) ||
+ ((value & 0x0000ffffffffffffUL) == 0UL)) {
+ return true;
+ }
+
+ // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48).
+ if ((reg_size == kXRegSize) &&
+ (((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
+ ((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
+ ((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
+ return true;
+ }
+ if ((reg_size == kWRegSize) &&
+ (((value & 0xffff0000) == 0xffff0000) ||
+ ((value & 0x0000ffff) == 0x0000ffff))) {
+ return true;
+ }
+ return false;
+}
+
+
+void Disassembler::VisitLogicalShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HLo";
+
+ switch (instr->Mask(LogicalShiftedMask)) {
+ case AND_w:
+ case AND_x: mnemonic = "and"; break;
+ case BIC_w:
+ case BIC_x: mnemonic = "bic"; break;
+ case EOR_w:
+ case EOR_x: mnemonic = "eor"; break;
+ case EON_w:
+ case EON_x: mnemonic = "eon"; break;
+ case BICS_w:
+ case BICS_x: mnemonic = "bics"; break;
+ case ANDS_w:
+ case ANDS_x: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'Rm'HLo";
+ }
+ break;
+ }
+ case ORR_w:
+ case ORR_x: {
+ mnemonic = "orr";
+ if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+ mnemonic = "mov";
+ form = "'Rd, 'Rm";
+ }
+ break;
+ }
+ case ORN_w:
+ case ORN_x: {
+ mnemonic = "orn";
+ if (rn_is_zr) {
+ mnemonic = "mvn";
+ form = "'Rd, 'Rm'HLo";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareRegisterMask)) {
+ case CCMN_w:
+ case CCMN_x: mnemonic = "ccmn"; break;
+ case CCMP_w:
+ case CCMP_x: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareImmediateMask)) {
+ case CCMN_w_imm:
+ case CCMN_x_imm: mnemonic = "ccmn"; break;
+ case CCMP_w_imm:
+ case CCMP_x_imm: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalSelect(Instruction* instr) {
+ bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+ bool rn_is_rm = (instr->Rn() == instr->Rm());
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+ const char *form_test = "'Rd, 'CInv";
+ const char *form_update = "'Rd, 'Rn, 'CInv";
+
+ Condition cond = static_cast<Condition>(instr->Condition());
+ bool invertible_cond = (cond != al) && (cond != nv);
+
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: mnemonic = "csel"; break;
+ case CSINC_w:
+ case CSINC_x: {
+ mnemonic = "csinc";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "cset";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinc";
+ form = form_update;
+ }
+ break;
+ }
+ case CSINV_w:
+ case CSINV_x: {
+ mnemonic = "csinv";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "csetm";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinv";
+ form = form_update;
+ }
+ break;
+ }
+ case CSNEG_w:
+ case CSNEG_x: {
+ mnemonic = "csneg";
+ if (rn_is_rm && invertible_cond) {
+ mnemonic = "cneg";
+ form = form_update;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitBitfield(Instruction* instr) {
+ unsigned s = instr->ImmS();
+ unsigned r = instr->ImmR();
+ unsigned rd_size_minus_1 =
+ ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
+ const char *mnemonic = "";
+ const char *form = "";
+ const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+ const char *form_extend = "'Rd, 'Wn";
+ const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+ const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+ const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
+ switch (instr->Mask(BitfieldMask)) {
+ case SBFM_w:
+ case SBFM_x: {
+ mnemonic = "sbfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "sxtb";
+ } else if (s == 15) {
+ mnemonic = "sxth";
+ } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+ mnemonic = "sxtw";
+ } else {
+ form = form_bfx;
+ }
+ } else if (s == rd_size_minus_1) {
+ mnemonic = "asr";
+ form = form_shift_right;
+ } else if (s < r) {
+ mnemonic = "sbfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case UBFM_w:
+ case UBFM_x: {
+ mnemonic = "ubfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "uxtb";
+ } else if (s == 15) {
+ mnemonic = "uxth";
+ } else {
+ form = form_bfx;
+ }
+ }
+ if (s == rd_size_minus_1) {
+ mnemonic = "lsr";
+ form = form_shift_right;
+ } else if (r == s + 1) {
+ mnemonic = "lsl";
+ form = form_lsl;
+ } else if (s < r) {
+ mnemonic = "ubfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case BFM_w:
+ case BFM_x: {
+ mnemonic = "bfxil";
+ form = form_bfx;
+ if (s < r) {
+ mnemonic = "bfi";
+ form = form_bfiz;
+ }
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitExtract(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+ switch (instr->Mask(ExtractMask)) {
+ case EXTR_w:
+ case EXTR_x: {
+ if (instr->Rn() == instr->Rm()) {
+ mnemonic = "ror";
+ form = "'Rd, 'Rn, 'IExtract";
+ } else {
+ mnemonic = "extr";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+ // ADRP is not implemented.
+ default: Format(instr, "unimplemented", "(PCRelAddressing)");
+ }
+}
+
+
+void Disassembler::VisitConditionalBranch(Instruction* instr) {
+ switch (instr->Mask(ConditionalBranchMask)) {
+ case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Xn";
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BR: mnemonic = "br"; break;
+ case BLR: mnemonic = "blr"; break;
+ case RET: {
+ mnemonic = "ret";
+ if (instr->Rn() == kLinkRegCode) {
+ form = NULL;
+ }
+ break;
+ }
+ default: form = "(UnconditionalBranchToRegister)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'BImmUncn";
+
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case B: mnemonic = "b"; break;
+ case BL: mnemonic = "bl"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn";
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(RBIT, "rbit");
+ FORMAT(REV16, "rev16");
+ FORMAT(REV, "rev");
+ FORMAT(CLZ, "clz");
+ FORMAT(CLS, "cls");
+ #undef FORMAT
+ case REV32_x: mnemonic = "rev32"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Rd, 'Rn, 'Rm";
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(UDIV, "udiv");
+ FORMAT(SDIV, "sdiv");
+ FORMAT(LSLV, "lsl");
+ FORMAT(LSRV, "lsr");
+ FORMAT(ASRV, "asr");
+ FORMAT(RORV, "ror");
+ #undef FORMAT
+ default: form = "(DataProcessing2Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
+ bool ra_is_zr = RaIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+ const char *form_rrr = "'Rd, 'Rn, 'Rm";
+ const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+ const char *form_xww = "'Xd, 'Wn, 'Wm";
+ const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x: {
+ mnemonic = "madd";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mul";
+ form = form_rrr;
+ }
+ break;
+ }
+ case MSUB_w:
+ case MSUB_x: {
+ mnemonic = "msub";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mneg";
+ form = form_rrr;
+ }
+ break;
+ }
+ case SMADDL_x: {
+ mnemonic = "smaddl";
+ if (ra_is_zr) {
+ mnemonic = "smull";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMSUBL_x: {
+ mnemonic = "smsubl";
+ if (ra_is_zr) {
+ mnemonic = "smnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMADDL_x: {
+ mnemonic = "umaddl";
+ if (ra_is_zr) {
+ mnemonic = "umull";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMSUBL_x: {
+ mnemonic = "umsubl";
+ if (ra_is_zr) {
+ mnemonic = "umnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMULH_x: {
+ mnemonic = "smulh";
+ form = form_xxx;
+ break;
+ }
+ case UMULH_x: {
+ mnemonic = "umulh";
+ form = form_xxx;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCompareBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rt, 'BImmCmpa";
+
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w:
+ case CBZ_x: mnemonic = "cbz"; break;
+ case CBNZ_w:
+ case CBNZ_x: mnemonic = "cbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitTestBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ // If the top bit of the immediate is clear, the tested register is
+ // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+ // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+ // uses bit 31 (normally "sf") to choose the register size.
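+ // For example, a test of bit 3 is disassembled with a W register, while a
+ // test of bit 35 is disassembled with an X register.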
+ const char *form = "'Rt, 'IS, 'BImmTest";
+
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: mnemonic = "tbz"; break;
+ case TBNZ: mnemonic = "tbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'IMoveImm";
+
+ // Print the shift separately for movk, to make it clear which half word will
+ // be overwritten. Movn and movz print the computed immediate, which includes
+ // shift calculation.
+ switch (instr->Mask(MoveWideImmediateMask)) {
+ case MOVN_w:
+ case MOVN_x: mnemonic = "movn"; break;
+ case MOVZ_w:
+ case MOVZ_x: mnemonic = "movz"; break;
+ case MOVK_w:
+ case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_LIST(V) \
+ V(STRB_w, "strb", "'Wt") \
+ V(STRH_w, "strh", "'Wt") \
+ V(STR_w, "str", "'Wt") \
+ V(STR_x, "str", "'Xt") \
+ V(LDRB_w, "ldrb", "'Wt") \
+ V(LDRH_w, "ldrh", "'Wt") \
+ V(LDR_w, "ldr", "'Wt") \
+ V(LDR_x, "ldr", "'Xt") \
+ V(LDRSB_x, "ldrsb", "'Xt") \
+ V(LDRSH_x, "ldrsh", "'Xt") \
+ V(LDRSW_x, "ldrsw", "'Xt") \
+ V(LDRSB_w, "ldrsb", "'Wt") \
+ V(LDRSH_w, "ldrsh", "'Wt") \
+ V(STR_s, "str", "'St") \
+ V(STR_d, "str", "'Dt") \
+ V(LDR_s, "ldr", "'St") \
+ V(LDR_d, "ldr", "'Dt")
+
+void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePreIndex)";
+
+ switch (instr->Mask(LoadStorePreIndexMask)) {
+ #define LS_PREINDEX(A, B, C) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+ LOAD_STORE_LIST(LS_PREINDEX)
+ #undef LS_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePostIndex)";
+
+ switch (instr->Mask(LoadStorePostIndexMask)) {
+ #define LS_POSTINDEX(A, B, C) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+ LOAD_STORE_LIST(LS_POSTINDEX)
+ #undef LS_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreUnsignedOffset)";
+
+ switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+ #define LS_UNSIGNEDOFFSET(A, B, C) \
+ case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+ LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+ #undef LS_UNSIGNEDOFFSET
+ case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreRegisterOffset)";
+
+ switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+ #define LS_REGISTEROFFSET(A, B, C) \
+ case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+ LOAD_STORE_LIST(LS_REGISTEROFFSET)
+ #undef LS_REGISTEROFFSET
+ case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Wt, ['Xns'ILS]";
+ const char *form_x = "'Xt, ['Xns'ILS]";
+ const char *form_s = "'St, ['Xns'ILS]";
+ const char *form_d = "'Dt, ['Xns'ILS]";
+
+ switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+ case STURB_w: mnemonic = "sturb"; break;
+ case STURH_w: mnemonic = "sturh"; break;
+ case STUR_w: mnemonic = "stur"; break;
+ case STUR_x: mnemonic = "stur"; form = form_x; break;
+ case STUR_s: mnemonic = "stur"; form = form_s; break;
+ case STUR_d: mnemonic = "stur"; form = form_d; break;
+ case LDURB_w: mnemonic = "ldurb"; break;
+ case LDURH_w: mnemonic = "ldurh"; break;
+ case LDUR_w: mnemonic = "ldur"; break;
+ case LDUR_x: mnemonic = "ldur"; form = form_x; break;
+ case LDUR_s: mnemonic = "ldur"; form = form_s; break;
+ case LDUR_d: mnemonic = "ldur"; form = form_d; break;
+ case LDURSB_x: form = form_x; // Fall through.
+ case LDURSB_w: mnemonic = "ldursb"; break;
+ case LDURSH_x: form = form_x; // Fall through.
+ case LDURSH_w: mnemonic = "ldursh"; break;
+ case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+ default: form = "(LoadStoreUnscaledOffset)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadLiteral(Instruction* instr) {
+ const char *mnemonic = "ldr";
+ const char *form = "(LoadLiteral)";
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+ case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+ case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+ case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+ default: mnemonic = "unimplemented";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_PAIR_LIST(V) \
+ V(STP_w, "stp", "'Wt, 'Wt2", "4") \
+ V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \
+ V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \
+ V(STP_x, "stp", "'Xt, 'Xt2", "8") \
+ V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \
+ V(STP_s, "stp", "'St, 'St2", "4") \
+ V(LDP_s, "ldp", "'St, 'St2", "4") \
+ V(STP_d, "stp", "'Dt, 'Dt2", "8") \
+ V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
+
+void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPostIndex)";
+
+ switch (instr->Mask(LoadStorePairPostIndexMask)) {
+ #define LSP_POSTINDEX(A, B, C, D) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+ LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+ #undef LSP_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPreIndex)";
+
+ switch (instr->Mask(LoadStorePairPreIndexMask)) {
+ #define LSP_PREINDEX(A, B, C, D) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+ LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+ #undef LSP_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairOffset)";
+
+ switch (instr->Mask(LoadStorePairOffsetMask)) {
+ #define LSP_OFFSET(A, B, C, D) \
+ case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+ LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+ #undef LSP_OFFSET
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+ case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ default: form = "(LoadStorePairNonTemporal)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPCompare(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm";
+ const char *form_zero = "'Fn, #0.0";
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s_zero:
+ case FCMP_d_zero: form = form_zero; // Fall through.
+ case FCMP_s:
+ case FCMP_d: mnemonic = "fcmp"; break;
+ default: form = "(FPCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: mnemonic = "fccmp"; break;
+ case FCCMPE_s:
+ case FCCMPE_d: mnemonic = "fccmpe"; break;
+ default: form = "(FPConditionalCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s:
+ case FCSEL_d: mnemonic = "fcsel"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fd, 'Fn";
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMOV, "fmov");
+ FORMAT(FABS, "fabs");
+ FORMAT(FNEG, "fneg");
+ FORMAT(FSQRT, "fsqrt");
+ FORMAT(FRINTN, "frintn");
+ FORMAT(FRINTP, "frintp");
+ FORMAT(FRINTM, "frintm");
+ FORMAT(FRINTZ, "frintz");
+ FORMAT(FRINTA, "frinta");
+ FORMAT(FRINTX, "frintx");
+ FORMAT(FRINTI, "frinti");
+ #undef FORMAT
+ case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+ case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+ default: form = "(FPDataProcessing1Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm";
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMUL, "fmul");
+ FORMAT(FDIV, "fdiv");
+ FORMAT(FADD, "fadd");
+ FORMAT(FSUB, "fsub");
+ FORMAT(FMAX, "fmax");
+ FORMAT(FMIN, "fmin");
+ FORMAT(FMAXNM, "fmaxnm");
+ FORMAT(FMINNM, "fminnm");
+ FORMAT(FNMUL, "fnmul");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMADD, "fmadd");
+ FORMAT(FMSUB, "fmsub");
+ FORMAT(FNMADD, "fnmadd");
+ FORMAT(FNMSUB, "fnmsub");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "(FPImmediate)";
+
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
+ case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(FPIntegerConvert)";
+ const char *form_rf = "'Rd, 'Fn";
+ const char *form_fr = "'Fd, 'Rn";
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FMOV_ws:
+ case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+ case FMOV_sw:
+ case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FCVTAS_ws:
+ case FCVTAS_xs:
+ case FCVTAS_wd:
+ case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+ case FCVTAU_ws:
+ case FCVTAU_xs:
+ case FCVTAU_wd:
+ case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+ case FCVTMS_ws:
+ case FCVTMS_xs:
+ case FCVTMS_wd:
+ case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+ case FCVTMU_ws:
+ case FCVTMU_xs:
+ case FCVTMU_wd:
+ case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+ case FCVTNS_ws:
+ case FCVTNS_xs:
+ case FCVTNS_wd:
+ case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+ case FCVTNU_ws:
+ case FCVTNU_xs:
+ case FCVTNU_wd:
+ case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+ case FCVTZU_xd:
+ case FCVTZU_ws:
+ case FCVTZU_wd:
+ case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+ case FCVTZS_xd:
+ case FCVTZS_wd:
+ case FCVTZS_xs:
+ case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case SCVTF_sw:
+ case SCVTF_sx:
+ case SCVTF_dw:
+ case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw:
+ case UCVTF_sx:
+ case UCVTF_dw:
+ case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Fn, 'IFPFBits";
+ const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ case FCVTZS_ws_fixed:
+ case FCVTZS_xs_fixed:
+ case FCVTZS_wd_fixed:
+ case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+ case FCVTZU_ws_fixed:
+ case FCVTZU_xs_fixed:
+ case FCVTZU_wd_fixed:
+ case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+ case SCVTF_sw_fixed:
+ case SCVTF_sx_fixed:
+ case SCVTF_dw_fixed:
+ case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw_fixed:
+ case UCVTF_sx_fixed:
+ case UCVTF_dw_fixed:
+ case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitSystem(Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ const char *mnemonic = "unimplemented";
+ const char *form = "(System)";
+
+ if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ mnemonic = "mrs";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "'Xt, nzcv"; break;
+ case FPCR: form = "'Xt, fpcr"; break;
+ default: form = "'Xt, (unknown)"; break;
+ }
+ break;
+ }
+ case MSR: {
+ mnemonic = "msr";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "nzcv, 'Xt"; break;
+ case FPCR: form = "fpcr, 'Xt"; break;
+ default: form = "(unknown), 'Xt"; break;
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: {
+ mnemonic = "nop";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ switch (instr->Mask(MemBarrierMask)) {
+ case DMB: {
+ mnemonic = "dmb";
+ form = "'M";
+ break;
+ }
+ case DSB: {
+ mnemonic = "dsb";
+ form = "'M";
+ break;
+ }
+ case ISB: {
+ mnemonic = "isb";
+ form = NULL;
+ break;
+ }
+ }
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitException(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'IDebug";
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: mnemonic = "hlt"; break;
+ case BRK: mnemonic = "brk"; break;
+ case SVC: mnemonic = "svc"; break;
+ case HVC: mnemonic = "hvc"; break;
+ case SMC: mnemonic = "smc"; break;
+ case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+ case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+ case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+ default: form = "(Exception)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnimplemented(Instruction* instr) {
+ Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+void Disassembler::VisitUnallocated(Instruction* instr) {
+ Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+void Disassembler::ProcessOutput(Instruction* /*instr*/) {
+ // The base disasm does nothing more than disassembling into a buffer.
+}
+
+
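+// Emit |mnemonic|, then a space, then |format| with every 'field directive
+// replaced by the corresponding operand of |instr| (see SubstituteField
+// below). Literal characters in the format string are copied through
+// unchanged. For example (illustrative only), an LDUR with the form
+// "'Xt, ['Xns'ILS]" might come out as "ldur x0, [x1, #-8]".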
+void Disassembler::Format(Instruction* instr, const char* mnemonic,
+ const char* format) {
+  // TODO(mcapewel): I don't think the instr address can be used here on its
+  // own - a base address is needed too.
+ ASSERT(mnemonic != NULL);
+ ResetOutput();
+ Substitute(instr, mnemonic);
+ if (format != NULL) {
+ buffer_[buffer_pos_++] = ' ';
+ Substitute(instr, format);
+ }
+ buffer_[buffer_pos_] = 0;
+ ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(Instruction* instr, const char* string) {
+ char chr = *string++;
+ while (chr != '\0') {
+ if (chr == '\'') {
+ string += SubstituteField(instr, string);
+ } else {
+ buffer_[buffer_pos_++] = chr;
+ }
+ chr = *string++;
+ }
+}
+
+
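+// Dispatch on the first character of a 'field directive in a format string
+// and return the number of format characters consumed by the chosen
+// substitution helper.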
+int Disassembler::SubstituteField(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'R': // Register. X or W, selected by sf bit.
+ case 'F': // FP Register. S or D, selected by type field.
+ case 'W':
+ case 'X':
+ case 'S':
+ case 'D': return SubstituteRegisterField(instr, format);
+ case 'I': return SubstituteImmediateField(instr, format);
+ case 'L': return SubstituteLiteralField(instr, format);
+ case 'H': return SubstituteShiftField(instr, format);
+ case 'P': return SubstitutePrefetchField(instr, format);
+ case 'C': return SubstituteConditionField(instr, format);
+ case 'E': return SubstituteExtendField(instr, format);
+ case 'A': return SubstitutePCRelAddressField(instr, format);
+ case 'B': return SubstituteBranchTargetField(instr, format);
+ case 'O': return SubstituteLSRegOffsetField(instr, format);
+ case 'M': return SubstituteBarrierField(instr, format);
+ default: {
+ UNREACHABLE();
+ return 1;
+ }
+ }
+}
+
+
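+// Print the register operand selected by |format| ('Rd, 'Xns, 'Wt2, ...) and
+// return the number of format characters consumed (two, or three for the
+// 't2' and stack-tagged variants). x27, x28, x29 and x30 are printed using
+// their V8 aliases cp, jssp, fp and lr, and register 31 is printed as
+// csp/wcsp or xzr/wzr depending on the tag.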
+int Disassembler::SubstituteRegisterField(Instruction* instr,
+ const char* format) {
+ unsigned reg_num = 0;
+ unsigned field_len = 2;
+ switch (format[1]) {
+ case 'd': reg_num = instr->Rd(); break;
+ case 'n': reg_num = instr->Rn(); break;
+ case 'm': reg_num = instr->Rm(); break;
+ case 'a': reg_num = instr->Ra(); break;
+ case 't': {
+ if (format[2] == '2') {
+ reg_num = instr->Rt2();
+ field_len = 3;
+ } else {
+ reg_num = instr->Rt();
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ // Increase field length for registers tagged as stack.
+ if (format[2] == 's') {
+ field_len = 3;
+ }
+
+ char reg_type;
+ if (format[0] == 'R') {
+ // Register type is R: use sf bit to choose X and W.
+ reg_type = instr->SixtyFourBits() ? 'x' : 'w';
+ } else if (format[0] == 'F') {
+ // Floating-point register: use type field to choose S or D.
+ reg_type = ((instr->FPType() & 1) == 0) ? 's' : 'd';
+ } else {
+ // Register type is specified. Make it lower case.
+ reg_type = format[0] + 0x20;
+ }
+
+ if ((reg_num != kZeroRegCode) || (reg_type == 's') || (reg_type == 'd')) {
+ // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31.
+
+    // Print the V8 aliases used for special registers: x27 (cp), x28 (jssp),
+    // x29 (fp) and x30 (lr).
+ if ((reg_type == 'x') && (reg_num == 27)) {
+ AppendToOutput("cp");
+ } else if ((reg_type == 'x') && (reg_num == 28)) {
+ AppendToOutput("jssp");
+ } else if ((reg_type == 'x') && (reg_num == 29)) {
+ AppendToOutput("fp");
+ } else if ((reg_type == 'x') && (reg_num == 30)) {
+ AppendToOutput("lr");
+ } else {
+ AppendToOutput("%c%d", reg_type, reg_num);
+ }
+ } else if (format[2] == 's') {
+ // Disassemble w31/x31 as stack pointer wcsp/csp.
+ AppendToOutput("%s", (reg_type == 'w') ? "wcsp" : "csp");
+ } else {
+ // Disassemble w31/x31 as zero register wzr/xzr.
+ AppendToOutput("%czr", reg_type);
+ }
+
+ return field_len;
+}
+
+
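+// Print the immediate operand selected by |format| and return the number of
+// format characters consumed. For example (illustrative), the "IMoveLSL"
+// directive prints a move-wide immediate such as "#0x1234, lsl #16" and
+// consumes eight characters.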
+int Disassembler::SubstituteImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'I');
+
+ switch (format[1]) {
+ case 'M': { // IMoveImm or IMoveLSL.
+ if (format[5] == 'I') {
+ uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+ AppendToOutput("#0x%" PRIx64, imm);
+ } else {
+ ASSERT(format[5] == 'L');
+ AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
+ if (instr->ShiftMoveWide() > 0) {
+ AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
+ }
+ }
+ return 8;
+ }
+ case 'L': {
+ switch (format[2]) {
+ case 'L': { // ILLiteral - Immediate Load Literal.
+ AppendToOutput("pc%+" PRId64,
+ instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+ return 9;
+ }
+ case 'S': { // ILS - Immediate Load/Store.
+ if (instr->ImmLS() != 0) {
+ AppendToOutput(", #%" PRId64, instr->ImmLS());
+ }
+ return 3;
+ }
+ case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
+ if (instr->ImmLSPair() != 0) {
+ // format[3] is the scale value. Convert to a number.
+ int scale = format[3] - 0x30;
+ AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
+ }
+ return 4;
+ }
+ case 'U': { // ILU - Immediate Load/Store Unsigned.
+ if (instr->ImmLSUnsigned() != 0) {
+ AppendToOutput(", #%" PRIu64,
+ instr->ImmLSUnsigned() << instr->SizeLS());
+ }
+ return 3;
+ }
+ }
+ }
+    case 'C': {  // ICondB - Immediate Conditional Branch.
+      int64_t offset = instr->ImmCondBranch() << 2;
+      char sign = '+';
+      if (offset < 0) {
+        sign = '-';
+        offset = -offset;
+      }
+      AppendToOutput("#%c0x%" PRIx64, sign, offset);
+      return 6;
+    }
+ case 'A': { // IAddSub.
+ ASSERT(instr->ShiftAddSub() <= 1);
+ int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+ AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+ return 7;
+ }
+ case 'F': { // IFPSingle, IFPDouble or IFPFBits.
+ if (format[3] == 'F') { // IFPFBits.
+ AppendToOutput("#%d", 64 - instr->FPScale());
+ return 8;
+ } else {
+ AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
+ format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+ return 9;
+ }
+ }
+ case 'T': { // ITri - Immediate Triangular Encoded.
+ AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+ return 4;
+ }
+ case 'N': { // INzcv.
+ int nzcv = (instr->Nzcv() << Flags_offset);
+ AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+ ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+ ((nzcv & CFlag) == 0) ? 'c' : 'C',
+ ((nzcv & VFlag) == 0) ? 'v' : 'V');
+ return 5;
+ }
+ case 'P': { // IP - Conditional compare.
+ AppendToOutput("#%d", instr->ImmCondCmp());
+ return 2;
+ }
+ case 'B': { // Bitfields.
+ return SubstituteBitfieldImmediateField(instr, format);
+ }
+ case 'E': { // IExtract.
+ AppendToOutput("#%d", instr->ImmS());
+ return 8;
+ }
+ case 'S': { // IS - Test and branch bit.
+ AppendToOutput("#%d", (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40());
+ return 2;
+ }
+ case 'D': { // IDebug - HLT and BRK instructions.
+ AppendToOutput("#0x%x", instr->ImmException());
+ return 6;
+ }
+ default: {
+ UNIMPLEMENTED();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ unsigned r = instr->ImmR();
+ unsigned s = instr->ImmS();
+
+ switch (format[2]) {
+ case 'r': { // IBr.
+ AppendToOutput("#%d", r);
+ return 3;
+ }
+ case 's': { // IBs+1 or IBs-r+1.
+ if (format[3] == '+') {
+ AppendToOutput("#%d", s + 1);
+ return 5;
+ } else {
+ ASSERT(format[3] == '-');
+ AppendToOutput("#%d", s - r + 1);
+ return 7;
+ }
+ }
+ case 'Z': { // IBZ-r.
+ ASSERT((format[3] == '-') && (format[4] == 'r'));
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
+ AppendToOutput("#%d", reg_size - r);
+ return 5;
+ }
+ default: {
+ UNREACHABLE();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteLiteralField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "LValue", 6) == 0);
+ USE(format);
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit:
+ case LDR_x_lit:
+ case LDR_s_lit:
+ case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
+ default: UNREACHABLE();
+ }
+
+ return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
+ ASSERT(format[0] == 'H');
+ ASSERT(instr->ShiftDP() <= 0x3);
+
+ switch (format[1]) {
+ case 'D': { // HDP.
+ ASSERT(instr->ShiftDP() != ROR);
+ } // Fall through.
+ case 'L': { // HLo.
+ if (instr->ImmDPShift() != 0) {
+ const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+ AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
+ instr->ImmDPShift());
+ }
+ return 3;
+ }
+ default:
+ UNIMPLEMENTED();
+ return 0;
+ }
+}
+
+
+int Disassembler::SubstituteConditionField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'C');
+ const char* condition_code[] = { "eq", "ne", "hs", "lo",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "al", "nv" };
+ int cond;
+ switch (format[1]) {
+ case 'B': cond = instr->ConditionBranch(); break;
+ case 'I': {
+ cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+ break;
+ }
+ default: cond = instr->Condition();
+ }
+ AppendToOutput("%s", condition_code[cond]);
+ return 4;
+}
+
+
+int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
+ const char* format) {
+ USE(format);
+ ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+
+ int offset = instr->ImmPCRel();
+
+ // Only ADR (AddrPCRelByte) is supported.
+ ASSERT(strcmp(format, "AddrPCRelByte") == 0);
+
+ char sign = '+';
+ if (offset < 0) {
+ offset = -offset;
+ sign = '-';
+ }
+ // TODO(jbramley): Can we print the target address here?
+ AppendToOutput("#%c0x%x", sign, offset);
+ return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "BImm", 4) == 0);
+
+ int64_t offset = 0;
+ switch (format[5]) {
+ // BImmUncn - unconditional branch immediate.
+ case 'n': offset = instr->ImmUncondBranch(); break;
+ // BImmCond - conditional branch immediate.
+ case 'o': offset = instr->ImmCondBranch(); break;
+ // BImmCmpa - compare and branch immediate.
+ case 'm': offset = instr->ImmCmpBranch(); break;
+ // BImmTest - test and branch immediate.
+ case 'e': offset = instr->ImmTestBranch(); break;
+ default: UNIMPLEMENTED();
+ }
+ offset <<= kInstructionSizeLog2;
+ char sign = '+';
+ if (offset < 0) {
+ offset = -offset;
+ sign = '-';
+ }
+ // TODO(mcapewel): look up pc + offset in label table.
+ AppendToOutput("#%c0x%" PRIx64, sign, offset);
+ return 8;
+}
+
+
+int Disassembler::SubstituteExtendField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Ext", 3) == 0);
+ ASSERT(instr->ExtendMode() <= 7);
+ USE(format);
+
+ const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+ "sxtb", "sxth", "sxtw", "sxtx" };
+
+  // If rd or rn is SP, uxtw (on 32-bit registers) and uxtx (on 64-bit
+  // registers) are disassembled as lsl.
+ if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+ (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+ (instr->ExtendMode() == UXTX))) {
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(", lsl #%d", instr->ImmExtendShift());
+ }
+ } else {
+ AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(" #%d", instr->ImmExtendShift());
+ }
+ }
+ return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+ "undefined", "undefined", "sxtw", "sxtx" };
+ USE(format);
+
+ unsigned shift = instr->ImmShiftLS();
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+ unsigned rm = instr->Rm();
+ if (rm == kZeroRegCode) {
+ AppendToOutput("%czr", reg_type);
+ } else {
+ AppendToOutput("%c%d", reg_type, rm);
+ }
+
+ // Extend mode UXTX is an alias for shift mode LSL here.
+ if (!((ext == UXTX) && (shift == 0))) {
+ AppendToOutput(", %s", extend_mode[ext]);
+ if (shift != 0) {
+ AppendToOutput(" #%d", instr->SizeLS());
+ }
+ }
+ return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'P');
+ USE(format);
+
+ int prefetch_mode = instr->PrefetchMode();
+
+ const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
+ int level = (prefetch_mode >> 1) + 1;
+ const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
+
+ AppendToOutput("p%sl%d%s", ls, level, ks);
+ return 6;
+}
+
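+// Print the barrier option for dmb/dsb. The domain field selects the row and
+// the type field the column of the table below; the unnamed (type == 0)
+// encodings are printed as "sy" together with their raw bits.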
+int Disassembler::SubstituteBarrierField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'M');
+ USE(format);
+
+ static const char* options[4][4] = {
+ { "sy (0b0000)", "oshld", "oshst", "osh" },
+ { "sy (0b0100)", "nshld", "nshst", "nsh" },
+ { "sy (0b1000)", "ishld", "ishst", "ish" },
+ { "sy (0b1100)", "ld", "st", "sy" }
+ };
+ int domain = instr->ImmBarrierDomain();
+ int type = instr->ImmBarrierType();
+
+ AppendToOutput("%s", options[domain][type]);
+ return 1;
+}
+
+
+void Disassembler::ResetOutput() {
+ buffer_pos_ = 0;
+ buffer_[buffer_pos_] = 0;
+}
+
+
+void Disassembler::AppendToOutput(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+  // Only the space remaining in the buffer is available to vsnprintf.
+  buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
+                           format, args);
+ va_end(args);
+}
+
+
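+// Print one line per instruction: its address, the raw 32-bit encoding and
+// the disassembly built up by the base class.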
+void PrintDisassembler::ProcessOutput(Instruction* instr) {
+ fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
+ reinterpret_cast<uint64_t>(instr), instr->InstructionBits(),
+ GetOutput());
+}
+
+} } // namespace v8::internal
+
+
+namespace disasm {
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ unsigned ureg = reg; // Avoid warnings about signed/unsigned comparisons.
+ if (ureg >= v8::internal::kNumberOfRegisters) {
+ return "noreg";
+ }
+ if (ureg == v8::internal::kZeroRegCode) {
+ return "xzr";
+ }
+ v8::internal::OS::SNPrintF(tmp_buffer_, "x%u", ureg);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // A64 does not have the concept of a byte register.
+ return "nobytereg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  UNREACHABLE();  // A64 does not have any XMM registers.
+ return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // The default name converter is called for unknown code, so we will not try
+ // to access any memory.
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+class BufferDisassembler : public v8::internal::Disassembler {
+ public:
+ explicit BufferDisassembler(v8::internal::Vector<char> out_buffer)
+ : out_buffer_(out_buffer) { }
+
+ ~BufferDisassembler() { }
+
+ virtual void ProcessOutput(v8::internal::Instruction* instr) {
+ v8::internal::OS::SNPrintF(out_buffer_, "%s", GetOutput());
+ }
+
+ private:
+ v8::internal::Vector<char> out_buffer_;
+};
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
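+// Disassemble a single instruction into |buffer| and return its size in
+// bytes; on A64 every instruction is kInstructionSize (four bytes) long.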
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instr) {
+ v8::internal::Decoder decoder;
+ BufferDisassembler disasm(buffer);
+ decoder.AppendVisitor(&disasm);
+
+ decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(instr));
+ return v8::internal::kInstructionSize;
+}
+
+
+int Disassembler::ConstantPoolSizeAt(byte* instr) {
+ return v8::internal::Assembler::ConstantPoolSizeAt(
+ reinterpret_cast<v8::internal::Instruction*>(instr));
+}
+
+
+void Disassembler::Disassemble(FILE* file, byte* start, byte* end) {
+ v8::internal::Decoder decoder;
+ v8::internal::PrintDisassembler disasm(file);
+ decoder.AppendVisitor(&disasm);
+
+ for (byte* pc = start; pc < end; pc += v8::internal::kInstructionSize) {
+ decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(pc));
+ }
+}
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/disasm-a64.h b/deps/v8/src/a64/disasm-a64.h
new file mode 100644
index 0000000000..35b8fe1f63
--- /dev/null
+++ b/deps/v8/src/a64/disasm-a64.h
@@ -0,0 +1,115 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_DISASM_A64_H
+#define V8_A64_DISASM_A64_H
+
+#include "v8.h"
+
+#include "globals.h"
+#include "utils.h"
+#include "instructions-a64.h"
+#include "decoder-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+class Disassembler: public DecoderVisitor {
+ public:
+ Disassembler();
+ Disassembler(char* text_buffer, int buffer_size);
+ virtual ~Disassembler();
+ char* GetOutput();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ protected:
+ virtual void ProcessOutput(Instruction* instr);
+
+ void Format(Instruction* instr, const char* mnemonic, const char* format);
+ void Substitute(Instruction* instr, const char* string);
+ int SubstituteField(Instruction* instr, const char* format);
+ int SubstituteRegisterField(Instruction* instr, const char* format);
+ int SubstituteImmediateField(Instruction* instr, const char* format);
+ int SubstituteLiteralField(Instruction* instr, const char* format);
+ int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
+ int SubstituteShiftField(Instruction* instr, const char* format);
+ int SubstituteExtendField(Instruction* instr, const char* format);
+ int SubstituteConditionField(Instruction* instr, const char* format);
+ int SubstitutePCRelAddressField(Instruction* instr, const char* format);
+ int SubstituteBranchTargetField(Instruction* instr, const char* format);
+ int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
+ int SubstitutePrefetchField(Instruction* instr, const char* format);
+ int SubstituteBarrierField(Instruction* instr, const char* format);
+
+ bool RdIsZROrSP(Instruction* instr) const {
+ return (instr->Rd() == kZeroRegCode);
+ }
+
+ bool RnIsZROrSP(Instruction* instr) const {
+ return (instr->Rn() == kZeroRegCode);
+ }
+
+ bool RmIsZROrSP(Instruction* instr) const {
+ return (instr->Rm() == kZeroRegCode);
+ }
+
+ bool RaIsZROrSP(Instruction* instr) const {
+ return (instr->Ra() == kZeroRegCode);
+ }
+
+ bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+
+ void ResetOutput();
+ void AppendToOutput(const char* string, ...);
+
+ char* buffer_;
+ uint32_t buffer_pos_;
+ uint32_t buffer_size_;
+ bool own_buffer_;
+};
+
+
+class PrintDisassembler: public Disassembler {
+ public:
+ explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
+ ~PrintDisassembler() { }
+
+ virtual void ProcessOutput(Instruction* instr);
+
+ private:
+ FILE *stream_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_DISASM_A64_H
diff --git a/deps/v8/include/v8-defaults.h b/deps/v8/src/a64/frames-a64.cc
index 381a48210d..56d2e26b72 100644
--- a/deps/v8/include/v8-defaults.h
+++ b/deps/v8/src/a64/frames-a64.cc
@@ -25,30 +25,33 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_V8_DEFAULTS_H_
-#define V8_V8_DEFAULTS_H_
-
#include "v8.h"
-/**
- * Default configuration support for the V8 JavaScript engine.
- */
+#if V8_TARGET_ARCH_A64
+
+#include "assembler.h"
+#include "assembler-a64.h"
+#include "assembler-a64-inl.h"
+#include "frames.h"
+
namespace v8 {
+namespace internal {
+
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
-/**
- * Configures the constraints with reasonable default values based on the
- * capabilities of the current device the VM is running on.
- */
-bool V8_EXPORT ConfigureResourceConstraintsForCurrentPlatform(
- ResourceConstraints* constraints);
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
-/**
- * Convience function which performs SetResourceConstraints with the settings
- * returned by ConfigureResourceConstraintsForCurrentPlatform.
- */
-bool V8_EXPORT SetDefaultResourceConstraintsForCurrentPlatform();
-} // namespace v8
+} } // namespace v8::internal
-#endif // V8_V8_DEFAULTS_H_
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/frames-a64.h b/deps/v8/src/a64/frames-a64.h
new file mode 100644
index 0000000000..5ef7681645
--- /dev/null
+++ b/deps/v8/src/a64/frames-a64.h
@@ -0,0 +1,131 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "a64/constants-a64.h"
+#include "a64/assembler-a64.h"
+
+#ifndef V8_A64_FRAMES_A64_H_
+#define V8_A64_FRAMES_A64_H_
+
+namespace v8 {
+namespace internal {
+
+const int kNumRegs = kNumberOfRegisters;
+// Registers x0-x17 are caller-saved.
+const int kNumJSCallerSaved = 18;
+const RegList kJSCallerSaved = 0x3ffff;
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of eight.
+// TODO(all): Refine this number.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
+#define kNumSafepointSavedRegisters \
+  CPURegList::GetSafepointSavedRegisters().Count()
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+
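+// Layout of an exit frame, relative to fp:
+//   fp + 2 * kPointerSize : where the caller's SP points (kCallerSPDisplacement)
+//   fp + 1 * kPointerSize : return address (kCallerPCOffset)
+//   fp + 0 * kPointerSize : saved fp (kCallerFPOffset)
+//   fp - 1 * kPointerSize : saved SP (kSPOffset)
+//   fp - 2 * kPointerSize : code object (kCodeOffset)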
+class ExitFrameConstants : public AllStatic {
+ public:
+ static const int kFrameSize = 2 * kPointerSize;
+
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
+ static const int kCallerPCOffset = 1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
+ static const int kSPOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kLastExitFrameField = kCodeOffset;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+
+ // There are two words on the stack (saved fp and saved lr) between fp and
+ // the arguments.
+ static const int kLastParameterOffset = 2 * kPointerSize;
+
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_FRAMES_A64_H_
diff --git a/deps/v8/src/a64/full-codegen-a64.cc b/deps/v8/src/a64/full-codegen-a64.cc
new file mode 100644
index 0000000000..ec5d339781
--- /dev/null
+++ b/deps/v8/src/a64/full-codegen-a64.cc
@@ -0,0 +1,5010 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "code-stubs.h"
+#include "codegen.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "isolate-inl.h"
+#include "parser.h"
+#include "scopes.h"
+#include "stub-cache.h"
+
+#include "a64/code-stubs-a64.h"
+#include "a64/macro-assembler-a64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
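+// Helper for emitting an inlined smi check that can later be patched by
+// PatchInlinedSmiCode (in ic-a64.cc). The tbz/tbnz on xzr emitted here is a
+// placeholder with fixed behaviour; EmitPatchInfo records the tested register
+// and the patch site so the IC code can rewrite the check in place.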
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm), reg_(NoReg) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ if (patch_site_.is_bound()) {
+ ASSERT(info_emitted_);
+ } else {
+ ASSERT(reg_.IsNone());
+ }
+ }
+
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
+ InstructionAccurateScope scope(masm_, 1);
+ ASSERT(!info_emitted_);
+ ASSERT(reg.Is64Bits());
+ ASSERT(!reg.Is(csp));
+ reg_ = reg;
+ __ bind(&patch_site_);
+ __ tbz(xzr, 0, target); // Always taken before patched.
+ }
+
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
+ InstructionAccurateScope scope(masm_, 1);
+ ASSERT(!info_emitted_);
+ ASSERT(reg.Is64Bits());
+ ASSERT(!reg.Is(csp));
+ reg_ = reg;
+ __ bind(&patch_site_);
+ __ tbnz(xzr, 0, target); // Never taken before patched.
+ }
+
+ void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) {
+ // We need to use ip0, so don't allow access to the MacroAssembler.
+ InstructionAccurateScope scope(masm_);
+ __ orr(ip0, reg1, reg2);
+ EmitJumpIfNotSmi(ip0, target);
+ }
+
+ void EmitPatchInfo() {
+ Assembler::BlockConstPoolScope scope(masm_);
+ InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+ Register reg_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// - x1: the JS function object being called (i.e. ourselves).
+// - cp: our context.
+// - fp: our caller's frame pointer.
+// - jssp: stack pointer.
+// - lr: return address.
+//
+// The function builds a JS frame. See JavaScriptFrameConstants in
+// frames-a64.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ Function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ Debug("stop-at", __LINE__, BREAK);
+ }
+#endif
+
+ // Classic mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->is_classic_mode() && !info->is_native()) {
+ Label ok;
+ int receiver_offset = info->scope()->num_parameters() * kXRegSizeInBytes;
+ __ Peek(x10, receiver_offset);
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Poke(x10, receiver_offset);
+
+ __ Bind(&ok);
+ }
+
+
+ // Open a frame scope to indicate that there is a frame on the stack.
+ // The MANUAL indicates that the scope shouldn't actually generate code
+ // to set up the frame because we do it manually below.
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+ // This call emits the following sequence in a way that can be patched for
+ // code ageing support:
+ // Push(lr, fp, cp, x1);
+ // Add(fp, jssp, 2 * kPointerSize);
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(BUILD_FUNCTION_FRAME);
+ info->AddNoFrameRange(0, masm_->pc_offset());
+
+ // Reserve space on the stack for locals.
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
+
+ if (locals_count > 0) {
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ PushMultipleTimes(locals_count, x10);
+ }
+ }
+
+ bool function_in_register_x1 = true;
+
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ // Argument to NewContext is the function, which is still in x1.
+ Comment cmnt(masm_, "[ Allocate context");
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
+ __ Push(x1, x10);
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ function_in_register_x1 = false;
+ // Context is returned in x0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ Mov(cp, x0);
+ __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = info->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ Ldr(x10, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ Str(x10, target);
+
+ // Update the write barrier.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+ }
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register_x1) {
+ // Load this again, if it's used by the local context below.
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ Mov(x3, x1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset);
+ __ Mov(x1, Operand(Smi::FromInt(num_parameters)));
+ __ Push(x3, x2, x1);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (!is_classic_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
+ __ CallStub(&stub);
+
+ SetVar(arguments, x0, x1, x2);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ { Comment cmnt(masm_, "[ Declarations");
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ VariableDeclaration* function = scope()->function();
+ ASSERT(function->proxy()->var()->mode() == CONST ||
+ function->proxy()->var()->mode() == CONST_HARMONY);
+ ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ VisitVariableDeclaration(function);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ Bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+
+ // Force emit the constant pool, so it doesn't get emitted in the middle
+ // of the back edge table.
+ masm()->CheckConstPool(true, false);
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ __ Mov(x0, Operand(Smi::FromInt(0)));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ Mov(x2, Operand(profiling_counter_));
+ __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset));
+ __ Subs(x3, x3, Operand(Smi::FromInt(delta)));
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = FLAG_interrupt_budget >> 4;
+ }
+ __ Mov(x2, Operand(profiling_counter_));
+ __ Mov(x3, Operand(Smi::FromInt(reset_value)));
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ ASSERT(jssp.Is(__ StackPointer()));
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ // Block literal pools whilst emitting back edge code.
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
+ Label ok;
+
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ __ B(pl, &ok);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+
+ EmitProfilingCounterReset();
+
+ __ Bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+
+ if (return_label_.is_bound()) {
+ __ B(&return_label_);
+
+ } else {
+ __ Bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in x0.
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ ASSERT(x0.Is(result_register()));
+ }
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ B(pl, &ok);
+ __ Push(x0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(x0);
+ EmitProfilingCounterReset();
+ __ Bind(&ok);
+
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence. This sequence can get patched when the debugger is used. See
+ // debug-a64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
+ {
+ InstructionAccurateScope scope(masm_,
+ Assembler::kJSRetSequenceInstructions);
+ CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ __ RecordJSReturn();
+ // This code is generated using Assembler methods rather than Macro
+ // Assembler methods because it will be patched later on, and so the size
+ // of the generated code must be consistent.
+ const Register& current_sp = __ StackPointer();
+      // Nothing ensures 16-byte alignment here.
+ ASSERT(!current_sp.Is(csp));
+ __ mov(current_sp, fp);
+ int no_frame_start = masm_->pc_offset();
+ __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSizeInBytes, PostIndex));
+ // Drop the arguments and receiver and return.
+      // TODO(all): This implementation is overkill as it supports 2**31+1
+      // arguments; consider how to improve it without creating a security
+      // hole.
+ __ LoadLiteral(ip0, 3 * kInstructionSize);
+ __ add(current_sp, current_sp, ip0);
+ __ ret();
+ __ dc64(kXRegSizeInBytes * (info_->scope()->num_parameters() + 1));
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ // For simplicity we always test the accumulator register.
+ codegen()->GetVar(result_register(), var);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+ // Root values have no side effects.
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ Mov(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ Mov(result_register(), Operand(lit));
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ Mov(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ Poke(reg, 0);
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Mov(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == materialize_false);
+ __ Bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ Bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(&done);
+ __ Bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ Bind(materialize_true);
+ __ LoadRoot(x10, Heap::kTrueValueRootIndex);
+ __ B(&done);
+ __ Bind(materialize_false);
+ __ LoadRoot(x10, Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == true_label_);
+ ASSERT(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(x10, value_root_index);
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) {
+ __ B(true_label_);
+ }
+ } else {
+ if (false_label_ != fall_through_) {
+ __ B(false_label_);
+ }
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
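+  // The ToBoolean stub leaves zero in the result register for false and a
+  // non-zero value for true.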
+ __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
+}
+
+
+// If (cond), branch to if_true.
+// If (!cond), branch to if_false.
+// fall_through is used as an optimization in cases where only one branch
+// instruction is necessary.
+void FullCodeGenerator::Split(Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ B(cond, if_true);
+ } else if (if_true == fall_through) {
+ ASSERT(if_false != fall_through);
+ __ B(InvertCondition(cond), if_false);
+ } else {
+ __ B(cond, if_true);
+ __ B(if_false);
+ }
+}
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kXRegSizeInBytes;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
+ return MemOperand(fp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextMemOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ // Use destination as scratch.
+ MemOperand location = VarOperand(var, dest);
+ __ Ldr(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+ Register src,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ ASSERT(!AreAliased(src, scratch0, scratch1));
+ MemOperand location = VarOperand(var, scratch0);
+ __ Str(src, location);
+
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ // scratch0 contains the correct context.
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ // TODO(all): Investigate to see if there is something to work on here.
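+  // When normalizing, the code between the branch and the bind below is not
+  // reached directly; it is the continuation for a bailout with the boolean
+  // result in x0, and it turns that value back into a branch.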
+ Label skip;
+ if (should_normalize) {
+ __ B(&skip);
+ }
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, NULL);
+ __ Bind(&skip);
+ }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current function
+ // context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (generate_debug_code_) {
+ // Check that we're not inside a with or catch context.
+ __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(x1, Heap::kWithContextMapRootIndex);
+ __ Check(ne, kDeclarationInWithContext);
+ __ CompareRoot(x1, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, kDeclarationInCatchContext);
+ }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = (mode == CONST) || (mode == CONST_HARMONY) || (mode == LET);
+
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(),
+ zone());
+ break;
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Str(x10, StackOperand(variable));
+ }
+ break;
+
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Str(x10, ContextMemOperand(cp, variable->index()));
+ // No write barrier since the_hole_value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ Mov(x2, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
+ : NONE;
+ __ Mov(x1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
+ __ Push(cp, x2, x1, x0);
+ } else {
+ // Pushing 0 (xzr) indicates no initial value.
+ __ Push(cp, x2, x1, xzr);
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), script());
+ // Check for stack overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ Str(result_register(), StackOperand(variable));
+ break;
+ }
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ Str(result_register(), ContextMemOperand(cp, variable->index()));
+ int offset = Context::SlotOffset(variable->index());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ x2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ __ Mov(x2, Operand(variable->name()));
+ __ Mov(x1, Operand(Smi::FromInt(NONE)));
+ __ Push(cp, x2, x1);
+ // Push initial value for function declaration.
+ VisitForStackValue(declaration->fun());
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
+
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+
+ // Load instance object.
+ __ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
+ __ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
+
+ // Assign it.
+ __ Str(x1, ContextMemOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ x1,
+ x3,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+  // Traverse into the module body.
+ Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ Mov(x11, Operand(pairs));
+ Register flags = xzr;
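+  // Smi::FromInt(0) is encoded as 0, so when there are no flags we can push
+  // xzr instead of materializing the smi in a register.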
+ if (Smi::FromInt(DeclareGlobalsFlags())) {
+ flags = x10;
+ __ Mov(flags, Operand(Smi::FromInt(DeclareGlobalsFlags())));
+ }
+ __ Push(cp, x11, flags);
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement");
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as the final fall-through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ Bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+
+ // Perform the comparison as if via '==='.
+ __ Peek(x1, 0); // Switch value.
+
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ __ B(ne, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ B(clause->body_target());
+ __ Bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ B(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
+ __ Drop(1);
+ __ B(clause->body_target());
+ __ Bind(&skip);
+
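+    // The comparison stub returns zero in x0 if and only if the operands are
+    // strictly equal.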
+ __ Cbnz(x0, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ B(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ Bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ B(nested_statement.break_label());
+ } else {
+ __ B(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ Bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ Bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
+ Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+ // TODO(all): This visitor probably needs better comments and a revisit.
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
+ Register null_value = x15;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Cmp(x0, null_value);
+ __ B(eq, &exit);
+
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(x0, &convert);
+ __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
+ __ Bind(&convert);
+ __ Push(x0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Bind(&done_convert);
+ __ Push(x0);
+
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ B(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ Bind(&call_runtime);
+ __ Push(x0); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array, no_descriptors;
+ __ Ldr(x2, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x2, Heap::kMetaMapRootIndex, &fixed_array);
+
+ // We got a map in register x0. Get the enumeration cache from it.
+ __ Bind(&use_cache);
+
+ __ EnumLengthUntagged(x1, x0);
+ __ Cbz(x1, &no_descriptors);
+
+ __ LoadInstanceDescriptors(x0, x2);
+ __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(x2,
+ FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ Push(x0); // Map.
+ __ Mov(x0, Operand(Smi::FromInt(0)));
+ // Push enumeration cache, enumeration cache length (as smi) and zero.
+ __ SmiTag(x1);
+ __ Push(x2, x1, x0);
+ __ B(&loop);
+
+ __ Bind(&no_descriptors);
+ __ Drop(1);
+ __ B(&exit);
+
+ // We got a fixed array in register x0. Iterate through that.
+ __ Bind(&fixed_array);
+
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ __ LoadObject(x1, FeedbackVector());
+ __ Mov(x10, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+ __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
+
+ __ Mov(x1, Operand(Smi::FromInt(1))); // Smi indicates slow check.
+ __ Peek(x10, 0); // Get enumerated object.
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ // TODO(all): similar check was done already. Can we avoid it here?
+ __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
+ ASSERT(Smi::FromInt(0) == 0);
+ __ CzeroX(x1, le); // Zero indicates proxy.
+ __ Push(x1, x0); // Smi and array
+ __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset));
+ __ Push(x1, xzr); // Fixed array length (as smi) and initial index.
+
+ // Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
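+  // The stack now contains, from top to bottom: the current index (smi), the
+  // array length (smi), the array of keys, the expected map (or a smi in the
+  // slow case), and the enumerable object.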
+ __ Bind(&loop);
+ // Load the current count to x0, load the length to x1.
+ __ PeekPair(x0, x1, 0);
+ __ Cmp(x0, x1); // Compare to the array length.
+ __ B(hs, loop_statement.break_label());
+
+  // Get the current entry of the array into register x3.
+ __ Peek(x10, 2 * kXRegSizeInBytes);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
+ __ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the expected map from the stack or a smi in the
+  // permanent slow case into register x2.
+ __ Peek(x2, 3 * kXRegSizeInBytes);
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ Peek(x1, 4 * kXRegSizeInBytes);
+ __ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset));
+ __ Cmp(x11, x2);
+ __ B(eq, &update_each);
+
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Cbz(x2, &update_each);
+
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
+ // just skip it.
+ __ Push(x1, x3);
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ Mov(x3, x0);
+ __ Cbz(x0, loop_statement.continue_label());
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register x3.
+ __ Bind(&update_each);
+ __ Mov(result_register(), x3);
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitAssignment(stmt->each());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Generate code for going to the next element by incrementing
+ // the index (smi) stored on top of the stack.
+ __ Bind(loop_statement.continue_label());
+ // TODO(all): We could use a callee saved register to avoid popping.
+ __ Pop(x0);
+ __ Add(x0, x0, Operand(Smi::FromInt(1)));
+ __ Push(x0);
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ B(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ Bind(loop_statement.break_label());
+ __ Drop(5);
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ Bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ Register iterator = x0;
+ __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex,
+ loop_statement.break_label());
+ __ JumpIfRoot(iterator, Heap::kNullValueRootIndex,
+ loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(iterator, &convert);
+ __ CompareObjectType(iterator, x1, x1, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, &done_convert);
+ __ Bind(&convert);
+ __ Push(iterator);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Bind(&done_convert);
+ __ Push(iterator);
+
+ // Loop entry.
+ __ Bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ Bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ B(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ Bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new space for
+ // nested functions that don't need literals cloning. If we're running with
+ // the --always-opt or the --prepare-always-opt flag, we need to use the
+ // runtime function so that the new function we are creating here gets a
+ // chance to have its code optimized and doesn't just get a copy of the
+ // existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ __ Mov(x2, Operand(info));
+ __ CallStub(&stub);
+ } else {
+ __ Mov(x11, Operand(info));
+ __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
+ : Heap::kFalseValueRootIndex);
+ __ Push(cp, x11, x10);
+ __ CallRuntime(Runtime::kNewClosure, 3);
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_non_strict_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
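+  // If the static scope chain ended at an eval scope, the remaining contexts
+  // are only known at runtime; walk them here, stopping at the native
+  // context.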
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ __ Mov(next, current);
+
+ __ Bind(&loop);
+ // Terminate at native context.
+ __ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
+ __ B(&loop);
+ __ Bind(&fast);
+ }
+
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ ASSERT(var->IsContextSlot());
+ Register context = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_non_strict_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+
+ // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextMemOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ __ B(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
+ if (local->mode() == CONST) {
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST_HARMONY
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+ }
+ __ B(done);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in x2 and the global
+ // object (receiver) in x0.
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ CallLoadIC(CONTEXTUAL);
+ context()->Plug(x0);
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, var->IsContextSlot()
+ ? "Context variable"
+ : "Stack variable");
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+        // var->location() == LOOKUP always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+          // Check that we always have a valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(x0, var);
+ Label done;
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ Bind(&done);
+ } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
+ }
+ context()->Plug(x0);
+ break;
+ }
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ Bind(&slow);
+ Comment cmnt(masm_, "Lookup variable");
+ __ Mov(x1, Operand(var->name()));
+ __ Push(cp, x1); // Context and name.
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ Bind(&done);
+ context()->Plug(x0);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // x5 = materialized value (RegExp literal)
+ // x4 = JS function, literals array
+ // x3 = literal index
+ // x2 = RegExp pattern
+ // x1 = RegExp flags
+ // x0 = RegExp literal clone
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ Ldr(x5, FieldMemOperand(x4, literal_offset));
+ __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
+
+ // Create regexp literal using runtime function.
+ // Result will be in x0.
+ __ Mov(x3, Operand(Smi::FromInt(expr->literal_index())));
+ __ Mov(x2, Operand(expr->pattern()));
+ __ Mov(x1, Operand(expr->flags()));
+ __ Push(x4, x3, x2, x1);
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ Mov(x5, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ __ Mov(x10, Operand(Smi::FromInt(size)));
+ __ Push(x5, x10);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(x5);
+
+ __ Bind(&allocated);
+ // After this, registers are used as follows:
+ // x0: Newly allocated regexp.
+ // x5: Materialized regexp.
+ // x10, x11, x12: temps.
+ __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(x10, Heap::kNullValueRootIndex);
+ __ Push(x10);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ __ Mov(x2, Operand(Smi::FromInt(expr->literal_index())));
+ __ Mov(x1, Operand(constant_properties));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Mov(x0, Operand(Smi::FromInt(flags)));
+ int properties_count = constant_properties->length() / 2;
+ const int max_cloned_properties =
+ FastCloneShallowObjectStub::kMaximumClonedProperties;
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ (expr->depth() > 1) || Serializer::enabled() ||
+ (flags != ObjectLiteral::kFastElements) ||
+ (properties_count > max_cloned_properties)) {
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in x0.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ Push(x0); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ Mov(x2, Operand(key->value()));
+ __ Peek(x1, 0);
+ CallStoreIC(key->LiteralFeedbackId());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ __ Push(x0);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ Mov(x0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ Push(x0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ // TODO(jbramley): This push shouldn't be necessary if we don't call the
+ // runtime below. In that case, skip it.
+ __ Push(x0);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ Peek(x10, 0); // Duplicate receiver.
+ __ Push(x10);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ Mov(x10, Operand(Smi::FromInt(NONE)));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ Peek(x0, 0);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ expr->BuildConstantElements(isolate());
+ int flags = (expr->depth() == 1) ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ // TODO(jbramley): Can these Operand constructors be implicit?
+ __ Mov(x2, Operand(Smi::FromInt(expr->literal_index())));
+ __ Mov(x1, Operand(constant_elements));
+ if (has_fast_elements && constant_elements_values->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode,
+ length);
+ __ CallStub(&stub);
+ __ IncrementCounter(
+ isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11);
+ } else if ((expr->depth() > 1) || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Mov(x0, Operand(Smi::FromInt(flags)));
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+ FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+
+ if (has_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ __ Push(x0);
+ __ Push(Smi::FromInt(expr->literal_index()));
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ Peek(x6, kPointerSize); // Copy of array literal.
+ __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
+ __ Str(result_register(), FieldMemOperand(x1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(x1, offset, result_register(), x10,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ Mov(x3, Operand(Smi::FromInt(i)));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ __ Drop(1); // literal index
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // on the left-hand side.
+ if (!expr->target()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->target());
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForAccumulatorValue(property->obj());
+ __ Push(result_register());
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY:
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ __ Peek(x1, 0);
+ __ Push(x0);
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ Push(x0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ Mov(x2, Operand(key->value()));
+  // Call load IC. It has arguments receiver and property name in x0 and x2.
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+  // Call keyed load IC. It has arguments key and receiver in x0 and x1.
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, both_smis, stub_call;
+
+ // Get the arguments.
+ Register left = x1;
+ Register right = x0;
+ Register result = x0;
+ __ Pop(left);
+
+ // Perform combined smi check on both operands.
+ __ Orr(x10, left, right);
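+  // A heap object has its tag bit set, so the OR of the two operands is a
+  // smi only if both operands are smis.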
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(x10, &both_smis);
+
+ __ Bind(&stub_call);
+ BinaryOpICStub stub(op, mode);
+ {
+ Assembler::BlockConstPoolScope scope(masm_);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ __ B(&done);
+
+ __ Bind(&both_smis);
+ // Smi case. This code works in the same way as the smi-smi case in the type
+ // recording binary operation stub, see
+ // BinaryOpStub::GenerateSmiSmiOperation for comments.
+ // TODO(all): That doesn't exist any more. Where are the comments?
+ //
+ // The set of operations that needs to be supported here is controlled by
+ // FullCodeGenerator::ShouldInlineSmiCase().
+ switch (op) {
+ case Token::SAR:
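+      // Extract the untagged 5-bit shift amount, arithmetic-shift the tagged
+      // left operand, then clear the low bits to leave a valid smi.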
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Asr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsl(result, left, right);
+ break;
+ case Token::SHR: {
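+      // A zero shift of a negative value would produce a result outside the
+      // smi range, so that case is handled by the stub.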
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ __ Tbnz(left, kXSignBit, &stub_call);
+ __ Bind(&right_not_zero);
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ }
+ case Token::ADD:
+ __ Adds(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::SUB:
+ __ Subs(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::MUL: {
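+      // Both operands are tagged as (value << 32), so Smulh yields the exact
+      // untagged 64-bit product. A zero product from operands with different
+      // signs is -0 and needs the stub; otherwise, check that the product
+      // fits in a smi before re-tagging it.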
+ Label not_minus_zero, done;
+ __ Smulh(x10, left, right);
+ __ Cbnz(x10, &not_minus_zero);
+ __ Eor(x11, left, right);
+ __ Tbnz(x11, kXSignBit, &stub_call);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Mov(result, x10);
+ __ B(&done);
+ __ Bind(&not_minus_zero);
+ __ Cls(x11, x10);
+ __ Cmp(x11, kXRegSize - kSmiShift);
+ __ B(lt, &stub_call);
+ __ SmiTag(result, x10);
+ __ Bind(&done);
+ break;
+ }
+ case Token::BIT_OR:
+ __ Orr(result, left, right);
+ break;
+ case Token::BIT_AND:
+ __ And(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ Eor(result, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ Pop(x1);
+ BinaryOpICStub stub(op, mode);
+ JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
+ {
+ Assembler::BlockConstPoolScope scope(masm_);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ // Invalid left-hand sides are rewritten to have a 'throw
+ // ReferenceError' on the left-hand side.
+ if (!expr->IsValidLeftHandSide()) {
+ VisitForEffect(expr);
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
+ // this copy.
+ __ Mov(x1, x0);
+ __ Pop(x0); // Restore value.
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ CallStoreIC();
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Mov(x1, x0);
+ __ Pop(x2, x0);
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ Str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Mov(x10, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, LanguageMode mode) {
+ __ Mov(x11, Operand(name));
+ __ Mov(x10, Operand(Smi::FromInt(mode)));
+ // jssp[0] : mode.
+ // jssp[8] : name.
+ // jssp[16] : context.
+ // jssp[24] : value.
+ __ Push(x0, cp, x11, x10);
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op) {
+ ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ Mov(x2, Operand(var->name()));
+ __ Ldr(x1, GlobalObjectMemOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST) {
+ // Const initializers need a write barrier.
+ ASSERT(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ Push(x0);
+ __ Mov(x0, Operand(var->name()));
+ __ Push(cp, x0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ Bind(&skip);
+ }
+
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), language_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
+ __ Mov(x10, Operand(var->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ // Perform the assignment.
+ __ Bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), language_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ MemOperand location = VarOperand(var, x1);
+ if (FLAG_debug_code && op == Token::INIT_LET) {
+ __ Ldr(x10, location);
+ __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ }
+ // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(x1);
+
+ CallStoreIC(expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
+ // Assignment to a property, using a keyed store IC.
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ // TODO(all): Could we pass this in registers rather than on the stack?
+ __ Pop(x1, x2); // Key and object holding the property.
+
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(x0);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Pop(x1);
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ // All calls must have a predictable size in full-codegen code to ensure that
+ // the debugger can patch them correctly.
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+ ASM_LOCATION("EmitCallWithIC");
+
+ Expression* callee = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ CallFunctionFlags flags;
+ // Get the target function.
+ if (callee->IsVariableProxy()) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a classic mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ flags = NO_CALL_FUNCTION_FLAGS;
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ Peek(x0, 0);
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+ flags = CALL_AS_METHOD;
+ }
+
+ // Load the arguments.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, flags);
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ RecordJSReturnSite(expr);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ Peek(x1, 0);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, CALL_AS_METHOD);
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+
+ Handle<Object> uninitialized =
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ LoadObject(x2, FeedbackVector());
+ __ Mov(x3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
+
+ // Record call targets in unoptimized code.
+ CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
+ __ Peek(x1, (arg_count + 1) * kXRegSizeInBytes);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
+ // Prepare to push a copy of the first argument or undefined if it doesn't
+ // exist.
+ if (arg_count > 0) {
+ __ Peek(x10, arg_count * kXRegSizeInBytes);
+ } else {
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ }
+
+ // Prepare to push the receiver of the enclosing function.
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
+
+ // Push.
+ __ Push(x10, x11);
+
+ // Prepare to push the language mode.
+ __ Mov(x10, Operand(Smi::FromInt(language_mode())));
+  // Prepare to push the start position of the scope the call resides in.
+ __ Mov(x11, Operand(Smi::FromInt(scope()->start_position())));
+
+ // Push.
+ __ Push(x10, x11);
+
+ // Do the runtime call.
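+  // Together with the copy of the function pushed by the caller, this makes
+  // five arguments.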
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ {
+ PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10); // Reserved receiver slot.
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ Peek(x10, (arg_count + 1) * kPointerSize);
+ __ Push(x10);
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // The runtime call returns a pair of values in x0 (function) and
+ // x1 (receiver). Touch up the stack with the right values.
+ __ PokePair(x1, x0, arg_count * kPointerSize);
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+
+ // Call the evaluated function.
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ Peek(x1, (arg_count + 1) * kXRegSizeInBytes);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
+
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+ // Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
+ Label slow, done;
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+ }
+
+ __ Bind(&slow);
+ // Call the runtime to find the function to call (returned in x0)
+ // and the object holding it (returned in x1).
+ __ Push(context_register());
+ __ Mov(x10, Operand(proxy->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ Push(x0, x1); // Receiver, function.
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ B(&call);
+ __ Bind(&done);
+ // Push function.
+ __ Push(x0);
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing undefined to the call function stub.
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+ __ Push(x1);
+ __ Bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot.
+ EmitCallWithStub(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (property->key()->IsPropertyName()) {
+ EmitCallWithIC(expr);
+ } else {
+ EmitKeyedCallWithIC(expr, property->key());
+ }
+
+ } else {
+ ASSERT(call_type == Call::OTHER_CALL);
+ // Call to an arbitrary expression not handled specially above.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ }
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+ __ Push(x1);
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into x1 and x0.
+ __ Mov(x0, arg_count);
+ __ Peek(x1, arg_count * kXRegSizeInBytes);
+
+ // Record call targets in unoptimized code.
+ Handle<Object> uninitialized =
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ __ LoadObject(x2, FeedbackVector());
+ __ Mov(x3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+ CallConstructStub stub(RECORD_CALL_TARGET);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ TestAndSplit(x0, kSmiTagMask, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
+ if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ Tbnz(x11, Map::kIsUndetectable, if_false);
+ __ Ldrb(x12, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Cmp(x12, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(lt, if_false);
+ __ Cmp(x12, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(le, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ge, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ Tst(x11, 1 << Map::kIsUndetectable);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ne, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Register object = x0;
+ __ AssertNotSmi(object);
+
+ Register map = x10;
+ Register bitfield2 = x11;
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ldrb(bitfield2, FieldMemOperand(map, Map::kBitField2Offset));
+ __ Tbnz(bitfield2, Map::kStringWrapperSafeForDefaultValueOf, &skip_lookup);
+
+ // Check for fast case object. Generate false result for slow case object.
+ Register props = x12;
+ Register props_map = x12;
+ Register hash_table_map = x13;
+ __ Ldr(props, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ldr(props_map, FieldMemOperand(props, HeapObject::kMapOffset));
+ __ LoadRoot(hash_table_map, Heap::kHashTableMapRootIndex);
+ __ Cmp(props_map, hash_table_map);
+ __ B(eq, if_false);
+
+ // Look for the 'valueOf' name in the descriptor array, and indicate false if
+ // it is found. Since we omit an enumeration index check, if 'valueOf' is added
+ // via a transition that shares its descriptor array, this is a false positive.
+ Label loop, done;
+
+ // Skip loop if no descriptors are valid.
+ Register descriptors = x12;
+ Register descriptors_length = x13;
+ __ NumberOfOwnDescriptors(descriptors_length, map);
+ __ Cbz(descriptors_length, &done);
+
+ __ LoadInstanceDescriptors(map, descriptors);
+
+ // Calculate the number of descriptor array slots spanned by the descriptors.
+ Register descriptors_end = x14;
+ __ Mov(x15, DescriptorArray::kDescriptorSize);
+ __ Mul(descriptors_length, descriptors_length, x15);
+ // Calculate location of the first key name.
+ __ Add(descriptors, descriptors,
+ DescriptorArray::kFirstOffset - kHeapObjectTag);
+ // Calculate the end of the descriptor array.
+ __ Add(descriptors_end, descriptors,
+ Operand(descriptors_length, LSL, kPointerSizeLog2));
+
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // string "valueOf" the result is false.
+ Register valueof_string = x1;
+ int descriptor_size = DescriptorArray::kDescriptorSize * kPointerSize;
+ __ Mov(valueof_string, Operand(isolate()->factory()->value_of_string()));
+ __ Bind(&loop);
+ __ Ldr(x15, MemOperand(descriptors, descriptor_size, PostIndex));
+ __ Cmp(x15, valueof_string);
+ __ B(eq, if_false);
+ __ Cmp(descriptors, descriptors_end);
+ __ B(ne, &loop);
+
+ __ Bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ Ldrb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+ __ Orr(x2, x2, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ Strb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+
+ __ Bind(&skip_lookup);
+
+ // If a valueOf property is not found on the object, check that its prototype
+ // is the unmodified String prototype. If it is not, the result is false.
+ Register prototype = x1;
+ Register global_idx = x2;
+ Register native_context = x2;
+ Register string_proto = x3;
+ Register proto_map = x4;
+ __ Ldr(prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ JumpIfSmi(prototype, if_false);
+ __ Ldr(proto_map, FieldMemOperand(prototype, HeapObject::kMapOffset));
+ __ Ldr(global_idx, GlobalObjectMemOperand());
+ __ Ldr(native_context,
+ FieldMemOperand(global_idx, GlobalObject::kNativeContextOffset));
+ __ Ldr(string_proto,
+ ContextMemOperand(native_context,
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ Cmp(proto_map, string_proto);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Only a HeapNumber can be -0.0, so return false if we have something else.
+ __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+
+ // Test the bit pattern.
+ __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
+ __ Cmp(x10, 1); // Set V on 0x8000000000000000.
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(vs, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
+ __ Cmp(x1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ Bind(&check_frame_marker);
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Cmp(x1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ Pop(x1);
+ __ Cmp(x0, x1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in x1.
+ VisitForAccumulatorValue(args->at(0));
+ __ Mov(x1, x0);
+ __ Mov(x0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ Label exit;
+ // Get the number of formal parameters.
+ __ Mov(x0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset));
+ __ Cmp(x13, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ Bind(&exit);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitClassOf");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(x0, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ // x10: object's map.
+ // x11: object's type.
+ __ B(lt, &null);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ B(eq, &function);
+
+ __ Cmp(x11, LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ B(eq, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
+ __ Ldr(x12, FieldMemOperand(x10, Map::kConstructorOffset));
+ __ JumpIfNotObjectType(x12, x13, x14, JS_FUNCTION_TYPE,
+ &non_function_constructor);
+
+ // x12 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ Ldr(x13, FieldMemOperand(x12, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x0,
+ FieldMemOperand(x13, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ B(&done);
+
+ // Functions have class 'Function'.
+ __ Bind(&function);
+ __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex);
+ __ B(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ Bind(&non_function_constructor);
+ __ LoadRoot(x0, Heap::kObject_stringRootIndex);
+ __ B(&done);
+
+ // Non-JS objects have class null.
+ __ Bind(&null);
+ __ LoadRoot(x0, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ Bind(&done);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 3);
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallRuntime(Runtime::kLog, 2);
+ }
+
+ // Finally, we're expected to leave a value on the top of the stack.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitValueOf");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(x0, &done);
+ // If the object is not a value type, return the object.
+ __ JumpIfNotObjectType(x0, x10, x11, JS_VALUE_TYPE, &done);
+ __ Ldr(x0, FieldMemOperand(x0, JSValue::kValueOffset));
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done, not_date_object;
+ Register object = x0;
+ Register result = x0;
+ Register stamp_addr = x10;
+ Register stamp_cache = x11;
+
+ __ JumpIfSmi(object, &not_date_object);
+ __ JumpIfNotObjectType(object, x10, x10, JS_DATE_TYPE, &not_date_object);
+
+ if (index->value() == 0) {
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ B(&done);
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
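+ // Cached fields can be read straight from the object, provided the object's
+ // cache stamp still matches the isolate's date cache stamp.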
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ Mov(x10, Operand(stamp));
+ __ Ldr(stamp_addr, MemOperand(x10));
+ __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Cmp(stamp_addr, stamp_cache);
+ __ B(ne, &runtime);
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ B(&done);
+ }
+
+ __ Bind(&runtime);
+ __ Mov(x1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ B(&done);
+ }
+
+ __ Bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = x0;
+ Register index = x1;
+ Register value = x2;
+ Register scratch = x10;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(value, index);
+
+ if (FLAG_debug_code) {
+ __ AssertSmi(value, kNonSmiValue);
+ __ AssertSmi(index, kNonSmiIndex);
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+ one_byte_seq_type);
+ }
+
+ __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ Strb(value, MemOperand(scratch, index));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = x0;
+ Register index = x1;
+ Register value = x2;
+ Register scratch = x10;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(value, index);
+
+ if (FLAG_debug_code) {
+ __ AssertSmi(value, kNonSmiValue);
+ __ AssertSmi(index, kNonSmiIndex);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+ two_byte_seq_type);
+ }
+
+ __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ Strh(value, MemOperand(scratch, index, LSL, 1));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+ // Load the arguments on the stack and call the MathPow stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ Pop(x1);
+ // x0 = value.
+ // x1 = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(x1, &done);
+
+ // If the object is not a value type, return the value.
+ __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done);
+
+ // Store the value.
+ __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset));
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ Mov(x10, x0);
+ __ RecordWriteField(
+ x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument into x0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+
+ NumberToStringStub stub;
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ Register code = x0;
+ Register result = x1;
+
+ StringCharFromCodeGenerator generator(code, result);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = x1;
+ Register index = x0;
+ Register result = x3;
+
+ __ Pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ __ Bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = x1;
+ Register index = x0;
+ Register result = x0;
+
+ __ Pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ x3,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ __ Bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ B(&done);
+
+ __ Bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger conversion.
+ __ Mov(result, Operand(Smi::FromInt(0)));
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ __ Pop(x1);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub;
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
+ // Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallRuntime(Runtime::kMath_log, 1);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
+ // Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(x0, &runtime);
+ __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime);
+
+ // InvokeFunction requires the function in x1. Move it in there.
+ __ Mov(x1, x0);
+ ParameterCount count(arg_count);
+ __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper());
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ B(&done);
+
+ __ Bind(&runtime);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ Bind(&done);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(2));
+ __ Pop(x1, x2);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->native_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort(kAttemptToUseUndefinedCache);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(x0);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = x0;
+ Register cache = x1;
+ __ Ldr(cache, GlobalObjectMemOperand());
+ __ Ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+ __ Ldr(cache, ContextMemOperand(cache,
+ Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ Ldr(cache,
+ FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+ Label done;
+ __ Ldrsw(x2, UntagSmiFieldMemOperand(cache,
+ JSFunctionResultCache::kFingerOffset));
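+ // Compute the address of the entry the cache finger points at; the entry's
+ // key and value are stored in consecutive slots.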
+ __ Add(x3, cache, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x3, x3, Operand(x2, LSL, kPointerSizeLog2));
+
+ // Load the key and data from the cache.
+ __ Ldp(x2, x3, MemOperand(x3));
+
+ __ Cmp(key, x2);
+ __ CmovX(x0, x3, eq);
+ __ B(eq, &done);
+
+ // Call runtime to perform the lookup.
+ __ Push(cache, key);
+ __ CallRuntime(Runtime::kGetFromCache, 2);
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ Tst(x10, String::kContainsCachedArrayIndexMask);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(x0);
+
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ IndexFromHash(x10, x0);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
+
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ Register array = x0;
+ Register result = x0;
+ Register elements = x1;
+ Register element = x2;
+ Register separator = x3;
+ Register array_length = x4;
+ Register result_pos = x5;
+ Register map = x6;
+ Register string_length = x10;
+ Register elements_end = x11;
+ Register string = x12;
+ Register scratch1 = x13;
+ Register scratch2 = x14;
+ Register scratch3 = x7;
+ Register separator_length = x15;
+
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+
+ // The separator operand is on the stack.
+ __ Pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout);
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(map, scratch1, &bailout);
+
+ // If the array has length zero, return the empty string.
+ // Load and untag the length of the array.
+ // It is an unsigned value, so we can skip sign extension.
+ // We assume little endianness.
+ __ Ldrsw(array_length,
+ UntagSmiFieldMemOperand(array, JSArray::kLengthOffset));
+ __ Cbnz(array_length, &non_trivial_array);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ B(&done);
+
+ __ Bind(&non_trivial_array);
+ // Get the FixedArray containing array's elements.
+ __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths.
+ __ Mov(string_length, 0);
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (not smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (FLAG_debug_code) {
+ __ Cmp(array_length, Operand(0));
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ }
+ __ Bind(&loop);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ JumpIfSmi(string, &bailout);
+ __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ Ldrsw(scratch1,
+ UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ Adds(string_length, string_length, scratch1);
+ __ B(vs, &bailout);
+ __ Cmp(element, elements_end);
+ __ B(lt, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ Cmp(array_length, 1);
+ __ B(ne, &not_size_one_array);
+ __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ B(&done);
+
+ __ Bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array (not smi).
+ // string_length: Sum of string lengths (not smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string.
+ // Load the separator length as untagged.
+ // We assume little endianness, and that the length is positive.
+ __ Ldrsw(separator_length,
+ UntagSmiFieldMemOperand(separator,
+ SeqOneByteString::kLengthOffset));
+ __ Sub(string_length, string_length, separator_length);
+ __ Umaddl(string_length, array_length.W(), separator_length.W(),
+ string_length);
+
+ // Get first element in the array.
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array (not smi).
+ __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3,
+ &bailout);
+
+ // Prepare for looping. Set up elements_end to end of the array. Set
+ // result_pos to the position of the result where to write the first
+ // character.
+ // TODO(all): useless unless AllocateAsciiString trashes the register.
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ // Check the length of the separator.
+ __ Cmp(separator_length, 1);
+ __ B(eq, &one_char_separator);
+ __ B(gt, &long_separator);
+
+ // Empty separator case
+ __ Bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &empty_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ // One-character separator case
+ __ Bind(&one_char_separator);
+ // Replace separator with its ASCII character value.
+ __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ B(&one_char_separator_loop_entry);
+
+ __ Bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ASCII char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
+
+ // Copy next array element to the result.
+ __ Bind(&one_char_separator_loop_entry);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &one_char_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ Bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ // TODO(all): hoist next two instructions.
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(separator, String::kLengthOffset));
+ __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+
+ __ Bind(&long_separator);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &long_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ __ Bind(&bailout);
+ // Returning undefined will force slower code to handle it.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ Handle<String> name = expr->name();
+ if (name->length() > 0 && name->Get(0) == '_') {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ if (expr->is_jsruntime()) {
+ // Push the builtins object as the receiver.
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
+ __ Push(x0);
+
+ // Load the function from the receiver.
+ __ Mov(x2, Operand(name));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+ } else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
+ __ Mov(x10, Operand(Smi::FromInt(strict_mode_flag)));
+ __ Push(x10);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(x0);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is allowed.
+ ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ if (var->IsUnallocated()) {
+ __ Ldr(x12, GlobalObjectMemOperand());
+ __ Mov(x11, Operand(var->name()));
+ __ Mov(x10, Operand(Smi::FromInt(kNonStrictMode)));
+ __ Push(x12, x11, x10);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(x0);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(var->is_this());
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ Mov(x2, Operand(var->name()));
+ __ Push(context_register(), x2);
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(x0);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+ break;
+ }
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ // TODO(jbramley): This could be much more efficient using (for
+ // example) the CSEL instruction.
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+
+ __ Bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&done);
+ if (context()->IsStackValue()) {
+ __ Push(result_register());
+ }
+ }
+ break;
+ }
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ {
+ StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(x0);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // as the left-hand side.
+ if (!expr->expression()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->expression());
+ return;
+ }
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ Push(xzr);
+ }
+ if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in the accumulator.
+ VisitForAccumulatorValue(prop->obj());
+ __ Push(x0);
+ EmitNamedPropertyLoad(prop);
+ } else {
+ // KEYED_PROPERTY
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Peek(x1, 0);
+ __ Push(x0);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // We need a second deoptimization point after loading the value
+ // in case evaluating the property load may have a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(x0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property we
+ // store the result under the receiver that is currently on top of the
+ // stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ Push(x0);
+ break;
+ case NAMED_PROPERTY:
+ __ Poke(x0, kPointerSize);
+ break;
+ case KEYED_PROPERTY:
+ __ Poke(x0, kPointerSize * 2);
+ break;
+ }
+ }
+ }
+
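+ // Smi fast path: add the count value directly and skip the stub if the
+ // addition did not overflow.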
+ __ Adds(x0, x0, Operand(Smi::FromInt(count_value)));
+ __ B(vc, &done);
+ // Call stub. Undo operation first.
+ __ Sub(x0, x0, Operand(Smi::FromInt(count_value)));
+ __ B(&stub_call);
+ __ Bind(&slow);
+ }
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ Push(x0);
+ break;
+ case NAMED_PROPERTY:
+ __ Poke(x0, kXRegSizeInBytes);
+ break;
+ case KEYED_PROPERTY:
+ __ Poke(x0, 2 * kXRegSizeInBytes);
+ break;
+ }
+ }
+ }
+
+ __ Bind(&stub_call);
+ __ Mov(x1, x0);
+ __ Mov(x0, Operand(Smi::FromInt(count_value)));
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ {
+ Assembler::BlockConstPoolScope scope(masm_);
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ __ Bind(&done);
+
+ // Store the value returned in x0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(x0);
+ }
+ // For all contexts except EffectContext, we have the result on
+ // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(x1);
+ CallStoreIC(expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Pop(x1); // Key.
+ __ Pop(x2); // Receiver.
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsUnallocated()) {
+ Comment cmnt(masm_, "Global variable");
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(proxy->name()));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ CallLoadIC(NOT_CONTEXTUAL);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(x0);
+ } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+
+ __ Bind(&slow);
+ __ Mov(x0, Operand(proxy->name()));
+ __ Push(cp, x0);
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ Bind(&done);
+
+ context()->Plug(x0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInDuplicateContext(expr);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof");
+ Comment cmnt(masm_, "[ EmitLiteralCompareTypeof");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ if (check->Equals(isolate()->heap()->number_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string");
+ __ JumpIfSmi(x0, if_true);
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ CompareRoot(x0, Heap::kHeapNumberMapRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->string_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string");
+ __ JumpIfSmi(x0, if_false);
+ // Check for undetectable objects => false.
+ __ JumpIfObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE, if_false, ge);
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false,
+ fall_through);
+ } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string");
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x0, x1, SYMBOL_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string");
+ __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
+ __ CompareRoot(x0, Heap::kFalseValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (FLAG_harmony_typeof &&
+ check->Equals(isolate()->heap()->null_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string");
+ __ CompareRoot(x0, Heap::kNullValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ ASM_LOCATION(
+ "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
+ __ JumpIfSmi(x0, if_false);
+ // Check for undetectable objects => true.
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
+ fall_through);
+ } else if (check->Equals(isolate()->heap()->function_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string");
+ __ JumpIfSmi(x0, if_false);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true);
+ __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
+ fall_through);
+
+ } else if (check->Equals(isolate()->heap()->object_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
+ __ JumpIfSmi(x0, if_false);
+ if (!FLAG_harmony_typeof) {
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+ }
+ // Check for JS objects => true.
+ Register map = x10;
+ __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ if_false, lt);
+ __ CompareInstanceType(map, x11, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(gt, if_false);
+ // Check for undetectable objects => false.
+ __ Ldrb(x10, FieldMemOperand(map, Map::kBitFieldOffset));
+
+ __ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false,
+ fall_through);
+
+ } else {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
+ if (if_false != fall_through) __ B(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // Try to generate an optimized comparison with a literal value.
+ // TODO(jbramley): This only checks common values like NaN or undefined.
+ // Should it also handle A64 immediate operands?
+ if (TryLiteralCompare(expr)) {
+ return;
+ }
+
+ // Assign labels according to context()->PrepareTest.
+ Label materialize_true;
+ Label materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ // The stub returns 0 for true.
+ __ CompareAndSplit(x0, 0, eq, if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cond = CompareIC::ComputeCondition(op);
+
+ // Pop the stack value.
+ __ Pop(x1);
+
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ Split(cond, if_true, if_false, NULL);
+ __ Bind(&slow_case);
+ }
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareNil");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(x0, nil_value);
+ Split(eq, if_true, if_false, fall_through);
+ } else {
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through);
+ }
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ // TODO(jbramley): Tidy this up once the merge is done, using named registers
+  // and suchlike. The implementation changes a little in bleeding_edge, so I
+  // don't want to spend too much time on it now.
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ Push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ B(&suspend);
+
+ // TODO(jbramley): This label is bound here because the following code
+ // looks at its pos(). Is it possible to do something more efficient here,
+ // perhaps using Adr?
+ __ Bind(&continuation);
+ __ B(&resume);
+
+ __ Bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
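+      // Record the resume point: the code offset of the 'continuation' label
+      // is stored in the generator object as a smi.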
+ ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
+ __ Mov(x1, Operand(Smi::FromInt(continuation.pos())));
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+ __ Mov(x1, cp);
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
+ __ Cmp(__ StackPointer(), x1);
+ __ B(eq, &post_runtime);
+ __ Push(x0); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Bind(&post_runtime);
+ __ Pop(result_register());
+ EmitReturnSequence();
+
+ __ Bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ Mov(x1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ __ Str(x1, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ // Initial send value is undefined.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ B(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ Bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(x2, Heap::kthrow_stringRootIndex); // "throw"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(x2, x3, x0); // "throw", iter, except
+ __ B(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ Bind(&l_try);
+ __ Pop(x0); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ Push(x0); // result
+ __ B(&l_suspend);
+
+ // TODO(jbramley): This label is bound here because the following code
+ // looks at its pos(). Is it possible to do something more efficient here,
+ // perhaps using Adr?
+ __ Bind(&l_continuation);
+ __ B(&l_resume);
+
+ __ Bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ Peek(x0, generator_object_depth);
+ __ Push(x0); // g
+ ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
+ __ Mov(x1, Operand(Smi::FromInt(l_continuation.pos())));
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+ __ Mov(x1, cp);
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Pop(x0); // result
+ EmitReturnSequence();
+ __ Bind(&l_resume); // received in x0
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ Bind(&l_next);
+ __ LoadRoot(x2, Heap::knext_stringRootIndex); // "next"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(x2, x3, x0); // "next", iter, received
+
+ // result = receiver[f](arg);
+ __ Bind(&l_call);
+ __ Peek(x1, 1 * kPointerSize);
+ __ Peek(x0, 2 * kPointerSize);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ Mov(x1, x0);
+ __ Poke(x1, 2 * kPointerSize);
+ CallFunctionStub stub(1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ Bind(&l_loop);
+ __ Push(x0); // save result
+ __ LoadRoot(x2, Heap::kdone_stringRootIndex); // "done"
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in x0
+ // The ToBooleanStub argument (result.done) is in x0.
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ Cbz(x0, &l_try);
+
+ // result.value
+ __ Pop(x0); // result
+ __ LoadRoot(x2, Heap::kvalue_stringRootIndex); // "value"
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in x0
+ context()->DropAndPlug(2, x0); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
+ Register value_reg = x0;
+ Register generator_object = x1;
+ Register the_hole = x2;
+ Register operand_stack_size = w3;
+ Register function = x4;
+
+ // The value stays in x0, and is ultimately read by the resumed generator, as
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+  // is read to throw the value when the resumed generator is already closed.
+  // x1 will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ Pop(generator_object);
+
+ // Check generator state.
+ Label wrong_state, closed_state, done;
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
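+  // A suspended generator stores a positive continuation offset; zero means
+  // the generator is closed and a negative value means it is executing.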
+ __ CompareAndBranch(x10, Operand(Smi::FromInt(0)), eq, &closed_state);
+ __ CompareAndBranch(x10, Operand(Smi::FromInt(0)), lt, &wrong_state);
+
+ // Load suspended function and context.
+ __ Ldr(cp, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContextOffset));
+ __ Ldr(function, FieldMemOperand(generator_object,
+ JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kReceiverOffset));
+ __ Push(x10);
+
+ // Push holes for the rest of the arguments to the generator function.
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+
+ // The number of arguments is stored as an int32_t, and -1 is a marker
+ // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+ // extension to correctly handle it. However, in this case, we operate on
+ // 32-bit W registers, so extension isn't required.
+ __ Ldr(w10, FieldMemOperand(x10,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+
+ // TODO(jbramley): Write a variant of PushMultipleTimes which takes a register
+ // instead of a constant count, and use it to replace this loop.
+ Label push_argument_holes, push_frame;
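+  // In effect: while (--formal_parameter_count >= 0) { Push(the_hole); }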
+ __ Bind(&push_argument_holes);
+ __ Subs(w10, w10, 1);
+ __ B(mi, &push_frame);
+ __ Push(the_hole);
+ __ B(&push_argument_holes);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ Bind(&push_frame);
+ __ Bl(&resume_frame);
+ __ B(&done);
+
+ __ Bind(&resume_frame);
+ __ Push(lr, // Return address.
+ fp, // Caller's frame pointer.
+ cp, // Callee's context.
+ function); // Callee's JS Function.
+ __ Add(fp, __ StackPointer(), kPointerSize * 2);
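+  // The resumed frame now has the standard JavaScript frame layout:
+  //   fp[ 1 * kPointerSize]: lr (return address)
+  //   fp[ 0 * kPointerSize]: caller's fp
+  //   fp[-1 * kPointerSize]: cp (callee's context)
+  //   fp[-2 * kPointerSize]: callee's JS function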
+
+ // Load and untag the operand stack size.
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kOperandStackOffset));
+ __ Ldr(operand_stack_size,
+ UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ Cbnz(operand_stack_size, &slow_resume);
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ __ Ldrsw(x11,
+ UntagSmiFieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ __ Add(x10, x10, x11);
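+    // x10 now holds the code address at which to resume execution.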
+ __ Mov(x12, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ Str(x12, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ __ Br(x10);
+
+ __ Bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ // TODO(jbramley): Write a variant of PushMultipleTimes which takes a register
+ // instead of a constant count, and use it to replace this loop.
+ Label push_operand_holes, call_resume;
+ __ Bind(&push_operand_holes);
+ __ Subs(operand_stack_size, operand_stack_size, 1);
+ __ B(mi, &call_resume);
+ __ Push(the_hole);
+ __ B(&push_operand_holes);
+
+ __ Bind(&call_resume);
+ __ Mov(x10, Operand(Smi::FromInt(resume_mode)));
+ __ Push(generator_object, result_register(), x10);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ Unreachable();
+
+ // Reach here when generator is closed.
+ __ Bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ Push(value_reg);
+ __ CallRuntime(Runtime::kThrow, 1);
+ }
+ __ B(&done);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ Bind(&wrong_state);
+ __ Push(generator_object);
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+ __ Bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ // Allocate and populate an object with this form: { value: VAL, done: DONE }
+
+ Register result = x0;
+ __ Allocate(map->instance_size(), result, x10, x11, &gc_required, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ __ Bind(&allocated);
+ Register map_reg = x1;
+ Register result_value = x2;
+ Register boolean_done = x3;
+ Register empty_fixed_array = x4;
+ __ Mov(map_reg, Operand(map));
+ __ Pop(result_value);
+ __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
+ __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ // TODO(jbramley): Use Stp if possible.
+ __ Str(map_reg, FieldMemOperand(result, HeapObject::kMapOffset));
+ __ Str(empty_fixed_array,
+ FieldMemOperand(result, JSObject::kPropertiesOffset));
+ __ Str(empty_fixed_array, FieldMemOperand(result, JSObject::kElementsOffset));
+ __ Str(result_value,
+ FieldMemOperand(result,
+ JSGeneratorObject::kResultValuePropertyOffset));
+ __ Str(boolean_done,
+ FieldMemOperand(result,
+ JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(result, JSGeneratorObject::kResultValuePropertyOffset,
+ x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+}
+
+
+// TODO(all): I don't like this method.
+// It seems to me that in too many places x0 is used in place of this.
+// Also, this function is not suitable for all places where x0 should be
+// abstracted (e.g. when used as an argument). But some places assume that the
+// first argument register is x0, and use this function instead.
+// Considering that most of the register allocation is hard-coded in the
+// FullCodeGen, that it is unlikely we will need to change it extensively, and
+// that abstracting the allocation through functions would not yield any
+// performance benefit, I think the existence of this function is debatable.
+Register FullCodeGenerator::result_register() {
+ return x0;
+}
+
+
+Register FullCodeGenerator::context_register() {
+ return cp;
+}
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
+ __ Str(value, MemOperand(fp, frame_offset));
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ Ldr(dst, ContextMemOperand(cp, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ ASSERT(kSmiTag == 0);
+ __ Push(xzr);
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+ __ Push(x10);
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(x10);
+ }
+}
+
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
+ ASSERT(!result_register().is(x10));
+ // Preserve the result register while executing finally block.
+ // Also cook the return address in lr to the stack (smi encoded Code* delta).
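+  // The delta is smi-tagged so that the GC, which scans the expression stack,
+  // never sees a raw code address; ExitFinallyBlock adds the code object
+  // address back to recover lr.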
+ __ Sub(x10, lr, Operand(masm_->CodeObject()));
+ __ SmiTag(x10);
+ __ Push(result_register(), x10);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Mov(x10, Operand(pending_message_obj));
+ __ Ldr(x10, MemOperand(x10));
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Mov(x11, Operand(has_pending_message));
+ __ Ldr(x11, MemOperand(x11));
+ __ SmiTag(x11);
+
+ __ Push(x10, x11);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Mov(x10, Operand(pending_message_script));
+ __ Ldr(x10, MemOperand(x10));
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
+ ASSERT(!result_register().is(x10));
+
+ // Restore pending message from stack.
+ __ Pop(x10, x11, x12);
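+  // The pop order mirrors the pushes in EnterFinallyBlock:
+  // x10 = pending message script, x11 = has-pending-message (smi),
+  // x12 = pending message object.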
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Mov(x13, Operand(pending_message_script));
+ __ Str(x10, MemOperand(x13));
+
+ __ SmiUntag(x11);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Mov(x13, Operand(has_pending_message));
+ __ Str(x11, MemOperand(x13));
+
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Mov(x13, Operand(pending_message_obj));
+ __ Str(x12, MemOperand(x13));
+
+ // Restore result register and cooked return address from the stack.
+ __ Pop(x10, result_register());
+
+ // Uncook the return address (see EnterFinallyBlock).
+ __ SmiUntag(x10);
+ __ Add(x11, x10, Operand(masm_->CodeObject()));
+ __ Br(x11);
+}
+
+
+#undef __
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+  // The branch (or nop) to patch is the third instruction before the return
+  // address; rewrite it below according to the requested target state.
+ Address branch_address = pc - 3 * kInstructionSize;
+ PatchingAssembler patcher(branch_address, 1);
+
+ switch (target_state) {
+ case INTERRUPT:
+ // <decrement profiling counter>
+ // .. .. .. .. b.pl ok
+ // .. .. .. .. ldr x16, pc+<interrupt stub address>
+ // .. .. .. .. blr x16
+ // ... more instructions.
+ // ok-label
+ // Jump offset is 6 instructions.
+ ASSERT(Instruction::Cast(branch_address)
+ ->IsNop(Assembler::INTERRUPT_CODE_NOP));
+ patcher.b(6, pl);
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // <decrement profiling counter>
+ // .. .. .. .. mov x0, x0 (NOP)
+ // .. .. .. .. ldr x16, pc+<on-stack replacement address>
+ // .. .. .. .. blr x16
+ ASSERT(Instruction::Cast(branch_address)->IsCondBranchImm());
+ ASSERT(Instruction::Cast(branch_address)->ImmPCOffset() ==
+ 6 * kInstructionSize);
+ patcher.nop(Assembler::INTERRUPT_CODE_NOP);
+ break;
+ }
+
+ // Replace the call address.
+ Instruction* load = Instruction::Cast(pc)->preceding(2);
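+  // The load is a pc-relative literal load of the target address; patching
+  // the 64-bit literal it reads from switches the sequence between the
+  // interrupt and on-stack replacement builtins.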
+ Address interrupt_address_pointer =
+ reinterpret_cast<Address>(load) + load->ImmPCOffset();
+ ASSERT((Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->OnStackReplacement()
+ ->entry())) ||
+ (Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->InterruptCheck()
+ ->entry())) ||
+ (Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->OsrAfterStackCheck()
+ ->entry())) ||
+ (Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->OnStackReplacement()
+ ->entry())));
+ Memory::uint64_at(interrupt_address_pointer) =
+ reinterpret_cast<uint64_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, reinterpret_cast<Address>(load), replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ // TODO(jbramley): There should be some extra assertions here (as in the ARM
+ // back-end), but this function is gone in bleeding_edge so it might not
+ // matter anyway.
+ Instruction* jump_or_nop = Instruction::Cast(pc)->preceding(3);
+
+ if (jump_or_nop->IsNop(Assembler::INTERRUPT_CODE_NOP)) {
+ Instruction* load = Instruction::Cast(pc)->preceding(2);
+ uint64_t entry = Memory::uint64_at(reinterpret_cast<Address>(load) +
+ load->ImmPCOffset());
+ if (entry == reinterpret_cast<uint64_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ } else if (entry == reinterpret_cast<uint64_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry())) {
+ return OSR_AFTER_STACK_CHECK;
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ return INTERRUPT;
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+ int* stack_depth,
+ int* context_length) {
+ ASM_LOCATION("FullCodeGenerator::TryFinally::Exit");
+ // The macros used here must preserve the result register.
+
+ // Because the handler block contains the context of the finally
+ // code, we can restore it directly from there for the finally code
+ // rather than iteratively unwinding contexts via their previous
+ // links.
+ __ Drop(*stack_depth); // Down to the handler block.
+ if (*context_length > 0) {
+ // Restore the context to its dedicated register and the stack.
+ __ Peek(cp, StackHandlerConstants::kContextOffset);
+ __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ PopTryHandler();
+ __ Bl(finally_entry_);
+
+ *stack_depth = 0;
+ *context_length = 0;
+ return previous_;
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/ic-a64.cc b/deps/v8/src/a64/ic-a64.cc
new file mode 100644
index 0000000000..93d7857b05
--- /dev/null
+++ b/deps/v8/src/a64/ic-a64.cc
@@ -0,0 +1,1413 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "a64/assembler-a64.h"
+#include "code-stubs.h"
+#include "codegen.h"
+#include "disasm.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// "type" holds an instance type on entry and is not clobbered.
+// The generated code branches to "global_object" if the type is any kind of
+// global JS object.
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
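+  // The Ccmp instructions below chain the remaining checks onto this Cmp:
+  // each compares only if the previous check failed (ne) and otherwise forces
+  // the Z flag, so the final B(eq) is taken if the type matches any of the
+  // three global object types.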
+ __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
+ __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
+ __ B(eq, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+//
+// "receiver" holds the receiver on entry and is unchanged.
+// "elements" holds the property dictionary on fall through.
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
+ ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+ // Let t be the object instance type, we want:
+ // FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
+ // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
+ // check the lower bound.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+ __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
+ miss, lt);
+
+ // scratch0 now contains the map of the receiver and scratch1 the object type.
+ Register map = scratch0;
+ Register type = scratch1;
+
+ // Check if the receiver is a global JS object.
+ GenerateGlobalInstanceTypeCheck(masm, type, miss);
+
+ // Check that the object does not require access checks.
+ __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
+ __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);
+
+ // Check that the properties dictionary is valid.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
+}
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done.
+// The scratch registers need to be different from elements, name and result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+ ASSERT(!AreAliased(result, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry check that the value is a normal property.
+ __ Bind(&done);
+
+ static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
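+  // Each dictionary entry is a (key, value, details) triple, so the details
+  // word lives two pointers after the entry start and the value (loaded at
+  // the end of this function) one pointer after it.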
+ __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ Tst(scratch1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
+ __ B(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ __ Ldr(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done
+// value: The value to store (never clobbered).
+//
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ Bind(&done);
+
+ static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ static const int kTypeAndReadOnlyMask =
+ PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY);
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ __ Tst(scratch1, kTypeAndReadOnlyMask);
+ __ B(ne, miss);
+
+ // Store the value at the masked, scaled index and return.
+ static const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
+ __ Str(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ Mov(scratch1, value);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for a regular JS object and returns the map of the
+// receiver in 'map_scratch' if the receiver is not a smi.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map_scratch,
+ Register scratch,
+ int interceptor_bit,
+ Label* slow) {
+ ASSERT(!AreAliased(map_scratch, scratch));
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
+ __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
+ __ Tbnz(scratch, interceptor_bit, slow);
+
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object, we enter the
+// runtime system to make sure that indexing into string objects works
+ // as intended.
+ STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ __ Cmp(scratch, JS_OBJECT_TYPE);
+ __ B(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+//
+// receiver - holds the receiver on entry.
+// Unchanged unless 'result' is the same register.
+//
+// key - holds the smi key on entry.
+// Unchanged unless 'result' is the same register.
+//
+// elements - holds the elements of the receiver on exit.
+//
+// elements_map - holds the elements map on exit if the not_fast_array branch is
+// taken. Otherwise, this is used as a scratch register.
+//
+// result - holds the result on exit if the load succeeded.
+// Allowed to be the same as 'receiver' or 'key'.
+// Unchanged on bailout so 'receiver' and 'key' can be safely
+// used by further computation.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register elements_map,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* slow) {
+ ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
+
+ // Check for fast array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
+ not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
+
+ // The elements_map register is only used for the not_fast_array path, which
+ // was handled above. From this point onward it is a scratch register.
+ Register scratch1 = elements_map;
+
+ // Check that the key (index) is within bounds.
+ __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch1);
+ __ B(hs, slow);
+
+ // Fast case: Do the load.
+ __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
+
+ // Move the value to the result register.
+ // 'result' can alias with 'receiver' or 'key' but these two must be
+ // preserved if we jump to 'slow'.
+ __ Mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+// The map of the key is returned in 'map_scratch'.
+// If the jump to 'index_string' is done the hash of the key is left
+// in 'hash_scratch'.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map_scratch,
+ Register hash_scratch,
+ Label* index_string,
+ Label* not_unique) {
+ ASSERT(!AreAliased(key, map_scratch, hash_scratch));
+
+ // Is the key a name?
+ Label unique;
+ __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
+ not_unique, hi);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ B(eq, &unique);
+
+ // Is the string an array index with cached numeric value?
+ __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+ __ TestAndBranchIfAllClear(hash_scratch,
+ Name::kContainsCachedArrayIndexMask,
+ index_string);
+
+ // Is the string internalized? We know it's a string, so a single bit test is
+ // enough.
+ __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
+
+ __ Bind(&unique);
+ // Fall through if the key is a unique name.
+}
+
+
+// Neither 'object' nor 'key' is modified by this function.
+//
+// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
+// left with the object's elements map. Otherwise, it is used as a scratch
+// register.
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register map,
+ Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
+
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
+ slow_case, lt);
+
+ // Check that the key is a positive smi.
+ __ JumpIfNotSmi(key, slow_case);
+ __ Tbnz(key, kXSignBit, slow_case);
+
+ // Load the elements object and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup.
+ __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ Sub(scratch1, scratch1, Operand(Smi::FromInt(2)));
+ __ Cmp(key, scratch1);
+ __ B(hs, unmapped_case);
+
+ // Load element index and check whether it is the hole.
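+  // The first two elements of the parameter map hold the context and the
+  // arguments backing store, so the mapped entries start at index 2.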
+ static const int offset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ Add(scratch1, map, offset);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
+
+ // Load value from context and return it.
+ __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
+ __ SmiUntag(scratch1);
+ __ Add(scratch2, scratch2, Context::kHeaderSize - kHeapObjectTag);
+ return MemOperand(scratch2, scratch1, LSL, kPointerSizeLog2);
+}
+
+
+// The 'parameter_map' register must be loaded with the parameter map of the
+// arguments object and is overwritten.
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ ASSERT(!AreAliased(key, parameter_map, scratch));
+
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(
+ backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch);
+ __ B(hs, slow_case);
+
+ __ Add(backing_store,
+ backing_store,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch, key);
+ return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, x0, x2, x3, x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+ Label miss;
+
+ GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);
+
+ // x1 now holds the property dictionary.
+ GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+ ASM_LOCATION("LoadIC::GenerateMiss");
+
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+
+ // TODO(jbramley): Does the target actually expect an argument in x3, or is
+ // this inherited from ARM's push semantics?
+ __ Mov(x3, x0);
+ __ Push(x3, x2);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+
+ // TODO(jbramley): Does the target actually expect an argument in x3, or is
+ // this inherited from ARM's push semantics?
+ __ Mov(x3, x0);
+ __ Push(x3, x2);
+
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Register result = x0;
+ Register key = x0;
+ Register receiver = x1;
+ Label miss, unmapped;
+
+ Register map_scratch = x2;
+ MemOperand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
+ __ Ldr(result, mapped_location);
+ __ Ret();
+
+ __ Bind(&unmapped);
+  // The parameter map is left in map_scratch when the jump to 'unmapped' is
+  // taken.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
+ __ Ldr(x2, unmapped_location);
+ __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
+  // Move the result into x0. x0 must be preserved on miss.
+ __ Mov(result, x2);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateNonStrictArguments");
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -----------------------------------
+
+ Label slow, notin;
+
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register map = x3;
+
+ // These registers are used by GenerateMappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register mapped1 = x4;
+ Register mapped2 = x5;
+
+ MemOperand mapped =
+ GenerateMappedArgumentsLookup(masm, receiver, key, map,
+ mapped1, mapped2,
+ &notin, &slow);
+ Operand mapped_offset = mapped.OffsetAsOperand();
+ __ Str(value, mapped);
+ __ Add(x10, mapped.base(), mapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+
+ __ Bind(&notin);
+
+ // These registers are used by GenerateMappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register unmapped1 = map; // This is assumed to alias 'map'.
+ Register unmapped2 = x4;
+ MemOperand unmapped =
+ GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
+ Operand unmapped_offset = unmapped.OffsetAsOperand();
+ __ Str(value, unmapped);
+ __ Add(x10, unmapped.base(), unmapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(unmapped.base(), x10, x11,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
+
+ __ Push(x1, x0);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Register key = x0;
+ Register receiver = x1;
+
+ __ Push(receiver, key);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
+ Register key,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label *slow) {
+ ASSERT(!AreAliased(
+ key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label check_number_dictionary;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
+ __ IncrementCounter(
+ isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
+ __ Ret();
+
+ __ Bind(&check_number_dictionary);
+ __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
+
+ // Check whether we have a number dictionary.
+ __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
+
+ __ LoadFromNumberDictionary(
+ slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
+ __ Ret();
+}
+
+static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
+ Register key,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label *slow) {
+ ASSERT(!AreAliased(
+ key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label probe_dictionary, property_array_property;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup cache.
+ // Otherwise probe the dictionary.
+ __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
+
+ // We keep the map of the receiver in scratch1.
+ Register receiver_map = scratch1;
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the name hash.
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
+ __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+ __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(scratch2, scratch2, mask);
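+  // scratch2 now holds the lookup cache index:
+  //   ((map >> kMapHashShift) ^ (hash_field >> Name::kHashShift)) & mask.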
+
+ // Load the key (consisting of map and unique name) from the cache and
+ // check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+
+ __ Mov(scratch3, Operand(cache_keys));
+ __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
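+  // Each entry in the keys cache is a (map, name) pair, hence the extra
+  // shift by one when scaling the index.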
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+    // Load the map and make scratch3 point to the next entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, &try_next_entry);
+ __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name
+ __ Cmp(key, scratch4);
+ __ B(eq, &hit_on_nth_entry[i]);
+ __ Bind(&try_next_entry);
+ }
+
+ // Last entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, slow);
+ __ Ldr(scratch4, MemOperand(scratch3));
+ __ Cmp(key, scratch4);
+ __ B(ne, slow);
+
+ // Get field offset.
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ Bind(&hit_on_nth_entry[i]);
+ __ Mov(scratch3, Operand(cache_field_offsets));
+ if (i != 0) {
+ __ Add(scratch2, scratch2, i);
+ }
+ __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
+ __ Ldrb(scratch5,
+ FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
+ __ Subs(scratch4, scratch4, scratch5);
+ __ B(ge, &property_array_property);
+ if (i != 0) {
+ __ B(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ Bind(&load_in_object_property);
+ __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
+ __ Add(scratch5, scratch5, scratch4); // Index from start of object.
+ __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
+ __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, scratch1, scratch2);
+ __ Ret();
+
+ // Load property array property.
+ __ Bind(&property_array_property);
+ __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, scratch1, scratch2);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it exists.
+ __ Bind(&probe_dictionary);
+ __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
+ // Load the property.
+ GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+ 1, scratch1, scratch2);
+ __ Ret();
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow, check_name, index_smi, index_name;
+
+ Register key = x0;
+ Register receiver = x1;
+
+ __ JumpIfNotSmi(key, &check_name);
+ __ Bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+ GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+ // Slow case, key and receiver still in x0 and x1.
+ __ Bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ Bind(&check_name);
+ GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);
+
+ GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+ __ Bind(&index_name);
+ __ IndexFromHash(x3, key);
+ // Now jump to the place where smi keys are handled.
+ __ B(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key (index)
+ // -- x1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Register index = x0;
+ Register receiver = x1;
+ Register result = x0;
+ Register scratch = x3;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow;
+ Register key = x0;
+ Register receiver = x1;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
+
+ // Get the map of the receiver.
+ Register map = x2;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
+ ASSERT(kSlowCaseBitFieldMask ==
+ ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
+ __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
+ __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate()),
+ 2,
+ 1);
+
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateMiss");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSlow");
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ // Push PropertyAttributes(NONE) and strict_mode for runtime call.
+ STATIC_ASSERT(NONE == 0);
+ __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
+ __ Push(xzr, x10);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length,
+ Register value,
+ Register key,
+ Register receiver,
+ Register receiver_map,
+ Register elements_map,
+ Register elements) {
+ ASSERT(!AreAliased(
+ value, key, receiver, receiver_map, elements_map, elements, x10, x11));
+
+ Label transition_smi_elements;
+ Label transition_double_elements;
+ Label fast_double_without_map_check;
+ Label non_double_value;
+ Label finish_store;
+
+ __ Bind(fast_object);
+ if (check_map == kCheckMap) {
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(ne, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because there
+ // may be a callback on the element.
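+  // For example (illustrative): with an accessor such as
+  //   Object.defineProperty(Array.prototype, 3, { set: f });
+  // a store like 'a[3] = 42' into a holey array must not write straight into
+  // the backing store, or the prototype setter would be skipped.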
+ Label holecheck_passed;
+ // TODO(all): This address calculation is repeated later (for the store
+ // itself). We should keep the result to avoid doing the work twice.
+ __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10));
+ __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+ __ bind(&holecheck_passed);
+
+ // Smi stores don't require further checks.
+ __ JumpIfSmi(value, &finish_store);
+
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
+
+ __ Bind(&finish_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Operand(Smi::FromInt(1)));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+
+ Register address = x11;
+ __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Str(value, MemOperand(address));
+
+ Label dont_record_write;
+ __ JumpIfSmi(value, &dont_record_write);
+
+ // Update write barrier for the elements array address.
+ __ Mov(x10, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ x10,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ Bind(&dont_record_write);
+ __ Ret();
+
+
+ __ Bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
+ }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so, go to
+  // the runtime.
+ // TODO(all): This address calculation was done earlier. We should keep the
+ // result to avoid doing the work twice.
+ __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10));
+ __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+
+ __ Bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ elements,
+ x10,
+ d0,
+ d1,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Operand(Smi::FromInt(1)));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+
+ __ Bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ x10,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&fast_double_without_map_check);
+
+ __ Bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ x10,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+
+ __ Bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ x10,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow;
+ Label array;
+ Label fast_object;
+ Label extra;
+ Label fast_object_grow;
+ Label fast_double_grow;
+ Label fast_double;
+
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register receiver_map = x3;
+ Register elements = x4;
+ Register elements_map = x5;
+
+ __ JumpIfNotSmi(key, &slow);
+ __ JumpIfSmi(receiver, &slow);
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(
+ x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
+
+ // Check if the object is a JS array or not.
+ Register instance_type = x10;
+ __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
+ __ B(eq, &array);
+ // Check that the object is some kind of JSObject.
+ __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
+ __ B(lt, &slow);
+
+ // Object case: Check key against length in the elements array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(hi, &fast_object);
+
+
+ __ Bind(&slow);
+ // Slow case, handle jump to runtime.
+ // Live values:
+ // x0: value
+ // x1: key
+ // x2: receiver
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+
+ __ Bind(&extra);
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(ls, &slow);
+
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(eq, &fast_object_grow);
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ B(eq, &fast_double_grow);
+ __ B(&slow);
+
+
+ __ Bind(&array);
+ // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(eq, &extra); // We can handle the case where we are appending 1 element.
+ __ B(lo, &slow);
+
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, x1, x2, x3, x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ __ Push(x1, x2, x0);
+
+ // Tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+ Register value = x0;
+ Register receiver = x1;
+ Register name = x2;
+ Register dictionary = x3;
+
+ GenerateNameDictionaryReceiverCheck(
+ masm, receiver, dictionary, x4, x5, &miss);
+
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ __ Push(x1, x2, x0);
+
+ __ Mov(x11, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
+ __ Push(x11, x10);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, name and value for runtime call.
+ __ Push(x1, x2, x0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return al;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address info_address =
+ Assembler::return_address_from_call_start(address);
+
+ InstructionSequence* patch_info = InstructionSequence::At(info_address);
+ return patch_info->IsInlineData();
+}
+
+
+// Activate a SMI fast-path by patching the instructions generated by
+// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
+// JumpPatchSite::EmitPatchInfo().
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ // The patch information is encoded in the instruction stream using
+ // instructions which have no side effects, so we can safely execute them.
+ // The patch information is encoded directly after the call to the helper
+ // function which is requesting this patch operation.
+ Address info_address =
+ Assembler::return_address_from_call_start(address);
+ InlineSmiCheckInfo info(info_address);
+
+ // Check and decode the patch information instruction.
+ if (!info.HasSmiCheck()) {
+ return;
+ }
+
+ if (FLAG_trace_ic) {
+ PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
+ address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
+ }
+
+ // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
+ // and JumpPatchSite::EmitJumpIfSmi().
+ // Changing
+ // tb(n)z xzr, #0, <target>
+ // to
+ // tb(!n)z test_reg, #0, <target>
+ Instruction* to_patch = info.SmiCheck();
+ PatchingAssembler patcher(to_patch, 1);
+ ASSERT(to_patch->IsTestBranch());
+ ASSERT(to_patch->ImmTestBranchBit5() == 0);
+ ASSERT(to_patch->ImmTestBranchBit40() == 0);
+
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+
+ int branch_imm = to_patch->ImmTestBranch();
+ Register smi_reg;
+ if (check == ENABLE_INLINED_SMI_CHECK) {
+ ASSERT(to_patch->Rt() == xzr.code());
+ smi_reg = info.SmiRegister();
+ } else {
+ ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+ ASSERT(to_patch->Rt() != xzr.code());
+ smi_reg = xzr;
+ }
+
+ if (to_patch->Mask(TestBranchMask) == TBZ) {
+ // This is JumpIfNotSmi(smi_reg, branch_imm).
+ patcher.tbnz(smi_reg, 0, branch_imm);
+ } else {
+ ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
+ // This is JumpIfSmi(smi_reg, branch_imm).
+ patcher.tbz(smi_reg, 0, branch_imm);
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/instructions-a64.cc b/deps/v8/src/a64/instructions-a64.cc
new file mode 100644
index 0000000000..4496d56753
--- /dev/null
+++ b/deps/v8/src/a64/instructions-a64.cc
@@ -0,0 +1,334 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#define A64_DEFINE_FP_STATICS
+
+#include "a64/instructions-a64.h"
+#include "a64/assembler-a64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool Instruction::IsLoad() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) != 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_w:
+ case LDRSB_x:
+ case LDRSH_w:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDR_s:
+ case LDR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
+bool Instruction::IsStore() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) == 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x:
+ case STR_s:
+ case STR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
+static uint64_t RotateRight(uint64_t value,
+ unsigned int rotate,
+ unsigned int width) {
+ ASSERT(width <= 64);
+ rotate &= 63;
+ return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
+ (value >> rotate);
+}
+
+
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+ uint64_t value,
+ unsigned width) {
+ ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ (width == 32));
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ uint64_t result = value & ((1UL << width) - 1UL);
+ for (unsigned i = width; i < reg_size; i *= 2) {
+ result |= (result << i);
+ }
+ return result;
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case, specifically where the constraints on imm_s are
+// not met.
+uint64_t Instruction::ImmLogical() {
+ unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t n = BitN();
+ int64_t imm_s = ImmSetBits();
+ int64_t imm_r = ImmRotate();
+
+ // An integer is constructed from the n, imm_s and imm_r bits according to
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+
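+  // For example (illustrative): with N = 0, imms = 0b110001 and
+  // immr = 0b000010, the element size is 8, S = 1 and R = 2. The 8-bit
+  // pattern 0b00000011 is rotated right by two to 0b11000000 and then
+  // repeated, giving 0xc0c0c0c0 for a W register.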
+ if (n == 1) {
+ if (imm_s == 0x3F) {
+ return 0;
+ }
+ uint64_t bits = (1UL << (imm_s + 1)) - 1;
+ return RotateRight(bits, imm_r, 64);
+ } else {
+ if ((imm_s >> 1) == 0x1F) {
+ return 0;
+ }
+ for (int width = 0x20; width >= 0x2; width >>= 1) {
+ if ((imm_s & width) == 0) {
+ int mask = width - 1;
+ if ((imm_s & mask) == mask) {
+ return 0;
+ }
+ uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
+ return RepeatBitsAcrossReg(reg_size,
+ RotateRight(bits, imm_r & mask, width),
+ width);
+ }
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+float Instruction::ImmFP32() {
+ // ImmFP: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
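+  // For example (illustrative): imm8 = 0x70 gives a = 0, b = 1 and
+  // cdefgh = 0b110000, so the result computed below is
+  // (31 << 25) | (0x30 << 19) = 0x3f800000, i.e. 1.0f.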
+ uint32_t bits = ImmFP();
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return rawbits_to_float(result);
+}
+
+
+double Instruction::ImmFP64() {
+ // ImmFP: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
+ uint32_t bits = ImmFP();
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+ return rawbits_to_double(result);
+}
+
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
+ switch (op) {
+ case STP_x:
+ case LDP_x:
+ case STP_d:
+ case LDP_d: return LSDoubleWord;
+ default: return LSWord;
+ }
+}
+
+
+ptrdiff_t Instruction::ImmPCOffset() {
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // PC-relative addressing. Only ADR is supported.
+ offset = ImmPCRel();
+ } else if (BranchType() != UnknownBranchType) {
+ // All PC-relative branches.
+ // Relative branch offsets are instruction-size-aligned.
+ offset = ImmBranch() << kInstructionSizeLog2;
+ } else {
+ // Load literal (offset from PC).
+ ASSERT(IsLdrLiteral());
+    // The offset is always shifted by 2 bits, even for loads to 64-bit
+    // registers.
+ offset = ImmLLiteral() << kInstructionSizeLog2;
+ }
+ return offset;
+}
+
+
+Instruction* Instruction::ImmPCOffsetTarget() {
+ return this + ImmPCOffset();
+}
+
+
+bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
+ int32_t offset) {
+ return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
+}
+
+
+bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
+ int offset = target - this;
+ return IsValidImmPCOffset(BranchType(), offset);
+}
+
+
+void Instruction::SetImmPCOffsetTarget(Instruction* target) {
+ if (IsPCRelAddressing()) {
+ SetPCRelImmTarget(target);
+ } else if (BranchType() != UnknownBranchType) {
+ SetBranchImmTarget(target);
+ } else {
+ SetImmLLiteral(target);
+ }
+}
+
+
+void Instruction::SetPCRelImmTarget(Instruction* target) {
+ // ADRP is not supported, so 'this' must point to an ADR instruction.
+ ASSERT(Mask(PCRelAddressingMask) == ADR);
+
+ Instr imm = Assembler::ImmPCRelAddress(target - this);
+
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+}
+
+
+void Instruction::SetBranchImmTarget(Instruction* target) {
+ ASSERT(((target - this) & 3) == 0);
+ Instr branch_imm = 0;
+ uint32_t imm_mask = 0;
+ int offset = (target - this) >> kInstructionSizeLog2;
+ switch (BranchType()) {
+ case CondBranchType: {
+ branch_imm = Assembler::ImmCondBranch(offset);
+ imm_mask = ImmCondBranch_mask;
+ break;
+ }
+ case UncondBranchType: {
+ branch_imm = Assembler::ImmUncondBranch(offset);
+ imm_mask = ImmUncondBranch_mask;
+ break;
+ }
+ case CompareBranchType: {
+ branch_imm = Assembler::ImmCmpBranch(offset);
+ imm_mask = ImmCmpBranch_mask;
+ break;
+ }
+ case TestBranchType: {
+ branch_imm = Assembler::ImmTestBranch(offset);
+ imm_mask = ImmTestBranch_mask;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+void Instruction::SetImmLLiteral(Instruction* source) {
+ ASSERT(((source - this) & 3) == 0);
+ int offset = (source - this) >> kLiteralEntrySizeLog2;
+ Instr imm = Assembler::ImmLLiteral(offset);
+ Instr mask = ImmLLiteral_mask;
+
+ SetInstructionBits(Mask(~mask) | imm);
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-a64-inl.h to work around this.
+bool InstructionSequence::IsInlineData() const {
+ // Inline data is encoded as a single movz instruction which writes to xzr
+ // (x31).
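+  // For example (illustrative): inline data 0x1234 is encoded as
+  // 'movz xzr, #0x1234', and InlineData() below recovers the 0x1234 payload.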
+ return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
+ // TODO(all): If we extend ::InlineData() to support bigger data, we need
+ // to update this method too.
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-a64-inl.h to work around this.
+uint64_t InstructionSequence::InlineData() const {
+ ASSERT(IsInlineData());
+ uint64_t payload = ImmMoveWide();
+ // TODO(all): If we extend ::InlineData() to support bigger data, we need
+ // to update this method too.
+ return payload;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/instructions-a64.h b/deps/v8/src/a64/instructions-a64.h
new file mode 100644
index 0000000000..472d4bf9fd
--- /dev/null
+++ b/deps/v8/src/a64/instructions-a64.h
@@ -0,0 +1,516 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_INSTRUCTIONS_A64_H_
+#define V8_A64_INSTRUCTIONS_A64_H_
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/constants-a64.h"
+#include "a64/utils-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ISA constants. --------------------------------------------------------------
+
+typedef uint32_t Instr;
+
+// The following macros initialize a float/double variable with a bit pattern
+// without using static initializers: If A64_DEFINE_FP_STATICS is defined, the
+// symbol is defined as uint32_t/uint64_t initialized with the desired bit
+// pattern. Otherwise, the same symbol is declared as an external float/double.
+#if defined(A64_DEFINE_FP_STATICS)
+#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
+#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
+#else
+#define DEFINE_FLOAT(name, value) extern const float name
+#define DEFINE_DOUBLE(name, value) extern const double name
+#endif // defined(A64_DEFINE_FP_STATICS)
+
+DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
+DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
+DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
+DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
+
+// This value is a signalling NaN as both a double and as a float (taking the
+// least-significant word).
+DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
+DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
+
+// A similar value, but as a quiet NaN.
+DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
+DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
+
+#undef DEFINE_FLOAT
+#undef DEFINE_DOUBLE
+
+
+enum LSDataSize {
+ LSByte = 0,
+ LSHalfword = 1,
+ LSWord = 2,
+ LSDoubleWord = 3
+};
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+ UnknownBranchType = 0,
+ CondBranchType = 1,
+ UncondBranchType = 2,
+ CompareBranchType = 3,
+ TestBranchType = 4
+};
+
+enum AddrMode {
+ Offset,
+ PreIndex,
+ PostIndex
+};
+
+enum FPRounding {
+ // The first four values are encodable directly by FPCR<RMode>.
+ FPTieEven = 0x0,
+ FPPositiveInfinity = 0x1,
+ FPNegativeInfinity = 0x2,
+ FPZero = 0x3,
+
+ // The final rounding mode is only available when explicitly specified by the
+ // instruction (such as with fcvta). It cannot be set in FPCR.
+ FPTieAway
+};
+
+enum Reg31Mode {
+ Reg31IsStackPointer,
+ Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+ Instr InstructionBits() const {
+ Instr bits;
+ memcpy(&bits, this, sizeof(bits));
+ return bits;
+ }
+
+ void SetInstructionBits(Instr new_instr) {
+ memcpy(this, &new_instr, sizeof(new_instr));
+ }
+
+ int Bit(int pos) const {
+ return (InstructionBits() >> pos) & 1;
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, InstructionBits());
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ return signed_bitextract_32(msb, lsb, bits);
+ }
+
+ Instr Mask(uint32_t mask) const {
+ return InstructionBits() & mask;
+ }
+
+ Instruction* following(int count = 1) {
+ return this + count * kInstructionSize;
+ }
+
+ Instruction* preceding(int count = 1) {
+ return this - count * kInstructionSize;
+ }
+
+ #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ int64_t Name() const { return Func(HighBit, LowBit); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+ #undef DEFINE_GETTER
+
+ // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+ // formed from ImmPCRelLo and ImmPCRelHi.
+ int ImmPCRel() const {
+ int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int const width = ImmPCRelLo_width + ImmPCRelHi_width;
+ return signed_bitextract_32(width-1, 0, offset);
+ }
+
+ uint64_t ImmLogical();
+ float ImmFP32();
+ double ImmFP64();
+
+ LSDataSize SizeLSPair() const {
+ return CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+ }
+
+ // Helpers.
+ bool IsCondBranchImm() const {
+ return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+ }
+
+ bool IsUncondBranchImm() const {
+ return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+ }
+
+ bool IsCompareBranch() const {
+ return Mask(CompareBranchFMask) == CompareBranchFixed;
+ }
+
+ bool IsTestBranch() const {
+ return Mask(TestBranchFMask) == TestBranchFixed;
+ }
+
+ bool IsLdrLiteral() const {
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+ }
+
+ bool IsLdrLiteralX() const {
+ return Mask(LoadLiteralMask) == LDR_x_lit;
+ }
+
+ bool IsPCRelAddressing() const {
+ return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+ }
+
+ bool IsLogicalImmediate() const {
+ return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+ }
+
+ bool IsAddSubImmediate() const {
+ return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+ }
+
+ bool IsAddSubExtended() const {
+ return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+ }
+
+ // Match any loads or stores, including pairs.
+ bool IsLoadOrStore() const {
+ return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+ }
+
+ // Match any loads, including pairs.
+ bool IsLoad() const;
+ // Match any stores, including pairs.
+ bool IsStore() const;
+
+ // Indicate whether Rd can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rd field.
+ Reg31Mode RdMode() const {
+ // The following instructions use csp or wsp as Rd:
+ // Add/sub (immediate) when not setting the flags.
+ // Add/sub (extended) when not setting the flags.
+ // Logical (immediate) when not setting the flags.
+ // Otherwise, r31 is the zero register.
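+    // For example (illustrative): 'add csp, csp, #16' treats r31 as the stack
+    // pointer, whereas the flag-setting 'adds xzr, x0, #1' treats it as xzr.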
+ if (IsAddSubImmediate() || IsAddSubExtended()) {
+ if (Mask(AddSubSetFlagsBit)) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ if (IsLogicalImmediate()) {
+ // Of the logical (immediate) instructions, only ANDS (and its aliases)
+ // can set the flags. The others can all write into csp.
+ // Note that some logical operations are not available to
+ // immediate-operand instructions, so we have to combine two masks here.
+ if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ // Indicate whether Rn can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rn field.
+ Reg31Mode RnMode() const {
+ // The following instructions use csp or wsp as Rn:
+ // All loads and stores.
+ // Add/sub (immediate).
+ // Add/sub (extended).
+ // Otherwise, r31 is the zero register.
+ if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+ return Reg31IsStackPointer;
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ ImmBranchType BranchType() const {
+ if (IsCondBranchImm()) {
+ return CondBranchType;
+ } else if (IsUncondBranchImm()) {
+ return UncondBranchType;
+ } else if (IsCompareBranch()) {
+ return CompareBranchType;
+ } else if (IsTestBranch()) {
+ return TestBranchType;
+ } else {
+ return UnknownBranchType;
+ }
+ }
+
+ static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
+ switch (branch_type) {
+ case UncondBranchType:
+ return ImmUncondBranch_width;
+ case CondBranchType:
+ return ImmCondBranch_width;
+ case CompareBranchType:
+ return ImmCmpBranch_width;
+ case TestBranchType:
+ return ImmTestBranch_width;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ }
+
+ // The range of the branch instruction, expressed as 'instr +- range'.
+ static int32_t ImmBranchRange(ImmBranchType branch_type) {
+ return
+ (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
+ kInstructionSize;
+ }
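+  // For example (illustrative), assuming the usual 19-bit conditional branch
+  // immediate: ImmBranchRange(CondBranchType)
+  //     = (1 << (19 + 2)) / 2 - 4 = 1048572 bytes, i.e. roughly +/- 1MB.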
+
+ int ImmBranch() const {
+ switch (BranchType()) {
+ case CondBranchType: return ImmCondBranch();
+ case UncondBranchType: return ImmUncondBranch();
+ case CompareBranchType: return ImmCmpBranch();
+ case TestBranchType: return ImmTestBranch();
+ default: UNREACHABLE();
+ }
+ return 0;
+ }
+
+ bool IsBranchAndLinkToRegister() const {
+ return Mask(UnconditionalBranchToRegisterMask) == BLR;
+ }
+
+ bool IsMovz() const {
+ return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
+ (Mask(MoveWideImmediateMask) == MOVZ_w);
+ }
+
+ bool IsMovk() const {
+ return (Mask(MoveWideImmediateMask) == MOVK_x) ||
+ (Mask(MoveWideImmediateMask) == MOVK_w);
+ }
+
+ bool IsMovn() const {
+ return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+ (Mask(MoveWideImmediateMask) == MOVN_w);
+ }
+
+ bool IsNop(int n) {
+ // A marking nop is an instruction
+ // mov r<n>, r<n>
+ // which is encoded as
+ // orr r<n>, xzr, r<n>
+ return (Mask(LogicalShiftedMask) == ORR_x) &&
+ (Rd() == Rm()) &&
+ (Rd() == n);
+ }
+
+ // Find the PC offset encoded in this instruction. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ // The offset returned is unscaled.
+ ptrdiff_t ImmPCOffset();
+
+ // Find the target of this instruction. 'this' may be a branch or a
+ // PC-relative addressing instruction.
+ Instruction* ImmPCOffsetTarget();
+
+ static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
+ bool IsTargetInImmPCOffsetRange(Instruction* target);
+ // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ void SetImmPCOffsetTarget(Instruction* target);
+ // Patch a literal load instruction to load from 'source'.
+ void SetImmLLiteral(Instruction* source);
+
+ uint8_t* LiteralAddress() {
+ int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ return reinterpret_cast<uint8_t*>(this) + offset;
+ }
+
+ uint32_t Literal32() {
+ uint32_t literal;
+ memcpy(&literal, LiteralAddress(), sizeof(literal));
+
+ return literal;
+ }
+
+ uint64_t Literal64() {
+ uint64_t literal;
+ memcpy(&literal, LiteralAddress(), sizeof(literal));
+
+ return literal;
+ }
+
+ float LiteralFP32() {
+ return rawbits_to_float(Literal32());
+ }
+
+ double LiteralFP64() {
+ return rawbits_to_double(Literal64());
+ }
+
+ Instruction* NextInstruction() {
+ return this + kInstructionSize;
+ }
+
+ Instruction* InstructionAtOffset(int64_t offset) {
+ ASSERT(IsAligned(reinterpret_cast<uintptr_t>(this) + offset,
+ kInstructionSize));
+ return this + offset;
+ }
+
+ template<typename T> static Instruction* Cast(T src) {
+ return reinterpret_cast<Instruction*>(src);
+ }
+
+
+ void SetPCRelImmTarget(Instruction* target);
+ void SetBranchImmTarget(Instruction* target);
+};
+
+
+// Where Instruction looks at instructions generated by the Assembler,
+// InstructionSequence looks at instruction sequences generated by the
+// MacroAssembler.
+class InstructionSequence : public Instruction {
+ public:
+ static InstructionSequence* At(Address address) {
+ return reinterpret_cast<InstructionSequence*>(address);
+ }
+
+ // Sequences generated by MacroAssembler::InlineData().
+ bool IsInlineData() const;
+ uint64_t InlineData() const;
+};
+
+
+// Simulator/Debugger debug instructions ---------------------------------------
+// Each debug marker is represented by a HLT instruction. The immediate comment
+// field in the instruction is used to identify the type of debug marker. Each
+// marker encodes arguments in a different way, as described below.
+
+// Indicate to the Debugger that the instruction is a redirected call.
+const Instr kImmExceptionIsRedirectedCall = 0xca11;
+
+// Represent unreachable code. This is used as a guard in parts of the code that
+// should not be reachable, such as in data encoded inline in the instructions.
+const Instr kImmExceptionIsUnreachable = 0xdebf;
+
+// A pseudo 'printf' instruction. The arguments will be passed to the platform
+// printf method.
+const Instr kImmExceptionIsPrintf = 0xdeb1;
+// Parameters are stored in A64 registers as if the printf pseudo-instruction
+// was a call to the real printf method:
+//
+// x0: The format string, then either of:
+// x1-x7: Optional arguments.
+// d0-d7: Optional arguments.
+//
+// Floating-point and integer arguments are passed in separate sets of
+// registers in AAPCS64 (even for varargs functions), so it is not possible to
+// determine the type or location of each argument without some information
+// about the values that were passed in. This information could be retrieved
+// from the printf format string, but the format string is not trivial to
+// parse, so we encode the relevant information with the HLT instruction.
+// - Type
+// Either kRegister or kFPRegister, but stored as a uint32_t because there's
+// no way to guarantee the size of the CPURegister::RegisterType enum.
+const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
+const unsigned kPrintfLength = 2 * kInstructionSize;
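+// The layout implied by these offsets is (illustrative):
+//   hlt #0xdeb1          ; kImmExceptionIsPrintf
+//   <32-bit type word>   ; kRegister or kFPRegister, at kPrintfTypeOffset
+// with the next real instruction starting at kPrintfLength.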
+
+// A pseudo 'debug' instruction.
+const Instr kImmExceptionIsDebug = 0xdeb0;
+// Parameters are inlined in the code after a debug pseudo-instruction:
+// - Debug code.
+// - Debug parameters.
+// - Debug message string. This is a NULL-terminated ASCII string, padded to
+// kInstructionSize so that subsequent instructions are correctly aligned.
+// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
+// string data.
+const unsigned kDebugCodeOffset = 1 * kInstructionSize;
+const unsigned kDebugParamsOffset = 2 * kInstructionSize;
+const unsigned kDebugMessageOffset = 3 * kInstructionSize;
+
+// Debug parameters.
+// Used without a TRACE_ option, the Debugger will print the arguments only
+// once. Otherwise, TRACE_ENABLE and TRACE_DISABLE will enable or disable
+// tracing before every instruction for the specified LOG_ parameters.
+//
+// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
+// others that were not specified.
+//
+// For example:
+//
+// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
+// will print the registers and fp registers only once.
+//
+// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
+// starts disassembling the code.
+//
+// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
+// adds the general purpose registers to the trace.
+//
+// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
+// stops tracing the registers.
+const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
+enum DebugParameters {
+ NO_PARAM = 0,
+ BREAK = 1 << 0,
+ LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
+ LOG_REGS = 1 << 2, // Log general purpose registers.
+ LOG_FP_REGS = 1 << 3, // Log floating-point registers.
+ LOG_SYS_REGS = 1 << 4, // Log the status flags.
+ LOG_WRITE = 1 << 5, // Log any memory write.
+
+ LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
+ LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
+
+ // Trace control.
+ TRACE_ENABLE = 1 << 6,
+ TRACE_DISABLE = 2 << 6,
+ TRACE_OVERRIDE = 3 << 6
+};
+
+
+} } // namespace v8::internal
+
+
+#endif // V8_A64_INSTRUCTIONS_A64_H_
diff --git a/deps/v8/src/a64/instrument-a64.cc b/deps/v8/src/a64/instrument-a64.cc
new file mode 100644
index 0000000000..93892d9360
--- /dev/null
+++ b/deps/v8/src/a64/instrument-a64.cc
@@ -0,0 +1,618 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "a64/instrument-a64.h"
+
+namespace v8 {
+namespace internal {
+
+Counter::Counter(const char* name, CounterType type)
+ : count_(0), enabled_(false), type_(type) {
+ ASSERT(name != NULL);
+ strncpy(name_, name, kCounterNameMaxLength);
+}
+
+
+void Counter::Enable() {
+ enabled_ = true;
+}
+
+
+void Counter::Disable() {
+ enabled_ = false;
+}
+
+
+bool Counter::IsEnabled() {
+ return enabled_;
+}
+
+
+void Counter::Increment() {
+ if (enabled_) {
+ count_++;
+ }
+}
+
+
+uint64_t Counter::count() {
+ uint64_t result = count_;
+ if (type_ == Gauge) {
+ // If the counter is a Gauge, reset the count after reading.
+ count_ = 0;
+ }
+ return result;
+}
+
+
+const char* Counter::name() {
+ return name_;
+}
+
+
+CounterType Counter::type() {
+ return type_;
+}
+
+
+typedef struct {
+ const char* name;
+ CounterType type;
+} CounterDescriptor;
+
+
+static const CounterDescriptor kCounterList[] = {
+ {"Instruction", Cumulative},
+
+ {"Move Immediate", Gauge},
+ {"Add/Sub DP", Gauge},
+ {"Logical DP", Gauge},
+ {"Other Int DP", Gauge},
+ {"FP DP", Gauge},
+
+ {"Conditional Select", Gauge},
+ {"Conditional Compare", Gauge},
+
+ {"Unconditional Branch", Gauge},
+ {"Compare and Branch", Gauge},
+ {"Test and Branch", Gauge},
+ {"Conditional Branch", Gauge},
+
+ {"Load Integer", Gauge},
+ {"Load FP", Gauge},
+ {"Load Pair", Gauge},
+ {"Load Literal", Gauge},
+
+ {"Store Integer", Gauge},
+ {"Store FP", Gauge},
+ {"Store Pair", Gauge},
+
+ {"PC Addressing", Gauge},
+ {"Other", Gauge},
+ {"SP Adjust", Gauge},
+};
+
+
+Instrument::Instrument(const char* datafile, uint64_t sample_period)
+ : output_stream_(stderr), sample_period_(sample_period) {
+
+ // Set up the output stream. If datafile is non-NULL, use that file. If it
+ // can't be opened, or datafile is NULL, use stderr.
+ if (datafile != NULL) {
+ output_stream_ = fopen(datafile, "w");
+ if (output_stream_ == NULL) {
+ fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
+ output_stream_ = stderr;
+ }
+ }
+
+ static const int num_counters =
+ sizeof(kCounterList) / sizeof(CounterDescriptor);
+
+ // Dump an instrumentation description comment at the top of the file.
+ fprintf(output_stream_, "# counters=%d\n", num_counters);
+ fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
+
+ // Construct Counter objects from counter description array.
+ for (int i = 0; i < num_counters; i++) {
+ Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
+ counters_.push_back(counter);
+ }
+
+ DumpCounterNames();
+}
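+
+// A data file written by this instrumentation starts with a short header,
+// followed by one line of counter names and then one comma-separated line of
+// counts per sample period, roughly (illustrative):
+//   # counters=22
+//   # sample_period=4096
+//   Instruction,Move Immediate,Add/Sub DP,...
+//   4096,12,85,...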
+
+
+Instrument::~Instrument() {
+ // Dump any remaining instruction data to the output file.
+ DumpCounters();
+
+ // Free all the counter objects.
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ delete *it;
+ }
+
+ if (output_stream_ != stderr) {
+ fclose(output_stream_);
+ }
+}
+
+
+void Instrument::Update() {
+ // Increment the instruction counter, and dump all counters if a sample period
+ // has elapsed.
+ static Counter* counter = GetCounter("Instruction");
+ ASSERT(counter->type() == Cumulative);
+ counter->Increment();
+
+ if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
+ DumpCounters();
+ }
+}
+
+
+void Instrument::DumpCounters() {
+ // Iterate through the counter objects, dumping their values to the output
+ // stream.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::DumpCounterNames() {
+ // Iterate through the counter objects, dumping the counter names to the
+ // output stream.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ fprintf(output_stream_, "%s,", (*it)->name());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::HandleInstrumentationEvent(unsigned event) {
+ switch (event) {
+ case InstrumentStateEnable: Enable(); break;
+ case InstrumentStateDisable: Disable(); break;
+ default: DumpEventMarker(event);
+ }
+}
+
+
+void Instrument::DumpEventMarker(unsigned marker) {
+  // Dump an event marker to the output stream as a specially formatted comment
+  // line.
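+  // For example (illustrative), marker 0x4e45 is dumped as "# EN @ <count>".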
+ static Counter* counter = GetCounter("Instruction");
+
+ fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
+ (marker >> 8) & 0xff, counter->count());
+}
+
+
+Counter* Instrument::GetCounter(const char* name) {
+ // Get a Counter object by name from the counter list.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ if (strcmp((*it)->name(), name) == 0) {
+ return *it;
+ }
+ }
+
+ // A Counter by that name does not exist: print an error message to stderr
+ // and the output file, and exit.
+ static const char* error_message =
+ "# Error: Unknown counter \"%s\". Exiting.\n";
+ fprintf(stderr, error_message, name);
+ fprintf(output_stream_, error_message, name);
+ exit(1);
+}
+
+
+void Instrument::Enable() {
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ (*it)->Enable();
+ }
+}
+
+
+void Instrument::Disable() {
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ (*it)->Disable();
+ }
+}
+
+
+void Instrument::VisitPCRelAddressing(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("PC Addressing");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubImmediate(Instruction* instr) {
+ Update();
+ static Counter* sp_counter = GetCounter("SP Adjust");
+ static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+ if (((instr->Mask(AddSubOpMask) == SUB) ||
+ (instr->Mask(AddSubOpMask) == ADD)) &&
+ (instr->Rd() == 31) && (instr->Rn() == 31)) {
+ // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+ sp_counter->Increment();
+ } else {
+ add_sub_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLogicalImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitMoveWideImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Move Immediate");
+
+ if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
+ unsigned imm = instr->ImmMoveWide();
+ HandleInstrumentationEvent(imm);
+ } else {
+ counter->Increment();
+ }
+}
+
+
+void Instrument::VisitBitfield(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitExtract(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCompareBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Compare and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitTestBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Test and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitSystem(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitException(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStorePair(Instruction* instr) {
+ static Counter* load_pair_counter = GetCounter("Load Pair");
+ static Counter* store_pair_counter = GetCounter("Store Pair");
+ if (instr->Mask(LoadStorePairLBit) != 0) {
+ load_pair_counter->Increment();
+ } else {
+ store_pair_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadLiteral(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Load Literal");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStore(Instruction* instr) {
+ static Counter* load_int_counter = GetCounter("Load Integer");
+ static Counter* store_int_counter = GetCounter("Store Integer");
+ static Counter* load_fp_counter = GetCounter("Load FP");
+ static Counter* store_fp_counter = GetCounter("Store FP");
+
+ switch (instr->Mask(LoadStoreOpMask)) {
+ case STRB_w: // Fall through.
+ case STRH_w: // Fall through.
+ case STR_w: // Fall through.
+ case STR_x: store_int_counter->Increment(); break;
+ case STR_s: // Fall through.
+ case STR_d: store_fp_counter->Increment(); break;
+ case LDRB_w: // Fall through.
+ case LDRH_w: // Fall through.
+ case LDR_w: // Fall through.
+ case LDR_x: // Fall through.
+ case LDRSB_x: // Fall through.
+ case LDRSH_x: // Fall through.
+ case LDRSW_x: // Fall through.
+ case LDRSB_w: // Fall through.
+ case LDRSH_w: load_int_counter->Increment(); break;
+ case LDR_s: // Fall through.
+ case LDR_d: load_fp_counter->Increment(); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLogicalShifted(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubShifted(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubExtended(Instruction* instr) {
+ Update();
+ static Counter* sp_counter = GetCounter("SP Adjust");
+ static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+ if (((instr->Mask(AddSubOpMask) == SUB) ||
+ (instr->Mask(AddSubOpMask) == ADD)) &&
+ (instr->Rd() == 31) && (instr->Rn() == 31)) {
+ // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+ sp_counter->Increment();
+ } else {
+ add_sub_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitAddSubWithCarry(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalSelect(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing1Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing2Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing3Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPCompare(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalCompare(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalSelect(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPIntegerConvert(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnallocated(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnimplemented(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/a64/instrument-a64.h b/deps/v8/src/a64/instrument-a64.h
new file mode 100644
index 0000000000..08dc1b2ad1
--- /dev/null
+++ b/deps/v8/src/a64/instrument-a64.h
@@ -0,0 +1,108 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_INSTRUMENT_A64_H_
+#define V8_A64_INSTRUMENT_A64_H_
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/decoder-a64.h"
+#include "a64/constants-a64.h"
+#include "a64/instrument-a64.h"
+
+namespace v8 {
+namespace internal {
+
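+// Maximum length of a counter name, and the default sampling period (in
+// visited instructions) used when no explicit period is passed to the
+// Instrument constructor.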
+const int kCounterNameMaxLength = 256;
+const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
+
+
+enum InstrumentState {
+ InstrumentStateDisable = 0,
+ InstrumentStateEnable = 1
+};
+
+
+enum CounterType {
+ Gauge = 0, // Gauge counters reset themselves after reading.
+ Cumulative = 1 // Cumulative counters keep their value after reading.
+};
+
+
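+// A named event counter. A counter can be enabled or disabled, and behaves
+// as either a Gauge or a Cumulative counter as described by CounterType
+// above.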
+class Counter {
+ public:
+ Counter(const char* name, CounterType type = Gauge);
+
+ void Increment();
+ void Enable();
+ void Disable();
+ bool IsEnabled();
+ uint64_t count();
+ const char* name();
+ CounterType type();
+
+ private:
+ char name_[kCounterNameMaxLength];
+ uint64_t count_;
+ bool enabled_;
+ CounterType type_;
+};
+
+
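+// A DecoderVisitor that groups the instructions it visits into named
+// counters. Every Visit* hook calls Update() once and then increments the
+// counter for its instruction class; counter values are dumped to
+// output_stream_.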
+class Instrument: public DecoderVisitor {
+ public:
+ explicit Instrument(const char* datafile = NULL,
+ uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
+ ~Instrument();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ void Update();
+ void Enable();
+ void Disable();
+ void DumpCounters();
+ void DumpCounterNames();
+ void DumpEventMarker(unsigned marker);
+ void HandleInstrumentationEvent(unsigned event);
+ Counter* GetCounter(const char* name);
+
+ void InstrumentLoadStore(Instruction* instr);
+ void InstrumentLoadStorePair(Instruction* instr);
+
+ std::list<Counter*> counters_;
+
+  FILE* output_stream_;
+ uint64_t sample_period_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_INSTRUMENT_A64_H_
diff --git a/deps/v8/src/a64/lithium-a64.cc b/deps/v8/src/a64/lithium-a64.cc
new file mode 100644
index 0000000000..fa351e3928
--- /dev/null
+++ b/deps/v8/src/a64/lithium-a64.cc
@@ -0,0 +1,2449 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-allocator-inl.h"
+#include "a64/lithium-a64.h"
+#include "a64/lithium-codegen-a64.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
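+// Generate the CompileToNative stub for every concrete lithium instruction;
+// each stub simply forwards to the matching LCodeGen::Do* method.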
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(),
+ true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if ((parallel_moves_[i] != NULL) && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ hydrogen()->access().PrintTo(stream);
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("%p -> %p", *original_map(), *transitioned_map());
+}
+
+
+template<int T>
+void LUnaryMathOperation<T>::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
+ case Token::SHL: return "shl-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void LChunkBuilder::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg));
+}
+
+
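+// Wrap a hydrogen value in an LUnallocated operand with the given policy.
+// Values that are emitted at their uses have no instruction of their own, so
+// they are visited here instead.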
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAndClobber(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant() ? UseConstant(value) : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant() ? UseConstant(value) : UseRegisterAtStart(value);
+}
+
+
+LConstantOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? UseConstant(value)
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr, int index) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+ LTemplateResultInstruction<1>* instr, Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
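+// Mark an instruction as a call: give it a pointer map and, if the hydrogen
+// instruction has observable side effects, remember the following HSimulate
+// so the lazy deoptimization environment can be attached to this call later.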
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+ if (hinstr->HasObservableSideEffects()) {
+ ASSERT(hinstr->next()->IsSimulate());
+ HSimulate* sim = HSimulate::cast(hinstr->next());
+ ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+ ASSERT(pending_deoptimization_ast_id_.IsNone());
+ instruction_pending_deoptimization_environment_ = instr;
+ pending_deoptimization_ast_id_ = sim->ast_id();
+ }
+
+  // If the instruction does not have side effects, lazy deoptimization after
+  // the call will try to deoptimize to the point before the call. Thus we
+  // still need to attach an environment to this call even if the call
+  // sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+ return instr;
+}
+
+
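+// Allocate a fresh virtual register to use as a temporary. If the allocator
+// has run out of virtual registers, the build is aborted and a dummy vreg of
+// 0 is returned so that compilation can unwind cleanly.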
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+int LPlatformChunk::GetNextSpillIndex() {
+ return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex();
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ ASSERT(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ ASSERT(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new(zone()) LPlatformChunk(info_, graph_);
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ // TODO(all): GetNextSpillIndex just increments a field. It has no other
+ // side effects, so we should get rid of this loop.
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex();
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ DoBasicBlock(blocks->at(i));
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
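+// Set up the incoming environment of the block, copying or merging it from
+// the predecessors as needed, and then translate each hydrogen instruction
+// in the block into lithium instructions.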
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
+ ASSERT(is_building());
+ current_block_ = block;
+
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+
+    // Only copy the environment if it is used again later.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
+ (pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+
+ // Translate hydrogen instructions to lithium ones for the current block.
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while ((current != NULL) && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ current_block_ = NULL;
+}
+
+
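+// Translate a single hydrogen instruction. Instructions that can be replaced
+// with dummy uses are lowered to LDummy/LDummyUse; everything else is
+// compiled through HInstruction::CompileToLithium.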
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+    // interference between the split child and the use-at-start (it would if
+    // it were just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+ }
+ current_instruction_ = old_current;
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator,
+ &objects_to_materialize));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = UseFixedDouble(instr->right(), d1);
+ LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
+ (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
+ (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
+ (op == Token::BIT_OR) || (op == Token::BIT_AND) ||
+ (op == Token::BIT_XOR));
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+
+ // TODO(jbramley): Once we've implemented smi support for all arithmetic
+ // operations, these assertions should check IsTagged().
+ ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(left->representation().IsSmiOrTagged());
+ ASSERT(right->representation().IsSmiOrTagged());
+
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left_operand = UseFixed(left, x1);
+ LOperand* right_operand = UseFixed(right, x0);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ // TODO(all): Try to improve this, like ARM r17925.
+ info()->MarkAsRequiresFrame();
+ LOperand* args = NULL;
+ LOperand* length = NULL;
+ LOperand* index = NULL;
+ LOperand* temp = NULL;
+
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ args = UseRegisterAtStart(instr->arguments());
+ length = UseConstant(instr->length());
+ index = UseConstant(instr->index());
+ } else {
+ args = UseRegister(instr->arguments());
+ length = UseRegisterAtStart(instr->length());
+ index = UseRegisterOrConstantAtStart(instr->index());
+ temp = TempRegister();
+ }
+
+ return DefineAsRegister(
+ new(zone()) LAccessArgumentsAt(args, length, index, temp));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right =
+ UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ LInstruction* result = instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LAddS(left, right)) :
+ DefineAsRegister(new(zone()) LAddI(left, right));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return DefineAsRegister(new(zone()) LAddE(left, right));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseRegisterOrConstant(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), x1);
+ LOperand* receiver = UseFixed(instr->receiver(), x0);
+ LOperand* length = UseFixed(instr->length(), x2);
+ LOperand* elements = UseFixed(instr->elements(), x3);
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* temp = instr->from_inlined() ? NULL : TempRegister();
+ return DefineAsRegister(new(zone()) LArgumentsElements(temp));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right =
+ UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ return instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LBitS(left, right)) :
+ DefineAsRegister(new(zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  // V8 expects a label to be generated for each basic block.
+  // This is used in some places, like LAllocator::IsBlockBoundary in
+  // lithium-allocator.cc.
+ return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ LOperand* value = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = UseRegister(instr->length());
+ return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+
+ if (r.IsInteger32() || r.IsSmi() || r.IsDouble()) {
+ // These representations have simple checks that cannot deoptimize.
+ return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+ } else {
+ ASSERT(r.IsTagged());
+ if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
+ type.IsHeapNumber()) {
+ // These types have simple checks that cannot deoptimize.
+ return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+ }
+
+ if (type.IsString()) {
+ // This type cannot deoptimize, but needs a scratch register.
+ return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
+ }
+
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
+ LOperand* temp1 = needs_temps ? TempRegister() : NULL;
+ LOperand* temp2 = needs_temps ? TempRegister() : NULL;
+
+ if (expected.IsGeneric() || expected.IsEmpty()) {
+ // The generic case cannot deoptimize because it already supports every
+ // possible input type.
+ ASSERT(needs_temps);
+ return new(zone()) LBranch(UseRegister(value), temp1, temp2);
+ } else {
+ return AssignEnvironment(
+ new(zone()) LBranch(UseRegister(value), temp1, temp2));
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), x1);
+
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
+ ops,
+ zone());
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), x1);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The call to CallConstructStub will expect the constructor to be in x1.
+ LOperand* constructor = UseFixed(instr->constructor(), x1);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The call to ArrayConstructCode will expect the constructor to be in x1.
+ LOperand* constructor = UseFixed(instr->constructor(), x1);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
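+// Lower a representation change. The (from, to) pair selects between
+// tagging, untagging, truncation and smi conversions, and determines whether
+// the lowered instruction can deoptimize and therefore needs an environment.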
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->value()->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ LInstruction* res = NULL;
+
+ if (instr->value()->type().IsSmi() ||
+ instr->value()->representation().IsSmi()) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 =
+ instr->CanTruncateToInt32() ? TempRegister() : FixedTemp(d24);
+ res = DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
+ res = AssignEnvironment(res);
+ }
+
+ return res;
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ } else {
+ ASSERT(to.IsSmi() || to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+
+ if (instr->CanTruncateToInt32()) {
+ LTruncateDoubleToIntOrSmi* result =
+ new(zone()) LTruncateDoubleToIntOrSmi(value);
+ return DefineAsRegister(result);
+ } else {
+ LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegister(instr->value());
+ LNumberTagU* result = new(zone()) LNumberTagU(value,
+ TempRegister(),
+ TempRegister());
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
+ (kMaxInt == Smi::kMaxValue));
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ LUint32ToSmi* result = new(zone()) LUint32ToSmi(value);
+ return AssignEnvironment(DefineAsRegister(result));
+ } else {
+ // This cannot deoptimize because an A64 smi can represent any int32.
+ return DefineAsRegister(new(zone()) LInteger32ToSmi(value));
+ }
+ } else {
+ ASSERT(to.IsDouble());
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegisterAtStart(instr->value())));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(UseRegisterAtStart(instr->value())));
+ }
+ }
+ }
+
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ // We only need a temp register if the target is in new space, but we can't
+ // dereference the handle to test that here.
+ // TODO(all): Check these constraints. The temp register is not always used.
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ return AssignEnvironment(new(zone()) LCheckValue(value, temp));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LInstruction* result = new(zone()) LCheckInstanceType(value, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->CanOmitMapChecks()) {
+ // LCheckMaps does nothing in this case.
+ return new(zone()) LCheckMaps(NULL);
+ } else {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+
+ if (instr->has_migration_target()) {
+ info()->MarkAsDeferredCalling();
+ LInstruction* result = new(zone()) LCheckMaps(value, temp);
+ return AssignPointerMap(AssignEnvironment(result));
+ } else {
+ return AssignEnvironment(new(zone()) LCheckMaps(value, temp));
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsSmiOrTagged());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LClampTToUint8(reg,
+ TempRegister(),
+ FixedTemp(d24))));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LClassOfTestAndBranch(value,
+ TempRegister(),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+
+  // TODO(all): This instruction has been replaced by HCompareNumericAndBranch
+  // on bleeding_edge. We should update this when we do the rebase.
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+    // TODO(all): In fact the only case that we can handle more efficiently is
+    // when one of the operands is the constant 0. Currently the MacroAssembler
+    // will be able to cope with any constant by loading it into an internal
+    // scratch register. This means that if the constant is used more than
+    // once, it will be loaded multiple times. Unfortunately crankshaft already
+    // duplicates constant loads, but we should modify the code below once this
+    // issue has been addressed in crankshaft.
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->representation().IsTagged()) {
+ return new(zone()) LCmpHoleAndBranchT(value);
+ } else {
+ LOperand* temp = TempRegister();
+ return new(zone()) LCmpHoleAndBranchD(value, temp);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new(zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new(zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* object = UseFixed(instr->value(), x0);
+ LDateField* result = new(zone()) LDateField(object, instr->index());
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsInteger32()) {
+ // TODO(all): Update this case to support smi inputs.
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ if (instr->RightIsPowerOf2()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegister(instr->left());
+ LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineAsRegister(div));
+ }
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+ ? NULL : TempRegister();
+ LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
+ instr->function(),
+ undefined,
+ instr->inlining_kind());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if ((instr->arguments_var() != NULL) &&
+ instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(
+ HForceRepresentation* instr) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LInstanceOf* result = new(zone()) LInstanceOf(
+ context,
+ UseFixed(instr->left(), InstanceofStub::left()),
+ UseFixed(instr->right(), InstanceofStub::right()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result = new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), InstanceofStub::left()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
+ LOperand* function = UseFixed(instr->function(), x1);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new(zone()) LIsConstructCallAndBranch(TempRegister(), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ return new(zone()) LIsObjectAndBranch(value, temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+ HEnvironment* env = current_block_->last_environment();
+
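+  // If the inlined function's arguments were pushed onto the stack, emit an
+  // LDrop so that they are removed when the inlined frame is left.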
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LDrop(argument_count);
+ ASSERT(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer =
+ current_block_->last_environment()->DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+
+ return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ LOperand* function = UseRegister(instr->function());
+ LOperand* temp = TempRegister();
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LLoadFunctionPrototype(function, temp)));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell();
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* global_object = UseFixed(instr->global_object(), x0);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* elements = UseRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ if (!instr->is_typed_elements()) {
+ if (instr->representation().IsDouble()) {
+ LOperand* temp = (!instr->key()->IsConstant() ||
+ instr->RequiresHoleCheck())
+ ? TempRegister()
+ : NULL;
+
+ LLoadKeyedFixedDouble* result =
+ new(zone()) LLoadKeyedFixedDouble(elements, key, temp);
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+ } else {
+ ASSERT(instr->representation().IsSmiOrTagged() ||
+ instr->representation().IsInteger32());
+ LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ LLoadKeyedFixed* result =
+ new(zone()) LLoadKeyedFixed(elements, key, temp);
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+ }
+ } else {
+ ASSERT((instr->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+
+ LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ LLoadKeyedExternal* result =
+ new(zone()) LLoadKeyedExternal(elements, key, temp);
+ // An unsigned int array load might overflow and cause a deopt. Make sure it
+ // has an environment.
+ if (instr->RequiresHoleCheck() ||
+ elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) {
+ return AssignEnvironment(DefineAsRegister(result));
+ } else {
+ return DefineAsRegister(result);
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x1);
+ LOperand* key = UseFixed(instr->key(), x0);
+
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), x0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(object));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), x0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ HValue* right = instr->right();
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(right);
+ LOperand* remainder = TempRegister();
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* hmod) {
+ HValue* hleft = hmod->left();
+ HValue* hright = hmod->right();
+
+ // TODO(jbramley): Add smi support.
+ if (hmod->representation().IsInteger32()) {
+ ASSERT(hleft->representation().IsInteger32());
+    ASSERT(hright->representation().IsInteger32());
+ LOperand* left_op;
+ LOperand* right_op;
+
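+    // A power-of-2 divisor can be kept as a constant operand; otherwise both
+    // operands need registers.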
+ if (hmod->RightIsPowerOf2()) {
+ left_op = UseRegisterAtStart(hleft);
+ right_op = UseConstant(hright);
+ } else {
+ right_op = UseRegister(hright);
+ left_op = UseRegister(hleft);
+ }
+
+ LModI* lmod = new(zone()) LModI(left_op, right_op);
+
+ if (hmod->right()->CanBeZero() ||
+ (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ hmod->left()->CanBeNegative() && hmod->CanBeZero())) {
+ AssignEnvironment(lmod);
+ }
+ return DefineAsRegister(lmod);
+
+ } else if (hmod->representation().IsSmiOrTagged()) {
+ return DoArithmeticT(Token::MOD, hmod);
+ } else {
+ return DoArithmeticD(Token::MOD, hmod);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool needs_environment = can_overflow || bailout_on_minus_zero;
+
+ HValue* least_const = instr->BetterLeftOperand();
+ HValue* most_const = instr->BetterRightOperand();
+
+ LOperand* left = UseRegisterAtStart(least_const);
+
+    // LMulConstIS can handle a subset of constants:
+    //  With support for overflow detection:
+    //    -1, 0, 1, 2
+    //  Without support for overflow detection:
+    //    2^n, 2^n + 1, 2^n - 1, and their negations.
+ if (most_const->IsConstant()) {
+ int32_t constant = HConstant::cast(most_const)->Integer32Value();
+ int32_t constant_abs = (constant >= 0) ? constant : -constant;
+
+ if (((constant >= -1) && (constant <= 2)) ||
+ (!can_overflow && (IsPowerOf2(constant_abs) ||
+ IsPowerOf2(constant_abs + 1) ||
+ IsPowerOf2(constant_abs - 1)))) {
+ LConstantOperand* right = UseConstant(most_const);
+ LMulConstIS* mul = new(zone()) LMulConstIS(left, right);
+ if (needs_environment) AssignEnvironment(mul);
+ return DefineAsRegister(mul);
+ }
+ }
+
+ // LMulI/S can handle all cases, but it requires that a register is
+ // allocated for the second operand.
+ LInstruction* result;
+ if (instr->representation().IsSmi()) {
+ // TODO(jbramley/rmcilroy): Fix LMulS so we can UseRegisterAtStart here.
+ LOperand* right = UseRegister(most_const);
+ result = DefineAsRegister(new(zone()) LMulS(left, right));
+ } else {
+ LOperand* right = UseRegisterAtStart(most_const);
+ result = DefineAsRegister(new(zone()) LMulI(left, right));
+ }
+ if (needs_environment) AssignEnvironment(result);
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk_->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
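+    // Register parameters only occur in stubs; the stub's interface descriptor
+    // specifies the fixed register for each parameter index.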
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor->GetParameterRegister(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = exponent_type.IsInteger32()
+ ? UseFixed(instr->right(), x12)
+ : exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), d1)
+ : UseFixed(instr->right(), x11);
+ LPower* result = new(zone()) LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+ LOperand* argument = UseRegister(instr->argument());
+ return new(zone()) LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), x0), context,
+ parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ // TODO(all): Use UseRegisterAtStart and UseRegisterOrConstantAtStart here.
+ // We cannot do it now because the debug code in the implementation changes
+ // temp.
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LOperand* temp = TempRegister();
+ LSeqStringGetChar* result =
+ new(zone()) LSeqStringGetChar(string, index, temp);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegister(instr->index())
+ : UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ LOperand* temp = TempRegister();
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(context, string, index, value, temp);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ return DoArithmeticT(op, instr);
+ }
+
+ ASSERT(instr->representation().IsInteger32() ||
+ instr->representation().IsSmi());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ LOperand* left = instr->representation().IsSmi()
+ ? UseRegister(instr->left())
+ : UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ LOperand* temp = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ right = UseConstant(right_value);
+ HConstant* constant = HConstant::cast(right_value);
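+    // Only the low five bits of a constant shift amount are significant, as in
+    // JavaScript shift semantics.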
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseRegisterAtStart(right_value);
+ if (op == Token::ROR) {
+ temp = TempRegister();
+ }
+ }
+
+  // Shift operations can only deoptimize if we do a logical shift right by 0
+  // and the result cannot be truncated to int32.
+ bool does_deopt = false;
+ if ((op == Token::SHR) && (constant_value == 0)) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result;
+ if (instr->representation().IsInteger32()) {
+ result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ } else {
+ ASSERT(instr->representation().IsSmi());
+ result = DefineAsRegister(
+ new(zone()) LShiftS(op, left, right, temp, does_deopt));
+ }
+
+ return does_deopt ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+  // If there is an instruction pending a deoptimization environment, create a
+  // lazy bailout instruction to capture the environment.
+ if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+ LInstruction* result = new(zone()) LLazyBailout;
+ result = AssignEnvironment(result);
+ // Store the lazy deopt environment with the instruction if needed. Right
+ // now it is only used for LInstanceOfKnownGlobal.
+ instruction_pending_deoptimization_environment_->
+ SetDeferredLazyDeoptimizationEnvironment(result->environment());
+ instruction_pending_deoptimization_environment_ = NULL;
+ pending_deoptimization_ast_id_ = BailoutId::None();
+ return result;
+ }
+
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) {
+ LOperand* function = UseRegister(instr->function());
+ LOperand* code_object = UseRegisterAtStart(instr->code_object());
+ LOperand* temp = TempRegister();
+ return new(zone()) LStoreCodeEntry(function, code_object, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* temp = TempRegister();
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ // TODO(all): Replace these constraints when RecordWriteStub has been
+ // rewritten.
+ context = UseRegisterAndClobber(instr->context());
+ value = UseRegisterAndClobber(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->RequiresHoleCheck()) {
+ return AssignEnvironment(new(zone()) LStoreGlobalCell(value,
+ TempRegister(),
+ TempRegister()));
+ } else {
+ return new(zone()) LStoreGlobalCell(value, TempRegister(), NULL);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ LOperand* temp = NULL;
+ LOperand* elements = NULL;
+ LOperand* val = NULL;
+ LOperand* key = NULL;
+
+ if (!instr->is_typed_elements() &&
+ instr->value()->representation().IsTagged() &&
+ instr->NeedsWriteBarrier()) {
+ // RecordWrite() will clobber all registers.
+ elements = UseRegisterAndClobber(instr->elements());
+ val = UseRegisterAndClobber(instr->value());
+ key = UseRegisterAndClobber(instr->key());
+ } else {
+ elements = UseRegister(instr->elements());
+ val = UseRegister(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+
+ if (instr->is_typed_elements()) {
+ ASSERT((instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
+ temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
+
+ } else if (instr->value()->representation().IsDouble()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+
+ // The constraint used here is UseRegister, even though the StoreKeyed
+ // instruction may canonicalize the value in the register if it is a NaN.
+ temp = TempRegister();
+ return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
+
+ } else {
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged() ||
+ instr->value()->representation().IsInteger32());
+
+ temp = TempRegister();
+ return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x2);
+ LOperand* key = UseFixed(instr->key(), x1);
+ LOperand* value = UseFixed(instr->value(), x0);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ // TODO(jbramley): Optimize register usage in this instruction. For now, it
+ // allocates everything that it might need because it keeps changing in the
+ // merge and keeping it valid is time-consuming.
+
+ // TODO(jbramley): It might be beneficial to allow value to be a constant in
+ // some cases. x64 makes use of this with FLAG_track_fields, for example.
+
+ LOperand* object = UseRegister(instr->object());
+ LOperand* value = UseRegisterAndClobber(instr->value());
+ LOperand* temp0 = TempRegister();
+ LOperand* temp1 = TempRegister();
+
+ LStoreNamedField* result =
+ new(zone()) LStoreNamedField(object, value, temp0, temp1);
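+  // If the field is tracked as a heap-object field, storing a value that is
+  // not known to be a heap object can deoptimize, so an environment is needed
+  // in that case.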
+ if (FLAG_track_heap_object_fields &&
+ instr->field_representation().IsHeapObject() &&
+ !instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x1);
+ LOperand* value = UseFixed(instr->value(), x0);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+
+ LStringAdd* result = new(zone()) LStringAdd(context, left, right);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseRegisterAndClobber(instr->string());
+ LOperand* index = UseRegisterAndClobber(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  // TODO(all): Use UseRegisterAtStart here and remove the assert in codegen.
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+ LStringCompareAndBranch* result =
+ new(zone()) LStringCompareAndBranch(context, left, right);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left;
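+    // When the left operand is the constant zero (0 - x, i.e. a negation), it
+    // does not need a register.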
+ if (instr->left()->IsConstant() &&
+ (HConstant::cast(instr->left())->Integer32Value() == 0)) {
+ left = UseConstant(instr->left());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ }
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ LInstruction* result = instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LSubS(left, right)) :
+ DefineAsRegister(new(zone()) LSubI(left, right));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ if (instr->HasNoUses()) {
+ return NULL;
+ } else {
+ return DefineAsRegister(new(zone()) LThisFunction);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), x0);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL,
+ TempRegister(), TempRegister());
+ return result;
+ } else {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, context, TempRegister());
+ return AssignPointerMap(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp1, temp2);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // TODO(jbramley): In ARM, this uses UseFixed to force the input to x0.
+ // However, LCodeGen::DoTypeof just pushes it to the stack (for CallRuntime)
+ // anyway, so the input doesn't have to be in x0. We might be able to improve
+ // the ARM back-end a little by relaxing this restriction.
+ LTypeof* result =
+ new(zone()) LTypeof(context, UseRegisterAtStart(instr->value()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ // We only need temp registers in some cases, but we can't dereference the
+ // instr->type_literal() handle to test that here.
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ return new(zone()) LTypeofIsAndBranch(
+ UseRegister(instr->value()), temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathAbs: {
+ Representation r = instr->representation();
+ if (r.IsTagged()) {
+ // The tagged case might need to allocate a HeapNumber for the result,
+ // so it is handled by a separate LInstruction.
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+ LMathAbsTagged* result =
+ new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(input);
+ if (r.IsDouble()) {
+ // The Double case can never fail so it doesn't need an environment.
+ return DefineAsRegister(result);
+ } else {
+ ASSERT(r.IsInteger32() || r.IsSmi());
+ // The Integer32 and Smi cases need an environment because they can
+ // deoptimize on minimum representable number.
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ }
+ }
+ case kMathExp: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ // TODO(all): Implement TempFPRegister.
+ LOperand* double_temp1 = FixedTemp(d24); // This was chosen arbitrarily.
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(input, double_temp1,
+ temp1, temp2, temp3);
+ return DefineAsRegister(result);
+ }
+ case kMathFloor: {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->value()->representation().IsDouble());
+ // TODO(jbramley): A64 can easily handle a double argument with frintm,
+ // but we're never asked for it here. At the moment, we fall back to the
+ // runtime if the result doesn't fit, like the other architectures.
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ }
+ case kMathLog: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ LMathLog* result = new(zone()) LMathLog(input);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ }
+ case kMathPowHalf: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ return DefineAsRegister(new(zone()) LMathPowHalf(input));
+ }
+ case kMathRound: {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->value()->representation().IsDouble());
+ // TODO(jbramley): As with kMathFloor, we can probably handle double
+ // results fairly easily, but we are never asked for them.
+ LOperand* input = UseRegister(instr->value());
+      LOperand* temp = FixedTemp(d24); // Chosen arbitrarily.
+ LMathRound* result = new(zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ case kMathSqrt: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathSqrt(input));
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk_->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
+ }
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // Assign object to a fixed register different from those already used in
+ // LForInPrepareMap.
+ LOperand* object = UseFixed(instr->enumerable(), x0);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegister(instr->map());
+ LOperand* temp = TempRegister();
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map, temp));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ LOperand* index = UseRegister(instr->index());
+ return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegister(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/a64/lithium-a64.h b/deps/v8/src/a64/lithium-a64.h
new file mode 100644
index 0000000000..33d11e6c5d
--- /dev/null
+++ b/deps/v8/src/a64/lithium-a64.h
@@ -0,0 +1,2967 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_LITHIUM_A64_H_
+#define V8_A64_LITHIUM_A64_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "lithium.h"
+#include "safepoint-table.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddE) \
+ V(AddI) \
+ V(AddS) \
+ V(Allocate) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BitS) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallFunction) \
+ V(CallJSFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CallWithDescriptor) \
+ V(CheckInstanceType) \
+ V(CheckMapValue) \
+ V(CheckMaps) \
+ V(CheckNonSmi) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CmpHoleAndBranchD) \
+ V(CmpHoleAndBranchT) \
+ V(CmpMapAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpT) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivI) \
+ V(DoubleToIntOrSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(Integer32ToSmi) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyedExternal) \
+ V(LoadKeyedFixed) \
+ V(LoadKeyedFixedDouble) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadRoot) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathAbsTagged) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathFloorOfDiv) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(ModI) \
+ V(MulConstIS) \
+ V(MulI) \
+ V(MulS) \
+ V(NumberTagD) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(ShiftS) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreGlobalCell) \
+ V(StoreKeyedExternal) \
+ V(StoreKeyedFixed) \
+ V(StoreKeyedFixedDouble) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(SubS) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(TruncateDoubleToIntOrSmi) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(this->hydrogen_value()); \
+ }
+
+
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) { }
+
+ virtual ~LInstruction() { }
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ bool IsMarkedAsCall() const { return IsCall(); }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ private:
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int32_t bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return (R != 0) && (result() != NULL);
+ }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+};
+
+
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ DECLARE_HYDROGEN_ACCESSOR(ControlInstruction);
+
+ Label* false_label_;
+ Label* true_label_;
+};
+
+
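+// A gap holds the parallel moves that the register allocator inserts around an
+// instruction, at four inner positions (BEFORE, START, END and AFTER).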
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block)
+ : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ // Can't use the DECLARE-macro here because of sub-classes.
+ virtual bool IsGap() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+
+class LInstructionGap V8_FINAL : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+
+ int block_id() const { return block_->block_id(); }
+
+ private:
+ HBasicBlock* block_;
+};
+
+
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
+ }
+ int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+ int gap_instructions_size_;
+};
+
+
+class LLabel V8_FINAL : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry() {}
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments,
+ LOperand* length,
+ LOperand* index,
+ LOperand* temp) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* temp() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddE(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+ public:
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+ public:
+ explicit LArgumentsElements(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+ Token::Value op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ explicit LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitS, "bit-s")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+  explicit LBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LCheckMaps(LOperand* value, LOperand* temp = NULL) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LCheckValue(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LClampTToUint8(LOperand* unclamped, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchD, "cmp-hole-and-branch-d")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranchT(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchT, "cmp-hole-and-branch-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->representation().IsDouble();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDateField(LOperand* date, Smi* index) : index_(index) {
+ inputs_[0] = date;
+ }
+
+ LOperand* date() { return inputs_[0]; }
+ Smi* index() const { return index_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ private:
+ Smi* index_;
+};
+
+
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LDivI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToIntOrSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToIntOrSmi, "double-to-int-or-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 1> {
+ public:
+ LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+ return lazy_deopt_env_;
+ }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
+ lazy_deopt_env_ = env;
+ }
+
+ private:
+ LEnvironment* lazy_deopt_env_;
+};
+
+
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
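+// Unlike the fixed-arity LTemplateInstruction classes above, this instruction
+// keeps its operands in a ZoneList sized from the call descriptor (operand 0
+// is the call target) and overrides the input/temp iterator support itself.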
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
+ LOperand* target() const { return inputs_[0]; }
+
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+};
+
+
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
+ public:
+ LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() const { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ inputs_[0] = function;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
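+// The template parameter is the number of temporaries; each concrete
+// LLoadKeyed* subclass below instantiates LLoadKeyed<1> and supplies one temp.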
+template<int T>
+class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key) {
+ this->inputs_[0] = elements;
+ this->inputs_[1] = key;
+ }
+
+ LOperand* elements() { return this->inputs_[0]; }
+ LOperand* key() { return this->inputs_[1]; }
+ ElementsKind elements_kind() const {
+ return this->hydrogen()->elements_kind();
+ }
+ bool is_external() const {
+ return this->hydrogen()->is_external();
+ }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ uint32_t additional_index() const {
+ return this->hydrogen()->index_offset();
+ }
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ this->elements()->PrintTo(stream);
+ stream->Add("[");
+ this->key()->PrintTo(stream);
+ if (this->hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", this->additional_index());
+ } else {
+ stream->Add("]");
+ }
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+};
+
+
+class LLoadKeyedExternal: public LLoadKeyed<1> {
+ public:
+ LLoadKeyedExternal(LOperand* elements, LOperand* key, LOperand* temp) :
+ LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedExternal, "load-keyed-external");
+};
+
+
+class LLoadKeyedFixed: public LLoadKeyed<1> {
+ public:
+ LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp) :
+ LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixed, "load-keyed-fixed");
+};
+
+
+class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
+ public:
+ LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp) :
+ LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixedDouble, "load-keyed-fixed-double");
+};
+
+
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+};
+
+
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+template<int T>
+class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
+ public:
+ explicit LUnaryMathOperation(LOperand* value) {
+ this->inputs_[0] = value;
+ }
+
+ LOperand* value() { return this->inputs_[0]; }
+ BuiltinFunctionId op() const { return this->hydrogen()->op(); }
+
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+};
+
+
+class LMathAbsTagged: public LTemplateInstruction<1, 2, 3> {
+ public:
+ LMathAbsTagged(LOperand* context, LOperand* value,
+ LOperand* temp1, LOperand* temp2, LOperand* temp3) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbsTagged, "math-abs-tagged")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp1,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3)
+ : LUnaryMathOperation<4>(value) {
+ temps_[0] = double_temp1;
+ temps_[1] = temp1;
+ temps_[2] = temp2;
+ temps_[3] = temp3;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* double_temp1() { return temps_[0]; }
+ LOperand* temp1() { return temps_[1]; }
+ LOperand* temp2() { return temps_[2]; }
+ LOperand* temp3() { return temps_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathFloor V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathFloor(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+};
+
+
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathFloorOfDiv(LOperand* left,
+ LOperand* right,
+ LOperand* temp = NULL) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMathLog V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LMathRound V8_FINAL : public LUnaryMathOperation<1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp1)
+ : LUnaryMathOperation<1>(value) {
+ temps_[0] = temp1;
+ }
+
+ LOperand* temp1() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+};
+
+
+class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LModI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulConstIS(LOperand* left, LConstantOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LConstantOperand* right() { return LConstantOperand::cast(inputs_[1]); }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulConstIS, "mul-const-i-s")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulS, "mul-s")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ explicit LNumberTagU(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberUntagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LSeqStringGetChar(LOperand* string,
+ LOperand* index,
+ LOperand* temp) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ temps_[0] = temp;
+ }
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value,
+ LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ bool needs_check() const { return needs_check_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+ bool needs_check_;
+};
+
+
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+template<int T>
+class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
+ public:
+ LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value) {
+ this->inputs_[0] = elements;
+ this->inputs_[1] = key;
+ this->inputs_[2] = value;
+ }
+
+ bool is_external() const { return this->hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ LOperand* elements() { return this->inputs_[0]; }
+ LOperand* key() { return this->inputs_[1]; }
+ LOperand* value() { return this->inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return this->hydrogen()->elements_kind();
+ }
+
+ bool NeedsCanonicalization() {
+ return this->hydrogen()->NeedsCanonicalization();
+ }
+ uint32_t additional_index() const { return this->hydrogen()->index_offset(); }
+
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ this->elements()->PrintTo(stream);
+ stream->Add("[");
+ this->key()->PrintTo(stream);
+ if (this->hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", this->additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (this->value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ this->value()->PrintTo(stream);
+ }
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+};
+
+
+class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp) :
+ LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+  }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedExternal, "store-keyed-external")
+};
+
+
+class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp) :
+ LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+  }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixed, "store-keyed-fixed")
+};
+
+
+class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp) :
+ LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+  }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixedDouble,
+ "store-keyed-fixed-double")
+};
+
+
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LStoreNamedField(LOperand* object, LOperand* value,
+ LOperand* temp0, LOperand* temp1) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp0;
+ temps_[1] = temp1;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp0() { return temps_[0]; }
+ LOperand* temp1() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
+};
+
+
+class LStoreNamedGeneric V8_FINAL: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LShiftS(Token::Value op, LOperand* left, LOperand* right, LOperand* temp,
+ bool can_deopt) : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object,
+ LOperand* temp) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LSubS: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* context,
+ LOperand* temp1,
+ LOperand* temp2 = NULL) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() const { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() const { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = object;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+
+class LTruncateDoubleToIntOrSmi V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi,
+ "truncate-double-to-int-or-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() const { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk V8_FINAL : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
+
+ int GetNextSpillIndex();
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ allocator_(allocator),
+ instruction_pending_deoptimization_environment_(NULL),
+ pending_deoptimization_ast_id_(BailoutId::None()) { }
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ static bool HasMagicNumberForDivision(int32_t divisor);
+
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ HGraph* graph() const { return graph_; }
+ Isolate* isolate() const { return info_->isolate(); }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ int argument_count() const { return argument_count_; }
+ CompilationInfo* info() const { return info_; }
+ Heap* heap() const { return isolate()->heap(); }
+
+ void Abort(BailoutReason reason);
+
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // The operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not reuse
+  // its register for any other operand inside the instruction.
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+
+ // The operand created by UseRegisterAndClobber is guaranteed to be live until
+  // the end of the instruction, and it may also be used as a scratch
+ // register by the instruction implementation.
+ //
+ // This behaves identically to ARM's UseTempRegister. However, it is renamed
+ // to discourage its use in A64, since in most cases it is better to allocate
+ // a temporary register for the Lithium instruction.
+ MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
+
+ // The operand created by UseRegisterAtStart is guaranteed to be live only at
+ // instruction start. The register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. temporary
+  // or output).
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
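+
+  // Illustrative sketch only (not part of the original sources): a typical
+  // DoXxx method combines these helpers roughly as follows, assuming an
+  // LAddI-style instruction and the zone() accessor from the base class:
+  //
+  //   LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  //     LOperand* left = UseRegisterAtStart(instr->left());
+  //     LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+  //     return DefineAsRegister(new(zone()) LAddI(left, right));
+  //   }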
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // A constant operand.
+ MUST_USE_RESULT LConstantOperand* UseConstant(HValue* value);
+
+  // An input operand in a register, a stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+
+ // Temporary operand that must be in a fixed double register.
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach an environment to this
+ // instruction.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
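+
+  // Illustrative sketch only (not part of the original sources): a call-like
+  // instruction is typically pinned to fixed registers and then wrapped with
+  // MarkAsCall; the register choices below are assumptions:
+  //
+  //   LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  //     LOperand* context = UseFixed(instr->context(), cp);
+  //     LOperand* constructor = UseFixed(instr->constructor(), x1);
+  //     LCallNew* result = new(zone()) LCallNew(context, constructor);
+  //     return MarkAsCall(DefineFixed(result, x0), instr);
+  //   }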
+
+ LInstruction* AssignPointerMap(LInstruction* instr);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+
+ void VisitInstruction(HInstruction* current);
+ void DoBasicBlock(HBasicBlock* block);
+
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr);
+
+ LPlatformChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ LAllocator* allocator_;
+ LInstruction* instruction_pending_deoptimization_environment_;
+ BailoutId pending_deoptimization_ast_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} } // namespace v8::internal
+
+#endif // V8_A64_LITHIUM_A64_H_
diff --git a/deps/v8/src/a64/lithium-codegen-a64.cc b/deps/v8/src/a64/lithium-codegen-a64.cc
new file mode 100644
index 0000000000..b4b875fb04
--- /dev/null
+++ b/deps/v8/src/a64/lithium-codegen-a64.cc
@@ -0,0 +1,5692 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "a64/lithium-codegen-a64.h"
+#include "a64/lithium-gap-resolver-a64.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator V8_FINAL : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deopt_mode_(mode) { }
+ virtual ~SafepointGenerator() { }
+
+ virtual void BeforeCall(int call_size) const { }
+
+ virtual void AfterCall() const {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+// Emit code to branch if the given condition holds.
+// The code generated here doesn't modify the flags and they must have
+// been set by some prior instructions.
+//
+// The EmitInverted function simply inverts the condition.
+class BranchOnCondition : public BranchGenerator {
+ public:
+ BranchOnCondition(LCodeGen* codegen, Condition cond)
+ : BranchGenerator(codegen),
+ cond_(cond) { }
+
+ virtual void Emit(Label* label) const {
+ __ B(cond_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
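+    // An 'al' (always) branch has no inverse; in that case the inverted
+    // branch is never taken, so no code is emitted.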
+ if (cond_ != al) {
+ __ B(InvertCondition(cond_), label);
+ }
+ }
+
+ private:
+ Condition cond_;
+};
+
+
+// Emit code to compare lhs and rhs and branch if the condition holds.
+// This uses MacroAssembler's CompareAndBranch function so it will handle
+// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
+//
+// EmitInverted still compares the two operands but inverts the condition.
+class CompareAndBranch : public BranchGenerator {
+ public:
+ CompareAndBranch(LCodeGen* codegen,
+ Condition cond,
+ const Register& lhs,
+ const Operand& rhs)
+ : BranchGenerator(codegen),
+ cond_(cond),
+ lhs_(lhs),
+ rhs_(rhs) { }
+
+ virtual void Emit(Label* label) const {
+ __ CompareAndBranch(lhs_, rhs_, cond_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
+ }
+
+ private:
+ Condition cond_;
+ const Register& lhs_;
+ const Operand& rhs_;
+};
+
+
+// Test the input with the given mask and branch if the condition holds.
+// If the condition is 'eq' or 'ne' this will use MacroAssembler's
+// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
+// conversion to Tbz/Tbnz when possible.
+class TestAndBranch : public BranchGenerator {
+ public:
+ TestAndBranch(LCodeGen* codegen,
+ Condition cond,
+ const Register& value,
+ uint64_t mask)
+ : BranchGenerator(codegen),
+ cond_(cond),
+ value_(value),
+ mask_(mask) { }
+
+ virtual void Emit(Label* label) const {
+ switch (cond_) {
+ case eq:
+ __ TestAndBranchIfAllClear(value_, mask_, label);
+ break;
+ case ne:
+ __ TestAndBranchIfAnySet(value_, mask_, label);
+ break;
+ default:
+ __ Tst(value_, mask_);
+ __ B(cond_, label);
+ }
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ // The inverse of "all clear" is "any set" and vice versa.
+ switch (cond_) {
+ case eq:
+ __ TestAndBranchIfAnySet(value_, mask_, label);
+ break;
+ case ne:
+ __ TestAndBranchIfAllClear(value_, mask_, label);
+ break;
+ default:
+ __ Tst(value_, mask_);
+ __ B(InvertCondition(cond_), label);
+ }
+ }
+
+ private:
+ Condition cond_;
+ const Register& value_;
+ uint64_t mask_;
+};
+
+
+// Test the input and branch if it is non-zero and not a NaN.
+class BranchIfNonZeroNumber : public BranchGenerator {
+ public:
+ BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
+ const FPRegister& scratch)
+ : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
+
+ virtual void Emit(Label* label) const {
+ __ Fabs(scratch_, value_);
+ // Compare with 0.0. Because scratch_ is positive, the result can be one of
+ // nZCv (equal), nzCv (greater) or nzCV (unordered).
+ __ Fcmp(scratch_, 0.0);
+ __ B(gt, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ Fabs(scratch_, value_);
+ __ Fcmp(scratch_, 0.0);
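+    // 'le' is also taken for the unordered case, so this branches if the
+    // value is zero or NaN.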
+ __ B(le, label);
+ }
+
+ private:
+ const FPRegister& value_;
+ const FPRegister& scratch_;
+};
+
+
+// Test the input and branch if it is a heap number.
+class BranchIfHeapNumber : public BranchGenerator {
+ public:
+ BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
+ : BranchGenerator(codegen), value_(value) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfHeapNumber(value_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotHeapNumber(value_, label);
+ }
+
+ private:
+ const Register& value_;
+};
+
+
+// Test the input and branch if it is the specified root value.
+class BranchIfRoot : public BranchGenerator {
+ public:
+ BranchIfRoot(LCodeGen* codegen, const Register& value,
+ Heap::RootListIndex index)
+ : BranchGenerator(codegen), value_(value), index_(index) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfRoot(value_, index_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotRoot(value_, index_, label);
+ }
+
+ private:
+ const Register& value_;
+ const Heap::RootListIndex index_;
+};
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
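+  // The recursive call above writes the outer frames first, so the outermost
+  // frame appears first in the translation.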
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
+ }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsArgument()) {
+ ASSERT(is_tagged);
+ int src_index = GetStackSlotCount() + op->index();
+ translation->StoreStackSlot(src_index);
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal, zone());
+ return result;
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
+ if (!environment->HasBeenRegistered()) {
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ ASSERT(instr != NULL);
+
+ Assembler::BlockConstPoolScope scope(masm_);
+ __ Call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+ if ((code->kind() == Code::BINARY_OP_IC) ||
+ (code->kind() == Code::COMPARE_IC)) {
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+ }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).Is(x1));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->constructor()).is(x1));
+
+ __ Mov(x0, instr->arity());
+ // No cell in x2 for construct type feedback in optimized code.
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ Mov(x2, Operand(undefined_value));
+
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->constructor()).is(x1));
+
+ __ Mov(x0, Operand(instr->arity()));
+ __ Mov(x2, Operand(factory()->undefined_value()));
+
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
+ __ Cbz(x10, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ B(&done);
+ __ Bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ Bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ }
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
+ ASSERT(instr != NULL);
+
+ __ CallRuntime(function, num_arguments, save_doubles);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Mov(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ Ldr(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadHeapObject(cp,
+ Handle<HeapObject>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ ASSERT(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint = safepoints_.DefineSafepoint(
+ masm(), kind, arguments, deopt_mode);
+
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+
+ if (kind & Safepoint::kWithRegisters) {
+ // Register cp always contains a pointer to the context.
+ safepoint.DefinePointerRegister(cp, zone());
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+ LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+}
+
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ int count = 0;
+ while (!iterator.Done()) {
+ // TODO(all): Is this supposed to save just the callee-saved doubles? It
+ // looks like it's saving all of them.
+ FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ __ Poke(value, count * kDoubleSize);
+ iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ int count = 0;
+ while (!iterator.Done()) {
+ // TODO(all): Is this supposed to restore just the callee-saved doubles? It
+ // looks like it's restoring all of them.
+ FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ __ Peek(value, count * kDoubleSize);
+ iterator.Advance();
+ count++;
+ }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+ // TODO(all): Add support for stop_t FLAG in DEBUG mode.
+
+ // Classic mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->is_classic_mode() &&
+ !info_->is_native()) {
+ Label ok;
+ int receiver_offset = info_->scope()->num_parameters() * kXRegSizeInBytes;
+ __ Peek(x10, receiver_offset);
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Poke(x10, receiver_offset);
+
+ __ Bind(&ok);
+ }
+ }
+
+ ASSERT(__ StackPointer().Is(jssp));
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ __ Claim(slots, kPointerSize);
+ }
+
+ if (info()->saves_caller_doubles()) {
+ SaveCallerDoubles();
+ }
+
+ // Allocate a local context if needed.
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is in x1.
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+ // Context is returned in x0. It replaces the context passed to us. It's
+ // saved in the stack and kept live in cp.
+ __ Mov(cp, x0);
+ __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ Register value = x0;
+ Register scratch = x3;
+
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ Ldr(value, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ Str(value, target);
+ // Update the write barrier. This clobbers value and scratch.
+ __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
+ GetLinkRegisterState(), kSaveFPRegs);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ Claim(slots);
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
+ LDeferredCode* code = deferred_[i];
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+
+ __ Bind(code->entry());
+
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ Push(lr, fp, cp);
+ __ Mov(fp, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ Push(fp);
+ __ Add(fp, __ StackPointer(),
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ Comment(";;; Deferred code");
+ }
+
+ code->Generate();
+
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ __ Pop(xzr, cp, fp, lr);
+ frame_is_built_ = false;
+ }
+
+ __ B(code->exit());
+ }
+ }
+
+  // Force constant pool emission at the end of the deferred code to make
+  // sure that no constant pools are emitted after deferred code, because
+  // deferred code generation is the last step that generates code. The two
+  // following steps will only output data used by Crankshaft.
+ masm()->CheckConstPool(true, false);
+
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+ if (deopt_jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
+ Label table_start;
+ __ bind(&table_start);
+ Label needs_frame;
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
+ __ Bind(&deopt_jump_table_[i].label);
+ Address entry = deopt_jump_table_[i].address;
+ Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (deopt_jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
+ __ Mov(__ Tmp0(), Operand(ExternalReference::ForDeoptEntry(entry)));
+ if (needs_frame.is_bound()) {
+ __ B(&needs_frame);
+ } else {
+ __ Bind(&needs_frame);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ // TODO(jochen): Revisit the use of TmpX().
+ ASSERT(info()->IsStub());
+ __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ Push(lr, fp, cp, __ Tmp1());
+ __ Add(fp, __ StackPointer(), 2 * kPointerSize);
+ __ Call(__ Tmp0());
+ }
+ } else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ masm()->CheckConstPool(false, false);
+ }
+
+ // Force constant pool emission at the end of the deopt jump table to make
+ // sure that no constant pools are emitted after.
+ masm()->CheckConstPool(true, false);
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ RegisterDependentCodeForEmbeddedMaps(code);
+ PopulateDeoptimizationData(code);
+ info()->CommitDependencies(code);
+}
+
+
+void LCodeGen::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+
+ Handle<DeoptimizationInputData> data =
+ factory()->NewDeoptimizationInputData(length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+
+ code->set_deoptimization_data(*data);
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length(); i < length; i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+Deoptimizer::BailoutType LCodeGen::DeoptimizeHeader(
+ LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type) {
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ ASSERT(environment->HasBeenRegistered());
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ int id = environment->deoptimization_index();
+ Deoptimizer::BailoutType bailout_type =
+ info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ if (override_bailout_type) bailout_type = *override_bailout_type;
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ return bailout_type;
+ }
+
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
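+    // Stress-test deoptimization: decrement the stress counter and, when it
+    // reaches zero, reset it to FLAG_deopt_every_n_times and force a
+    // deoptimization at this point.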
+ Label not_zero;
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+
+ __ Push(x0, x1, x2);
+ __ Mrs(x2, NZCV);
+ __ Mov(x0, Operand(count));
+ __ Ldr(w1, MemOperand(x0));
+ __ Subs(x1, x1, 1);
+ __ B(gt, &not_zero);
+ __ Mov(w1, FLAG_deopt_every_n_times);
+ __ Str(w1, MemOperand(x0));
+ __ Pop(x0, x1, x2);
+ ASSERT(frame_is_built_);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ Unreachable();
+
+ __ Bind(&not_zero);
+ __ Str(w1, MemOperand(x0));
+ __ Msr(NZCV, x2);
+ __ Pop(x0, x1, x2);
+ }
+
+ return bailout_type;
+}
+
+
+void LCodeGen::Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type) {
+ ASSERT(environment->HasBeenRegistered());
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ int id = environment->deoptimization_index();
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+
+ if (info()->ShouldTrapOnDeopt()) {
+ __ Debug("trap_on_deopt", __LINE__, BREAK);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to build a frame or to restore
+  // caller doubles.
+ if (frame_is_built_ && !info()->saves_caller_doubles()) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last().address != entry) ||
+ (deopt_jump_table_.last().bailout_type != bailout_type) ||
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
+ deopt_jump_table_.Add(table_entry, zone());
+ }
+ __ B(&deopt_jump_table_.last().label);
+ }
+}
+
+
+void LCodeGen::Deoptimize(LEnvironment* environment) {
+ Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+ Deoptimize(environment, bailout_type);
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
+ Label dont_deopt;
+ Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+ __ B(InvertCondition(cond), &dont_deopt);
+ Deoptimize(environment, bailout_type);
+ __ Bind(&dont_deopt);
+}
+
+
+void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
+ Label dont_deopt;
+ Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+ __ Cbnz(rt, &dont_deopt);
+ Deoptimize(environment, bailout_type);
+ __ Bind(&dont_deopt);
+}
+
+
+void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
+ Label dont_deopt;
+ Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+ __ Tbz(rt, rt.Is64Bits() ? kXSignBit : kWSignBit, &dont_deopt);
+ Deoptimize(environment, bailout_type);
+ __ Bind(&dont_deopt);
+}
+
+
+void LCodeGen::DeoptimizeIfSmi(Register rt,
+ LEnvironment* environment) {
+ Label dont_deopt;
+ Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+ __ JumpIfNotSmi(rt, &dont_deopt);
+ Deoptimize(environment, bailout_type);
+ __ Bind(&dont_deopt);
+}
+
+
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
+ Label dont_deopt;
+ Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+ __ JumpIfSmi(rt, &dont_deopt);
+ Deoptimize(environment, bailout_type);
+ __ Bind(&dont_deopt);
+}
+
+
+void LCodeGen::DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment) {
+ Label dont_deopt;
+ Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+ __ JumpIfNotRoot(rt, index, &dont_deopt);
+ Deoptimize(environment, bailout_type);
+ __ Bind(&dont_deopt);
+}
+
+
+void LCodeGen::DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment) {
+ Label dont_deopt;
+ Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+ __ JumpIfRoot(rt, index, &dont_deopt);
+ Deoptimize(environment, bailout_type);
+ __ Bind(&dont_deopt);
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ intptr_t current_pc = masm()->pc_offset();
+
+ if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+ ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT((padding_size % kInstructionSize) == 0);
+ InstructionAccurateScope instruction_accurate(
+ masm(), padding_size / kInstructionSize);
+
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= kInstructionSize;
+ }
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ // TODO(all): support zero register results, as ToRegister32.
+ ASSERT((op != NULL) && op->IsRegister());
+ return Register::FromAllocationIndex(op->index());
+}
+
+
+Register LCodeGen::ToRegister32(LOperand* op) const {
+ ASSERT(op != NULL);
+ if (op->IsConstantOperand()) {
+ // If this is a constant operand, the result must be the zero register.
+ ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
+ return wzr;
+ } else {
+ return ToRegister(op).W();
+ }
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ ASSERT((op != NULL) && op->IsDoubleRegister());
+ return DoubleRegister::FromAllocationIndex(op->index());
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ ASSERT(op != NULL);
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
+ ASSERT(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
+ } else if (r.IsDouble()) {
+ Abort(kToOperandUnsupportedDoubleImmediate);
+ }
+ ASSERT(r.IsTagged());
+ return Operand(constant->handle(isolate()));
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand(0);
+ }
+ // Stack slots not implemented, use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand(0);
+}
+
+
+Operand LCodeGen::ToOperand32I(LOperand* op) {
+ return ToOperand32(op, SIGNED_INT32);
+}
+
+
+Operand LCodeGen::ToOperand32U(LOperand* op) {
+ return ToOperand32(op, UNSIGNED_INT32);
+}
+
+
+Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
+ ASSERT(op != NULL);
+ if (op->IsRegister()) {
+ return Operand(ToRegister32(op));
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(constant->HasInteger32Value());
+ return Operand(signedness == SIGNED_INT32
+ ? constant->Integer32Value()
+ : static_cast<uint32_t>(constant->Integer32Value()));
+ } else {
+ // Other constants not implemented.
+ Abort(kToOperand32UnsupportedImmediate);
+ }
+ }
+ // Other cases are not implemented.
+ UNREACHABLE();
+ return Operand(0);
+}
+
+
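+// Convert a (negative) argument stack slot index into a byte offset from the
+// stack pointer, for use when no eager frame has been built.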
+static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+ ASSERT(op != NULL);
+ ASSERT(!op->IsRegister());
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
+ } else {
+    // No eager frame has been built, so retrieve the parameter relative to
+    // the stack pointer.
+ return MemOperand(masm()->StackPointer(),
+ ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
+ return op->IsConstantOperand() &&
+ chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return constant->Integer32Value();
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = nv;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? lo : lt;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? hi : gt;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? ls : le;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? hs : ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
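+// Emit the branch for the given generator, taking the block layout into
+// account: if both destinations are the same block, emit an unconditional
+// goto; if one destination is the next block to be emitted, fall through to
+// it and emit only the branch to the other block (inverting the condition
+// when the true block is the fall-through).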
+template<class InstrType>
+void LCodeGen::EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ branch.Emit(chunk_->GetAssemblyLabel(left_block));
+ } else {
+ branch.Emit(chunk_->GetAssemblyLabel(left_block));
+ __ B(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
+ ASSERT((condition != al) && (condition != nv));
+ BranchOnCondition branch(this, condition);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs) {
+ ASSERT((condition != al) && (condition != nv));
+ CompareAndBranch branch(this, condition, lhs, rhs);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask) {
+ ASSERT((condition != al) && (condition != nv));
+ TestAndBranch branch(this, condition, value, mask);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch) {
+ BranchIfNonZeroNumber branch(this, value, scratch);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value) {
+ BranchIfHeapNumber branch(this, value);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index) {
+ BranchIfRoot branch(this, value, index);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) {
+ resolver_.Resolve(move);
+ }
+ }
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ // TODO(all): Try to improve this, like ARM r17925.
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ ASSERT(instr->temp() == NULL);
+ int index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int offset = ((length - index) + 1) * kPointerSize;
+ __ Ldr(result, MemOperand(arguments, offset));
+ } else {
+ ASSERT(instr->temp() != NULL);
+ Register temp = ToRegister32(instr->temp());
+ Register length = ToRegister32(instr->length());
+ Operand index = ToOperand32I(instr->index());
+    // There are two words between the frame pointer and the last argument.
+    // Subtracting the index from the length accounts for only one of them,
+    // so we add one more.
+ __ Sub(temp, length, index);
+ __ Add(temp, temp, 1);
+ __ Ldr(result, MemOperand(arguments, temp, UXTW, kPointerSizeLog2));
+ }
+}
+
+
+void LCodeGen::DoAddE(LAddE* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = (instr->right()->IsConstantOperand())
+ ? ToInteger32(LConstantOperand::cast(instr->right()))
+ : Operand(ToRegister32(instr->right()), SXTW);
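+  // A register right-hand side is a 32-bit value sign-extended (SXTW) to 64
+  // bits before the addition.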
+
+ ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+ __ Add(result, left, right);
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+ if (can_overflow) {
+ __ Adds(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoAddS(LAddS* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+ if (can_overflow) {
+ __ Adds(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
+ } else {
+ Register size = ToRegister32(instr->size());
+ __ Sxtw(size.X(), size);
+ __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
+ }
+
+ __ Bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Mov(temp1, size - kPointerSize);
+ } else {
+ __ Sub(temp1.W(), ToRegister32(instr->size()), kPointerSize);
+ }
+ __ Sub(result, result, kHeapObjectTag);
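+    // Untag result so that (result + temp1) addresses the allocated words
+    // directly; the loop below fills the allocation with the filler map,
+    // walking backwards from the last word down to offset 0.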
+
+ // TODO(jbramley): Optimize this loop using stp.
+ Label loop;
+ __ Bind(&loop);
+ __ Mov(temp2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ Str(temp2, MemOperand(result, temp1));
+ __ Subs(temp1, temp1, kPointerSize);
+ __ B(ge, &loop);
+
+ __ Add(result, result, kHeapObjectTag);
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(ToRegister(instr->result()), Operand(Smi::FromInt(0)));
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // We're in a SafepointRegistersScope so we can use any scratch registers.
+ Register size = x0;
+ if (instr->size()->IsConstantOperand()) {
+ __ Mov(size, Operand(ToSmi(LConstantOperand::cast(instr->size()))));
+ } else {
+ __ SmiTag(size, ToRegister32(instr->size()).X());
+ }
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ Mov(x10, Operand(Smi::FromInt(flags)));
+ __ Push(size, x10);
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister32(instr->length());
+
+ Register elements = ToRegister(instr->elements());
+ Register scratch = x5;
+ ASSERT(receiver.Is(x0)); // Used for parameter count.
+ ASSERT(function.Is(x1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ ASSERT(instr->IsMarkedAsCall());
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ Cmp(length, kArgumentsLimit);
+ DeoptimizeIf(hi, instr->environment());
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ Push(receiver);
+ Register argc = receiver;
+ receiver = NoReg;
+ __ Sxtw(argc, length);
+ // The arguments are at a one pointer size offset from elements.
+ __ Add(elements, elements, 1 * kPointerSize);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ Cbz(length, &invoke);
+ __ Bind(&loop);
+ __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
+ __ Push(scratch);
+ __ Subs(length, length, 1);
+ __ B(ne, &loop);
+
+ __ Bind(&invoke);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+ // The number of arguments is stored in argc (receiver) which is x0, as
+ // expected by InvokeFunction.
+ ParameterCount actual(argc);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ // When we are inside an inlined function, the arguments are the last things
+ // that have been pushed on the stack. Therefore the arguments array can be
+ // accessed directly from jssp.
+    // However, in the normal case the arguments array is accessed via fp, and
+    // there are two words on the stack between fp and the arguments (the
+    // saved lr and fp), which the LAccessArgumentsAt implementation takes
+    // into account.
+    // In the inlined case we therefore subtract the size of those two words
+    // from jssp to get a pointer that works with LAccessArgumentsAt.
+ ASSERT(masm()->StackPointer().Is(jssp));
+ __ Sub(result, jssp, 2 * kPointerSize);
+ } else {
+ ASSERT(instr->temp() != NULL);
+ Register previous_fp = ToRegister(instr->temp());
+
+ __ Ldr(previous_fp,
+ MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(result,
+ MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
+ __ Cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
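+    // If the previous frame is not an arguments adaptor frame, the arguments
+    // are accessed from the current frame (fp); otherwise they are accessed
+    // from the adaptor frame (previous_fp).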
+ __ Csel(result, fp, previous_fp, ne);
+ }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister32(instr->result());
+ Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ Cmp(fp, elements);
+ __ Mov(result, scope()->num_parameters());
+ __ B(eq, &done);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(result,
+ UntagSmiMemOperand(result.X(),
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ // Argument length is in result register.
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ switch (instr->op()) {
+ case Token::ADD: __ Fadd(result, left, right); break;
+ case Token::SUB: __ Fsub(result, left, right); break;
+ case Token::MUL: __ Fmul(result, left, right); break;
+ case Token::DIV: __ Fdiv(result, left, right); break;
+ case Token::MOD: {
+ // The ECMA-262 remainder operator is the remainder from a truncating
+ // (round-towards-zero) division. Note that this differs from IEEE-754.
+ //
+ // TODO(jbramley): See if it's possible to do this inline, rather than by
+ // calling a helper function. With frintz (to produce the intermediate
+ // quotient) and fmsub (to calculate the remainder without loss of
+ // precision), it should be possible. However, we would need support for
+ // fdiv in round-towards-zero mode, and the A64 simulator doesn't support
+ // that yet.
+ ASSERT(left.Is(d0));
+ ASSERT(right.Is(d1));
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ ASSERT(result.Is(d0));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).is(x1));
+ ASSERT(ToRegister(instr->right()).is(x0));
+ ASSERT(ToRegister(instr->result()).is(x0));
+
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32U(instr->right());
+
+ switch (instr->op()) {
+ case Token::BIT_AND: __ And(result, left, right); break;
+ case Token::BIT_OR: __ Orr(result, left, right); break;
+ case Token::BIT_XOR: __ Eor(result, left, right); break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoBitS(LBitS* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+
+ switch (instr->op()) {
+ case Token::BIT_AND: __ And(result, left, right); break;
+ case Token::BIT_OR: __ Orr(result, left, right); break;
+ case Token::BIT_XOR: __ Eor(result, left, right); break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed);
+ } else {
+ DeoptimizeIf(cc, check->environment());
+ }
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
+ if (instr->hydrogen()->skip_check()) return;
+
+ ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
+ Register length = ToRegister32(instr->length());
+
+ if (instr->index()->IsConstantOperand()) {
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
+ __ Cmp(length, Operand(Smi::FromInt(constant_index)));
+ } else {
+ __ Cmp(length, Operand(constant_index));
+ }
+ } else {
+ ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
+ __ Cmp(length, ToRegister32(instr->index()));
+ }
+ Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
+ ApplyCheckIf(condition, instr);
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+
+ if (r.IsInteger32()) {
+ ASSERT(!info()->IsStub());
+ EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
+ } else if (r.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ STATIC_ASSERT(kSmiTag == 0);
+ EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
+ } else if (r.IsDouble()) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ // Test the double value. Zero and NaN are false.
+ EmitBranchIfNonZeroNumber(instr, value, double_scratch());
+ } else {
+ ASSERT(r.IsTagged());
+ Register value = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+
+ if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
+ __ CompareRoot(value, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
+ } else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ EmitCompareAndBranch(instr, ne, value, Operand(Smi::FromInt(0)));
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitGoto(instr->TrueDestination(chunk()));
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ __ Ldr(double_scratch(), FieldMemOperand(value,
+ HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ Register temp = ToRegister(instr->temp1());
+ __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
+ EmitCompareAndBranch(instr, ne, temp, 0);
+ } else {
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ JumpIfRoot(
+ value, Heap::kUndefinedValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ JumpIfRoot(
+ value, Heap::kTrueValueRootIndex, true_label);
+ __ JumpIfRoot(
+ value, Heap::kFalseValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ JumpIfRoot(
+ value, Heap::kNullValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ ASSERT(Smi::FromInt(0) == 0);
+ __ Cbz(value, false_label);
+ __ JumpIfSmi(value, true_label);
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a smi, deopt.
+ DeoptimizeIfSmi(value, instr->environment());
+ }
+
+ Register map = NoReg;
+ Register scratch = NoReg;
+
+ if (expected.NeedsMap()) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ map = ToRegister(instr->temp1());
+ scratch = ToRegister(instr->temp2());
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(
+ scratch, 1 << Map::kIsUndetectable, false_label);
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, true_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
+ __ B(ge, &not_string);
+ __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
+ __ Cbz(scratch, false_label);
+ __ B(true_label);
+ __ Bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
+ __ B(eq, true_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ Label not_heap_number;
+ __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
+
+ __ Ldr(double_scratch(),
+ FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ Fcmp(double_scratch(), 0.0);
+ // If we got a NaN (overflow bit is set), jump to the false branch.
+ __ B(vs, false_label);
+ __ B(eq, false_label);
+ __ B(true_label);
+ __ Bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ Deoptimize(instr->environment());
+ }
+ }
+ }
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ Register function_reg) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+
+ // The function interface relies on the following register assignments.
+ ASSERT(function_reg.Is(x1) || function_reg.IsNone());
+ Register arity_reg = x0;
+
+ LPointerMap* pointers = instr->pointer_map();
+
+ // If necessary, load the function object.
+ if (function_reg.IsNone()) {
+ function_reg = x1;
+ __ LoadObject(function_reg, function);
+ }
+
+ if (FLAG_debug_code) {
+ Label is_not_smi;
+ // Try to confirm that function_reg (x1) is a tagged pointer.
+ __ JumpIfNotSmi(function_reg, &is_not_smi);
+ __ Abort(kExpectedFunctionObject);
+ __ Bind(&is_not_smi);
+ }
+
+ if (can_invoke_directly) {
+ // Change context.
+ __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+ // Set the arguments count if adaptation is not needed. Assumes that x0 is
+ // available to write to at this point.
+ if (dont_adapt_arguments) {
+ __ Mov(arity_reg, arity);
+ }
+
+ // Invoke function.
+ __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->result()).Is(x0));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ // TODO(all): on ARM we use a call descriptor to specify a storage mode,
+ // but on A64 we only have one storage mode so it isn't necessary. Check
+ // that this understanding is correct.
+ __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
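+ // Compute the entry address from the Code object: skip the Code header and
+ // remove the heap object tag.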
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(target);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->function()).is(x1));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ Mov(x0, Operand(instr->arity()));
+ }
+
+ // Change context.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // Load the code entry address
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->result()).is(x0));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub;
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub;
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub;
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Register temp = ToRegister(instr->temp());
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object);
+ __ Mov(cp, 0);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, temp);
+ }
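+ // Runtime::kTryMigrateInstance returns a smi on failure, so a smi result
+ // means the instance could not be migrated and we must deoptimize.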
+ DeoptimizeIfSmi(temp, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps: public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->CanOmitMapChecks()) {
+ ASSERT(instr->value() == NULL);
+ ASSERT(instr->temp() == NULL);
+ return;
+ }
+
+ Register object = ToRegister(instr->value());
+ Register map_reg = ToRegister(instr->temp());
+
+ __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, object);
+ __ Bind(deferred->check_maps());
+ }
+
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ Label success;
+ for (int i = 0; i < map_set.size(); i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMap(map_reg, map, &success);
+ __ B(eq, &success);
+ }
+
+ // We didn't match a map.
+ if (instr->hydrogen()->has_migration_target()) {
+ __ B(deferred->entry());
+ } else {
+ Deoptimize(instr->environment());
+ }
+
+ __ Bind(&success);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ // TODO(all): Depending on how we choose to implement the deopt, if we could
+ // guarantee that we have a deopt handler reachable by a tbz instruction,
+ // we could use tbz here and produce less code to support this instruction.
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
+ }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ Register value = ToRegister(instr->value());
+ ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
+ // TODO(all): See DoCheckNonSmi for comments on use of tbz.
+ DeoptimizeIfNotSmi(value, instr->environment());
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first, last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ Cmp(scratch, first);
+ if (first == last) {
+ // If there is only one type in the interval, check for equality.
+ DeoptimizeIf(ne, instr->environment());
+ } else if (last == LAST_TYPE) {
+ // We don't need to compare with the higher bound of the interval.
+ DeoptimizeIf(lo, instr->environment());
+ } else {
+ // If we are below the lower bound, set the C flag and clear the Z flag
+ // to force a deopt.
+ __ Ccmp(scratch, last, CFlag, hs);
+ DeoptimizeIf(hi, instr->environment());
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT((tag == 0) || (tag == mask));
+ // TODO(all): We might be able to use tbz/tbnz if we can guarantee that
+ // the deopt handler is reachable by a tbz instruction.
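+ // When the mask is a power of two and the tag is either 0 or equal to the
+ // mask, a single TST is sufficient: deopt if the bit is set when tag == 0,
+ // or if it is clear when tag == mask.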
+ __ Tst(scratch, mask);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+ } else {
+ if (tag == 0) {
+ __ Tst(scratch, mask);
+ } else {
+ __ And(scratch, scratch, mask);
+ __ Cmp(scratch, tag);
+ }
+ DeoptimizeIf(ne, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ __ ClampDoubleToUint8(result, input, double_scratch());
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register input = ToRegister32(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ __ ClampInt32ToUint8(result, input);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register input = ToRegister(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Label done;
+
+ // Both smi and heap number cases are handled.
+ Label is_not_smi;
+ __ JumpIfNotSmi(input, &is_not_smi);
+ __ SmiUntag(result.X(), input);
+ __ ClampInt32ToUint8(result);
+ __ B(&done);
+
+ __ Bind(&is_not_smi);
+
+ // Check for heap number.
+ Label is_heap_number;
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
+
+ // Check for undefined. Undefined is converted to zero for clamping conversions.
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+ __ Mov(result, 0);
+ __ B(&done);
+
+ // Heap number case.
+ __ Bind(&is_heap_number);
+ DoubleRegister dbl_scratch = double_scratch();
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
+ __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Handle<String> class_name = instr->hydrogen()->class_name();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Register input = ToRegister(instr->value());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(input, false_label);
+
+ Register map = scratch2;
+ if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+ // We expect CompareObjectType to load the object instance type in scratch1.
+ __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, false_label);
+ __ B(eq, true_label);
+ __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
+ __ B(eq, true_label);
+ } else {
+ __ IsObjectJSObjectType(input, map, scratch1, false_label);
+ }
+
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+ // Check if the constructor in the map is a function.
+ __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
+
+ // Objects with a non-function constructor have class 'Object'.
+ if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
+ __ JumpIfNotObjectType(
+ scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
+ } else {
+ __ JumpIfNotObjectType(
+ scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
+ }
+
+ // The constructor function is in scratch1. Get its instance class name.
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ FPRegister object = ToDoubleRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+
+ // If we don't have a NaN, we don't have the hole, so branch now to avoid the
+ // (relatively expensive) hole-NaN check.
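+ // An Fcmp of a register with itself is unordered (V flag set) iff the value
+ // is NaN.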
+ __ Fcmp(object, object);
+ __ B(vc, instr->FalseLabel(chunk_));
+
+ // We have a NaN, but is it the hole?
+ __ Fmov(temp, object);
+ EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
+ ASSERT(instr->hydrogen()->representation().IsTagged());
+ Register object = ToRegister(instr->object());
+
+ EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register value = ToRegister(instr->value());
+ Register map = ToRegister(instr->temp());
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
+ instr->TrueLabel(chunk()));
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
+ }
+ EmitGoto(instr->FalseDestination(chunk()));
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ Condition cond = TokenToCondition(instr->op(), false);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ if (right->IsConstantOperand()) {
+ __ Fcmp(ToDoubleRegister(left),
+ ToDouble(LConstantOperand::cast(right)));
+ } else if (left->IsConstantOperand()) {
+ // Transpose the operands and reverse the condition.
+ __ Fcmp(ToDoubleRegister(right),
+ ToDouble(LConstantOperand::cast(left)));
+ cond = ReverseConditionForCmp(cond);
+ } else {
+ __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+ }
+
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to false block label.
+ __ B(vs, instr->FalseLabel(chunk_));
+ EmitBranch(instr, cond);
+ } else {
+ if (instr->hydrogen_value()->representation().IsInteger32()) {
+ if (right->IsConstantOperand()) {
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister32(left),
+ ToOperand32I(right));
+ } else {
+ // Transpose the operands and reverse the condition.
+ EmitCompareAndBranch(instr,
+ ReverseConditionForCmp(cond),
+ ToRegister32(right),
+ ToOperand32I(left));
+ }
+ } else {
+ ASSERT(instr->hydrogen_value()->representation().IsSmi());
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister(left),
+ Operand(Smi::FromInt(value)));
+ } else if (left->IsConstantOperand()) {
+ // Transpose the operands and reverse the condition.
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ EmitCompareAndBranch(instr,
+ ReverseConditionForCmp(cond),
+ ToRegister(right),
+ Operand(Smi::FromInt(value)));
+ } else {
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister(left),
+ ToRegister(right));
+ }
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+ EmitCompareAndBranch(instr, eq, left, right);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+ Condition cond = TokenToCondition(op, false);
+
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // Signal that we don't inline smi code before this stub.
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ // Return true or false depending on CompareIC result.
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
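+ // The comparison IC returns a value that compares to zero in the same way
+ // as the original operands compare to each other, so applying 'cond' to
+ // (x0 cmp #0) selects the correct boolean.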
+ __ LoadTrueFalseRoots(x1, x2);
+ __ Cmp(x0, 0);
+ __ Csel(ToRegister(instr->result()), x1, x2, cond);
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ ASSERT(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fmov(result, instr->value());
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ ASSERT(is_int32(instr->value()));
+ // Cast the value here to ensure that the value isn't sign extended by the
+ // implicit Operand constructor.
+ __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> value = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ LoadObject(ToRegister(instr->result()), value);
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
+ }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Register reg = ToRegister(instr->value());
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
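+ // Objects in new space may be moved by the GC, so they cannot be embedded
+ // directly in the code. Embed a cell holding the object instead, and
+ // compare against the cell's current value.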
+ Register temp = ToRegister(instr->temp());
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ Mov(temp, Operand(Handle<Object>(cell)));
+ __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
+ __ Cmp(reg, temp);
+ } else {
+ __ Cmp(reg, Operand(object));
+ }
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->date());
+ Register result = ToRegister(instr->result());
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Smi* index = instr->index();
+ Label runtime, done, deopt, obj_ok;
+
+ ASSERT(object.is(result) && object.Is(x0));
+ ASSERT(instr->IsMarkedAsCall());
+
+ __ JumpIfSmi(object, &deopt);
+ __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
+ __ B(eq, &obj_ok);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&obj_ok);
+ if (index->value() == 0) {
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
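+ // Cached fields are only valid while the cache stamp stored on the JSDate
+ // object matches the isolate's current date cache stamp; otherwise fall
+ // through to the runtime call below.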
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ Mov(temp1, Operand(stamp));
+ __ Ldr(temp1, MemOperand(temp1));
+ __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Cmp(temp1, temp2);
+ __ B(ne, &runtime);
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ B(&done);
+ }
+
+ __ Bind(&runtime);
+ __ Mov(x1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ }
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
+ type = Deoptimizer::LAZY;
+ }
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ DeoptimizeHeader(instr->environment(), &type);
+ Deoptimize(instr->environment(), type);
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
+ HDiv* hdiv = instr->hydrogen();
+ Register dividend = ToRegister32(instr->left());
+ int32_t divisor = hdiv->right()->GetInteger32Constant();
+ Register result = ToRegister32(instr->result());
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
+ hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(dividend, 0);
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
+ hdiv->CheckFlag(HValue::kCanOverflow)) {
+ __ Cmp(dividend, kMinInt);
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ Abs(divisor) != 1) {
+ __ Tst(dividend, Abs(divisor) - 1);
+ DeoptimizeIf(ne, instr->environment());
+ }
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ Neg(result, dividend);
+ return;
+ }
+ int32_t shift = WhichPowerOf2(Abs(divisor));
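+ // An arithmetic right shift rounds towards minus infinity, so for negative
+ // dividends add a bias of (2^shift - 1) before shifting to get the
+ // round-towards-zero behaviour of integer division. The bias is taken from
+ // the sign bit when shift == 1, and from the sign-extended dividend
+ // otherwise.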
+ if (shift == 0) {
+ __ Mov(result, dividend);
+ } else if (shift == 1) {
+ __ Add(result, dividend, Operand(dividend, LSR, 31));
+ } else {
+ __ Mov(result, Operand(dividend, ASR, 31));
+ __ Add(result, dividend, Operand(result, LSR, 32 - shift));
+ }
+ if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ Neg(result, result);
+ return;
+ }
+
+ Register dividend = ToRegister32(instr->left());
+ Register divisor = ToRegister32(instr->right());
+ Register result = ToRegister32(instr->result());
+ HValue* hdiv = instr->hydrogen_value();
+
+ // Issue the division first, and then check for any deopt cases whilst the
+ // result is computed.
+ __ Sdiv(result, dividend, divisor);
+
+ if (hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ ASSERT_EQ(NULL, instr->temp());
+ return;
+ }
+
+ Label deopt;
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ Cbz(divisor, &deopt);
+ }
+
+ // Check for (0 / -x) as that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(divisor, 0);
+
+ // If the divisor < 0 (mi), compare the dividend, and deopt if it is
+ // zero, i.e. a zero dividend with a negative divisor deopts.
+ // If the divisor >= 0 (pl, the opposite of mi), set the flags to
+ // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
+ __ Ccmp(dividend, 0, NoFlag, mi);
+ __ B(eq, &deopt);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ // Test dividend for kMinInt by subtracting one (cmp) and checking for
+ // overflow.
+ __ Cmp(dividend, 1);
+ // If overflow is set, ie. dividend = kMinInt, compare the divisor with
+ // -1. If overflow is clear, set the flags for condition ne, as the
+ // dividend isn't -1, and thus we shouldn't deopt.
+ __ Ccmp(divisor, -1, NoFlag, vs);
+ __ B(eq, &deopt);
+ }
+
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = ToRegister32(instr->temp());
+ __ Msub(remainder, result, divisor, dividend);
+ __ Cbnz(remainder, &deopt);
+
+ Label div_ok;
+ __ B(&div_ok);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&div_ok);
+}
+
+
+void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister32(instr->result());
+ Label done, deopt;
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ JumpIfMinusZero(input, &deopt);
+ }
+
+ __ TryConvertDoubleToInt32(result, input, double_scratch(), &done);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&done);
+
+ if (instr->tag_result()) {
+ __ SmiTag(result.X());
+ }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+ __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // The FunctionLiteral instruction is marked as a call; we can trash any register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ instr->hydrogen()->is_generator());
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ __ Mov(x1, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
+ __ Push(cp, x2, x1);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+
+ __ EnumLengthUntagged(result, map);
+ __ Cbnz(result, &load_cache);
+
+ __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ B(&done);
+
+ __ Bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ DeoptimizeIfZero(result, instr->environment());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ Register object = ToRegister(instr->object());
+ Register null_value = x5;
+
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(object.Is(x0));
+
+ Label deopt;
+
+ __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
+
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Cmp(object, null_value);
+ __ B(eq, &deopt);
+
+ __ JumpIfSmi(object, &deopt);
+
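+ // Deoptimize for JS proxies and for anything below them in the instance
+ // type range; only ordinary JS objects are handled without deoptimizing.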
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
+ __ B(le, &deopt);
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
+
+ __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ B(&use_cache);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ // Get the set of properties to enumerate.
+ __ Bind(&call_runtime);
+ __ Push(object);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
+
+ __ Bind(&use_cache);
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ // Assert that we can use a W register load to get the hash.
+ ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSize);
+ __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+ // Do not emit jump if we are emitting a goto to the next block.
+ if (!IsNextEmittedBlock(block)) {
+ __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister32(instr->temp());
+
+ // Assert that the cache status bits fit in a W register.
+ ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
+ __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
+ __ Tst(temp, String::kContainsCachedArrayIndexMask);
+ EmitBranch(instr, eq);
+}
+
+
+// HHasInstanceTypeAndBranch instruction is built with an interval of type
+// to test but is only used in very restricted ways. The only possible kinds
+// of intervals are:
+// - [ FIRST_TYPE, instr->to() ]
+// - [ instr->from(), LAST_TYPE ]
+// - instr->from() == instr->to()
+//
+// These kinds of intervals can be checked with only one compare instruction,
+// provided the correct value and test condition are used.
+//
+// TestType() will return the value to use in the compare instruction and
+// BranchCondition() will return the condition to use depending on the kind
+// of interval actually specified in the instruction.
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT((from == to) || (to == LAST_TYPE));
+ return from;
+}
+
+
+// See comment above TestType function for what this function does.
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return hs;
+ if (from == FIRST_TYPE) return ls;
+ UNREACHABLE();
+ return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ __ Add(result, base, ToOperand32I(instr->offset()));
+ } else {
+ __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
+ }
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // Assert that the arguments are in the registers expected by InstanceofStub.
+ ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
+ ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+
+ // InstanceofStub returns a result in x0:
+ // 0 => not an instance
+ // smi 1 => instance.
+ __ Cmp(x0, 0);
+ __ LoadTrueFalseRoots(x0, x1);
+ __ Csel(x0, x0, x1, eq);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred =
+ new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label map_check, return_false, cache_miss, done;
+ Register object = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ // x4 is expected in the associated deferred code and stub.
+ Register map_check_site = x4;
+ Register map = x5;
+
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // We must take into account that object is in x11.
+ ASSERT(object.Is(x11));
+ Register scratch = x10;
+
+ // A Smi is not instance of anything.
+ __ JumpIfSmi(object, &return_false);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ {
+ // Below we use Factory::the_hole_value() on purpose instead of loading from
+ // the root array to force relocation and later be able to patch with a
+ // custom value.
+ InstructionAccurateScope scope(masm(), 5);
+ __ bind(&map_check);
+ // Will be patched with the cached map.
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+ __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
+ __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ cmp(map, Operand(scratch));
+ __ b(&cache_miss, ne);
+ // The address of this instruction is computed relative to the map check
+ // above, so check the size of the code generated.
+ ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
+ // Will be patched with the cached result.
+ __ LoadRelocated(result, Operand(factory()->the_hole_value()));
+ }
+ __ B(&done);
+
+ // The inlined call site cache did not match.
+ // Check null and string before calling the deferred code.
+ __ Bind(&cache_miss);
+ // Compute the address of the map check. It must not be clobbered until the
+ // InstanceOfStub has used it.
+ __ Adr(map_check_site, &map_check);
+ // Null is not instance of anything.
+ __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
+
+ // String values are not instances of anything.
+ // Return false if the object is a string. Otherwise, jump to the deferred
+ // code.
+ // Note that we can't jump directly to deferred code from
+ // IsObjectJSStringType, because it uses tbz for the jump and the deferred
+ // code can be out of range.
+ __ IsObjectJSStringType(object, scratch, NULL, &return_false);
+ __ B(deferred->entry());
+
+ __ Bind(&return_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result is either true or false.
+ __ Bind(deferred->exit());
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+
+ // Prepare InstanceofStub arguments.
+ ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
+ __ LoadObject(InstanceofStub::right(), instr->function());
+
+ InstanceofStub stub(flags);
+ CallCodeGeneric(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+
+ // Put the result value into the result register slot.
+ __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ Register value = ToRegister32(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Scvtf(result, value);
+}
+
+
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ // A64 smis can represent all Integer32 values, so this cannot deoptimize.
+ ASSERT(!instr->hydrogen()->value()->HasRange() ||
+ instr->hydrogen()->value()->range()->IsInSmiRange());
+
+ Register value = ToRegister32(instr->value());
+ Register result = ToRegister(instr->result());
+ __ SmiTag(result, value.X());
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // The function is required to be in x1.
+ ASSERT(ToRegister(instr->function()).is(x1));
+ ASSERT(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(),
+ instr,
+ x1);
+ }
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ // Get the frame pointer for the calling frame.
+ __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ Cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ Bind(&check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+
+ EmitCompareAndBranch(
+ instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Label* is_object = instr->TrueLabel(chunk_);
+ Label* is_not_object = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, is_not_object);
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+ // Check for undetectable objects.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
+
+ // Check that instance type is in object type range.
+ __ IsInstanceJSObjectType(map, scratch, NULL);
+ // Flags have been updated by IsInstanceJSObjectType. We can now test the
+ // flags for "le" condition to check if the object's type is a valid
+ // JS object type.
+ EmitBranch(instr, le);
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register val = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ Condition true_cond =
+ EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Register value = ToRegister(instr->value());
+ STATIC_ASSERT(kSmiTag == 0);
+ EmitTestAndBranch(instr, eq, value, kSmiTagMask);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+
+ EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
+
+ __ Bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ Label not_the_hole;
+ __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Bind(&not_the_hole);
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+ Label deopt;
+
+ // Check that the function really is a function. Leaves map in the result
+ // register.
+ __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CompareObjectType(result, temp, temp, MAP_TYPE);
+ __ B(ne, &done);
+
+ // Get the prototype from the initial map.
+ __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ __ Bind(&non_instance);
+ __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ __ B(&done);
+
+ // Deoptimize case.
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ // All done.
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ DeoptimizeIfRoot(
+ result, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->global_object()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Mov(x2, Operand(instr->name()));
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
+ Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int additional_index) {
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
+
+ if (key_is_constant) {
+ int base_offset = ((constant_key + additional_index) << element_size_shift);
+ return MemOperand(base, base_offset + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (key_is_smi) {
+ // Key is smi: untag, and scale by element size.
+ __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
+ return MemOperand(scratch, additional_offset);
+ } else {
+ // Key is not smi, and element size is not byte: scale by element size.
+ if (additional_offset == 0) {
+ return MemOperand(base, key, SXTW, element_size_shift);
+ } else {
+ __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
+ return MemOperand(scratch, additional_offset);
+ }
+ }
+ } else {
+ // TODO(all): Try to combine these cases a bit more intelligently.
+ if (additional_offset == 0) {
+ if (key_is_smi) {
+ __ SmiUntag(scratch, key);
+ __ Add(scratch.W(), scratch.W(), additional_index);
+ } else {
+ __ Add(scratch.W(), key.W(), additional_index);
+ }
+ return MemOperand(base, scratch, LSL, element_size_shift);
+ } else {
+ if (key_is_smi) {
+ __ Add(scratch, base,
+ Operand::UntagSmiAndScale(key, element_size_shift));
+ } else {
+ __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
+ }
+ return MemOperand(
+ scratch,
+ (additional_index << element_size_shift) + additional_offset);
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register scratch;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ scratch = ToRegister(instr->temp());
+ key = ToRegister(instr->key());
+ }
+
+ MemOperand mem_op =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->additional_index());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result.S(), mem_op);
+ __ Fcvt(result, result.S());
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, mem_op);
+ } else {
+ Register result = ToRegister(instr->result());
+
+ switch (elements_kind) {
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Ldrsb(result, mem_op);
+ break;
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ Ldrb(result, mem_op);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ Ldrsh(result, mem_op);
+ break;
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Ldrh(result, mem_op);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ Ldrsw(result, mem_op);
+ break;
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Ldr(result.W(), mem_op);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ // Deopt if the top bit is set, i.e. if value >= 0x80000000.
+ __ Tst(result, 0xFFFFFFFF80000000);
+ DeoptimizeIf(ne, instr->environment());
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind) {
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+
+ // Even though the HLoad/StoreKeyed instructions force the input
+ // representation for the key to be an integer, the input gets replaced during
+ // bounds check elimination with the index argument to the bounds check, which
+ // can be tagged, so that case must be handled here, too.
+ if (key_is_tagged) {
+ __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
+ } else {
+ // Sign extend key because it could be a 32-bit negative value or contain
+ // garbage in the top 32-bits. The address computation happens in 64-bit.
+ ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
+ __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register load_base;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
+ (instr->temp() == NULL));
+
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ load_base = elements;
+ } else {
+ load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ Ldr(result, FieldMemOperand(load_base, offset));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register scratch = ToRegister(instr->temp());
+
+ // TODO(all): Is it faster to reload this value to an integer register, or
+ // move from fp to integer?
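+ // The hole is represented by a specific NaN bit pattern (kHoleNanInt64),
+ // so compare the raw bits rather than using a floating point comparison.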
+ __ Fmov(scratch, result);
+ __ Cmp(scratch, kHoleNanInt64);
+ DeoptimizeIf(eq, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register load_base;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(instr->temp() == NULL);
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ load_base = elements;
+ } else {
+ load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ Representation representation = instr->hydrogen()->representation();
+
+ if (representation.IsInteger32() &&
+ instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
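+ // Read the untagged int32 value directly from the upper half of the smi,
+ // as in DoLoadNamedField.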
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
+ Representation::Integer32());
+ } else {
+ __ Load(result, FieldMemOperand(load_base, offset),
+ representation);
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ DeoptimizeIfNotSmi(result, instr->environment());
+ } else {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x1));
+ ASSERT(ToRegister(instr->key()).Is(x0));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).Is(x0));
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ Register object = ToRegister(instr->object());
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ __ Load(result, MemOperand(object, offset), access.representation());
+ return;
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ FPRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ Register source;
+ if (access.IsInobject()) {
+ source = object;
+ } else {
+ // Load the properties array, using result as a scratch register.
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ source = result;
+ }
+
+ if (access.representation().IsSmi() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Load(result, UntagSmiFieldMemOperand(source, offset),
+ Representation::Integer32());
+ } else {
+ __ Load(result, FieldMemOperand(source, offset), access.representation());
+ }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
+ ASSERT(ToRegister(instr->object()).is(x0));
+ __ Mov(x2, Operand(instr->name()));
+
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLengthSmi(result, map);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fabs(result, input);
+ } else if (r.IsSmi() || r.IsInteger32()) {
+ Register input = r.IsSmi() ? ToRegister(instr->value())
+ : ToRegister32(instr->value());
+ Register result = r.IsSmi() ? ToRegister(instr->result())
+ : ToRegister32(instr->result());
+ Label done;
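+ // Abs() branches to &done if the absolute value is representable. The only
+ // input whose absolute value is not representable is the most negative
+ // value, in which case execution falls through to the deoptimization below.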
+ __ Abs(result, input, NULL, &done);
+ Deoptimize(instr->environment());
+ __ Bind(&done);
+ }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry) {
+ // Handle the tricky cases of MathAbsTagged:
+ // - HeapNumber inputs.
+ // - Negative inputs produce a positive result, so a new HeapNumber is
+ // allocated to hold it.
+ // - Positive inputs are returned as-is, since there is no need to allocate
+ // a new HeapNumber for the result.
+  //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit
+  //    in a smi. In this case, the inline code sets the result and jumps
+  //    directly to the allocation_entry label.
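+  // For example, abs(Smi(-0x80000000)) is +0x80000000, one more than the
+  // maximum smi value 0x7fffffff, so the result has to be boxed as a
+  // HeapNumber.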
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+
+ Label runtime_allocation;
+
+ // Deoptimize if the input is not a HeapNumber.
+ __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // If the argument is positive, we can return it as-is, without any need to
+ // allocate a new HeapNumber for the result. We have to do this in integer
+ // registers (rather than with fabs) because we need to be able to distinguish
+ // the two zeroes.
+ __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ Mov(result, input);
+ __ Tbz(result_bits, kXSignBit, exit);
+
+ // Calculate abs(input) by clearing the sign bit.
+ __ Bic(result_bits, result_bits, kXSignMask);
+
+ // Allocate a new HeapNumber to hold the result.
+ // result_bits The bit representation of the (double) result.
+ __ Bind(allocation_entry);
+ __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
+ // The inline (non-deferred) code will store result_bits into result.
+ __ B(exit);
+
+ __ Bind(&runtime_allocation);
+ if (FLAG_debug_code) {
+ // Because result is in the pointer map, we need to make sure it has a valid
+ // tagged value before we call the runtime. We speculatively set it to the
+ // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
+ // be valid.
+ Label result_ok;
+ Register input = ToRegister(instr->value());
+ __ JumpIfSmi(result, &result_ok);
+ __ Cmp(input, result);
+ // TODO(all): Shouldn't we assert here?
+ DeoptimizeIf(ne, instr->environment());
+ __ Bind(&result_ok);
+ }
+
+ { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+ }
+ // The inline (non-deferred) code will store result_bits into result.
+}
+
+
+void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTagged: public LDeferredCode {
+ public:
+ DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTagged(instr_, exit(),
+ allocation_entry());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ Label* allocation_entry() { return &allocation; }
+ private:
+ LMathAbsTagged* instr_;
+ Label allocation;
+ };
+
+ // TODO(jbramley): The early-exit mechanism would skip the new frame handling
+ // in GenerateDeferredCode. Tidy this up.
+ ASSERT(!NeedsDeferredFrame());
+
+ DeferredMathAbsTagged* deferred =
+ new(zone()) DeferredMathAbsTagged(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
+ instr->hydrogen()->value()->representation().IsSmi());
+ Register input = ToRegister(instr->value());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+ Label done;
+
+ // Handle smis inline.
+ // We can treat smis as 64-bit integers, since the (low-order) tag bits will
+ // never get set by the negation. This is therefore the same as the Integer32
+ // case in DoMathAbs, except that it operates on 64-bit values.
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+
+ // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses
+ // doesn't always have enough range. Consider making a variant of it, or a
+ // TestIsSmi helper.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(input, kSmiTagMask);
+ __ B(ne, deferred->entry());
+
+ __ Abs(result, input, NULL, &done);
+
+ // The result is the magnitude (abs) of the smallest value a smi can
+ // represent, encoded as a double.
+ __ Mov(result_bits, double_to_rawbits(0x80000000));
+ __ B(deferred->allocation_entry());
+
+ __ Bind(deferred->exit());
+ __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
+ DoubleRegister double_temp2 = double_scratch();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register temp3 = ToRegister(instr->temp3());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ // TODO(jbramley): If we could provide a double result, we could use frintm
+ // and produce a valid double result in a single instruction.
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label deopt;
+ Label done;
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ JumpIfMinusZero(input, &deopt);
+ }
+
+ __ Fcvtms(result, input);
+
+ // Check that the result fits into a 32-bit integer.
+ // - The result did not overflow.
+ __ Cmp(result, Operand(result, SXTW));
+ // - The input was not NaN.
+ __ Fccmp(input, input, NoFlag, eq);
+ __ B(&done, eq);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Register right = ToRegister32(instr->right());
+ Register remainder = ToRegister32(instr->temp());
+
+  // This can't cause an exception on ARM: Sdiv by zero simply returns zero,
+  // so we can speculatively execute it now.
+ __ Sdiv(result, left, right);
+
+ // Check for x / 0.
+ DeoptimizeIfZero(right, instr->environment());
+
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // The V flag will be set iff left == kMinInt.
+ __ Cmp(left, 1);
+ __ Ccmp(right, -1, NoFlag, vs);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(right, 0);
+    __ Ccmp(left, 0, NoFlag, mi);
+    // "right" can't be zero here because the code would have already been
+    // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
+ // In this case we need to deoptimize to produce a -0.
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ Label done;
+ // If both operands have the same sign then we are done.
+ __ Eor(remainder, left, right);
+ __ Tbz(remainder, kWSignBit, &done);
+
+ // Check if the result needs to be corrected.
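+  // Sdiv truncates towards zero, but flooring division rounds towards
+  // -infinity. For example, -7 / 2: Sdiv yields -3 and the remainder
+  // -7 - (-3 * 2) = -1 is non-zero, so the result is adjusted to -4.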
+ __ Msub(remainder, result, right, left);
+ __ Cbz(remainder, &done);
+ __ Sub(result, result, 1);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ ASSERT(ToDoubleRegister(instr->result()).Is(d0));
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Label done;
+
+ // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
+ // Math.pow(-Infinity, 0.5) == +Infinity
+ // Math.pow(-0.0, 0.5) == +0.0
+
+ // Catch -infinity inputs first.
+ // TODO(jbramley): A constant infinity register would be helpful here.
+ __ Fmov(double_scratch(), kFP64NegativeInfinity);
+ __ Fcmp(double_scratch(), input);
+ __ Fabs(result, input);
+ __ B(&done, eq);
+
+ // Add +0.0 to convert -0.0 to +0.0.
+ // TODO(jbramley): A constant zero register would be helpful here.
+ __ Fmov(double_scratch(), 0.0);
+ __ Fadd(double_scratch(), input, double_scratch());
+ __ Fsqrt(result, double_scratch());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d1));
+ ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(x11));
+ ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
+ ASSERT(ToDoubleRegister(instr->left()).is(d0));
+ ASSERT(ToDoubleRegister(instr->result()).is(d0));
+
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(x11, &no_deopt);
+ __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+ __ Bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
+ // supports large integer exponents.
+ Register exponent = ToRegister(instr->right());
+ __ Sxtw(exponent, exponent);
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ }
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ // TODO(jbramley): We could provide a double result here using frint.
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
+ Register result = ToRegister(instr->result());
+ Label try_rounding;
+ Label deopt;
+ Label done;
+
+ // Math.round() rounds to the nearest integer, with ties going towards
+ // +infinity. This does not match any IEEE-754 rounding mode.
+ // - Infinities and NaNs are propagated unchanged, but cause deopts because
+ // they can't be represented as integers.
+ // - The sign of the result is the same as the sign of the input. This means
+ // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
+ // result of -0.0.
+
+ DoubleRegister dot_five = double_scratch();
+ __ Fmov(dot_five, 0.5);
+ __ Fabs(temp1, input);
+ __ Fcmp(temp1, dot_five);
+ // If input is in [-0.5, -0], the result is -0.
+ // If input is in [+0, +0.5[, the result is +0.
+ // If the input is +0.5, the result is 1.
+ __ B(hi, &try_rounding); // hi so NaN will also branch.
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Fmov(result, input);
+ __ Cmp(result, 0);
+ DeoptimizeIf(mi, instr->environment()); // [-0.5, -0.0].
+ }
+ __ Fcmp(input, dot_five);
+ __ Mov(result, 1); // +0.5.
+  // The remaining cases, [+0, +0.5[ (or [-0.5, +0.5[ when kBailoutOnMinusZero
+  // is not set), return 0 (xzr).
+ __ Csel(result, result, xzr, eq);
+ __ B(&done);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&try_rounding);
+ // Since we're providing a 32-bit result, we can implement ties-to-infinity by
+ // adding 0.5 to the input, then taking the floor of the result. This does not
+ // work for very large positive doubles because adding 0.5 would cause an
+ // intermediate rounding stage, so a different approach will be necessary if a
+ // double result is needed.
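+  // For example, round(2.5) computes floor(2.5 + 0.5) == 3, and round(-2.5)
+  // computes floor(-2.5 + 0.5) == -2, matching the ties-towards-+infinity
+  // behaviour of Math.round().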
+ __ Fadd(temp1, input, dot_five);
+ __ Fcvtms(result, temp1);
+
+ // Deopt if
+ // * the input was NaN
+ // * the result is not representable using a 32-bit integer.
+ __ Fcmp(input, 0.0);
+ __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
+ __ B(ne, &deopt);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ HMathMinMax::Operation op = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else if (instr->hydrogen()->representation().IsSmi()) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+
+ if (op == HMathMinMax::kMathMax) {
+ __ Fmax(result, left, right);
+ } else {
+ ASSERT(op == HMathMinMax::kMathMin);
+ __ Fmin(result, left, right);
+ }
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ HValue* hleft = hmod->left();
+ HValue* hright = hmod->right();
+
+ Label done;
+ Register result = ToRegister32(instr->result());
+ Register dividend = ToRegister32(instr->left());
+
+ bool need_minus_zero_check = (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ hleft->CanBeNegative() && hmod->CanBeZero());
+
+ if (hmod->RightIsPowerOf2()) {
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(hright->GetInteger32Constant());
+
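+    // The mask is applied to the magnitude of the dividend and the sign is
+    // then restored afterwards, matching the JS semantics where the result
+    // takes the sign of the dividend. For example, -7 % 4: negate to 7, mask
+    // with (4 - 1) to get 3, then negate back to -3.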
+ if (hleft->CanBeNegative()) {
+ __ Cmp(dividend, 0);
+ __ Cneg(result, dividend, mi);
+ __ And(result, result, divisor - 1);
+ __ Cneg(result, result, mi);
+ if (need_minus_zero_check) {
+ __ Cbnz(result, &done);
+ // The result is 0. Deoptimize if the dividend was negative.
+ DeoptimizeIf(mi, instr->environment());
+ }
+ } else {
+ __ And(result, dividend, divisor - 1);
+ }
+
+ } else {
+ Label deopt;
+ Register divisor = ToRegister32(instr->right());
+ // Compute:
+ // modulo = dividend - quotient * divisor
+ __ Sdiv(result, dividend, divisor);
+ if (hright->CanBeZero()) {
+ // Combine the deoptimization sites.
+ Label ok;
+ __ Cbnz(divisor, &ok);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&ok);
+ }
+ __ Msub(result, result, divisor, dividend);
+ if (need_minus_zero_check) {
+ __ Cbnz(result, &done);
+ if (deopt.is_bound()) {
+ __ Tbnz(dividend, kWSignBit, &deopt);
+ } else {
+ DeoptimizeIfNegative(dividend, instr->environment());
+ }
+ }
+ }
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
+ ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
+ bool is_smi = instr->hydrogen()->representation().IsSmi();
+ Register result =
+ is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
+ Register left =
+      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
+ int32_t right = ToInteger32(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero) {
+ if (right < 0) {
+ // The result is -0 if right is negative and left is zero.
+ DeoptimizeIfZero(left, instr->environment());
+ } else if (right == 0) {
+ // The result is -0 if the right is zero and the left is negative.
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ }
+
+ switch (right) {
+ // Cases which can detect overflow.
+ case -1:
+ if (can_overflow) {
+ // Only 0x80000000 can overflow here.
+ __ Negs(result, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Neg(result, left);
+ }
+ break;
+ case 0:
+ // This case can never overflow.
+ __ Mov(result, 0);
+ break;
+ case 1:
+ // This case can never overflow.
+ __ Mov(result, left, kDiscardForSameWReg);
+ break;
+ case 2:
+ if (can_overflow) {
+ __ Adds(result, left, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, left);
+ }
+ break;
+
+    // For all other cases we do not detect overflow, because doing so would
+    // probably be no faster than using the smull method in LMulI.
+ // TODO(jbramley): Investigate this, and add overflow support if it would
+ // be useful.
+ default:
+ ASSERT(!can_overflow);
+
+ // Multiplication by constant powers of two (and some related values)
+ // can be done efficiently with shifted operands.
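+      // For example, left * 8 becomes left << 3, left * 5 becomes
+      // left + (left << 2), and left * 7 becomes -(left - (left << 3)).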
+ if (right >= 0) {
+ if (IsPowerOf2(right)) {
+ // result = left << log2(right)
+ __ Lsl(result, left, WhichPowerOf2(right));
+ } else if (IsPowerOf2(right - 1)) {
+ // result = left + left << log2(right - 1)
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
+ } else if (IsPowerOf2(right + 1)) {
+ // result = -left + left << log2(right + 1)
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ if (IsPowerOf2(-right)) {
+ // result = -left << log2(-right)
+ __ Neg(result, Operand(left, LSL, WhichPowerOf2(-right)));
+ } else if (IsPowerOf2(-right + 1)) {
+ // result = left - left << log2(-right + 1)
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
+ } else if (IsPowerOf2(-right - 1)) {
+ // result = -left - left << log2(-right - 1)
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ break;
+ }
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Register right = ToRegister32(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ if (can_overflow) {
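+    // Produce the full 64-bit product, then check that it still fits in
+    // 32 bits by comparing it with its own sign-extended low word.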
+ __ Smull(result.X(), left, right);
+ __ Cmp(result.X(), Operand(result, SXTW));
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ __ Mul(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoMulS(LMulS* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+ if (can_overflow) {
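+    // Both operands are tagged smis (32-bit values shifted left by 32), so
+    // the high 64 bits of their product, computed by Smulh, are exactly the
+    // product of the untagged values. Comparing that with its sign-extended
+    // low word detects overflow of the 32-bit smi value range.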
+ __ Smulh(result, left, right);
+ __ Cmp(result, Operand(result.W(), SXTW));
+ __ SmiTag(result);
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ // TODO(jbramley): This could be rewritten to support UseRegisterAtStart.
+ ASSERT(!AreAliased(result, right));
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ }
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = ToRegister(instr->result());
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
+ } else {
+ __ B(deferred->entry());
+ }
+
+ __ Bind(deferred->exit());
+ __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Label slow, convert_and_store;
+ Register src = ToRegister32(value);
+ Register dst = ToRegister(instr->result());
+ Register scratch1 = ToRegister(temp1);
+
+ if (FLAG_inline_new) {
+ Register scratch2 = ToRegister(temp2);
+ __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
+ __ B(&convert_and_store);
+ }
+
+ // Slow case: call the runtime system to do the number allocation.
+ __ Bind(&slow);
+ // TODO(3095996): Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains an
+ // integer value.
+ __ Mov(dst, 0);
+ {
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, dst);
+ }
+
+ // Convert number to floating point and store in the newly allocated heap
+ // number.
+ __ Bind(&convert_and_store);
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Ucvtf(dbl_scratch, src);
+ __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register value = ToRegister32(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
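+  // Uint32 values above Smi::kMaxValue cannot be represented as a smi and
+  // are boxed as heap numbers by the deferred code.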
+ __ Cmp(value, Smi::kMaxValue);
+ __ B(hi, deferred->entry());
+ __ SmiTag(result, value.X());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+
+ Label done, load_smi;
+
+ // Work out what untag mode we're working with.
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ __ JumpIfSmi(input, &load_smi);
+
+ Label convert_undefined, deopt;
+
+ // Heap number map check.
+ Label* not_heap_number = can_convert_undefined_to_nan ? &convert_undefined
+ : &deopt;
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, not_heap_number);
+
+ // Load heap number.
+ __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
+ if (instr->hydrogen()->deoptimize_on_minus_zero()) {
+ __ JumpIfMinusZero(result, &deopt);
+ }
+ __ B(&done);
+
+ if (can_convert_undefined_to_nan) {
+ __ Bind(&convert_undefined);
+ __ JumpIfNotRoot(input, Heap::kUndefinedValueRootIndex, &deopt);
+
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ B(&done);
+ }
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ // Fall through to load_smi.
+ }
+
+ // Smi to double register conversion.
+ __ Bind(&load_smi);
+ __ SmiUntagToDouble(result, input);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ } else {
+ __ Push(ToRegister(argument));
+ }
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
+    // code managed by the register allocator and tearing down the frame, it's
+    // safe to write to the context register.
+ __ Push(x0);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ Register stack_pointer = masm()->StackPointer();
+ __ Mov(stack_pointer, fp);
+ no_frame_start = masm_->pc_offset();
+ __ Pop(fp, lr);
+ }
+
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ __ Drop(parameter_count + 1);
+ } else {
+ Register parameter_count = ToRegister(instr->parameter_count());
+ __ DropBySMI(parameter_count);
+ }
+ __ Ret();
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
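+// Returns a MemOperand addressing the character at |index| within the body of
+// a sequential string, scaling the index by two for two-byte strings. |temp|
+// is clobbered when the index is not a constant operand.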
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+ ASSERT(!temp.is(string));
+ ASSERT(!temp.is(ToRegister(index)));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Add(temp, string, Operand(ToRegister32(index), SXTW));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1));
+ }
+ return FieldMemOperand(temp, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ __ Ldr(temp, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+
+ __ And(temp, temp,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Cmp(temp, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Ldrb(result, operand);
+ } else {
+ __ Ldrh(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
+ encoding_mask);
+ }
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Strb(value, operand);
+ } else {
+ __ Strh(value, operand);
+ }
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done, untag;
+
+ if (instr->needs_check()) {
+ DeoptimizeIfNotSmi(input, instr->environment());
+ }
+
+ __ Bind(&untag);
+ __ SmiUntag(result, input);
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister32(instr->left());
+ Register result = ToRegister32(instr->result());
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister32(instr->right());
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, right); break;
+ case Token::SAR: __ Asr(result, left, right); break;
+ case Token::SHL: __ Lsl(result, left, right); break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Lsr(result, left, right);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left, kDiscardForSameWReg);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, shift_count); break;
+ case Token::SAR: __ Asr(result, left, shift_count); break;
+ case Token::SHL: __ Lsl(result, left, shift_count); break;
+ case Token::SHR: __ Lsr(result, left, shift_count); break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoShiftS(LShiftS* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+
+ // Only ROR by register needs a temp.
+ ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
+ (instr->temp() == NULL));
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister(instr->right());
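+    // The right-hand side is a tagged smi, so Ubfx is used to extract the low
+    // five bits of its untagged value: shift amounts are taken modulo 32.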
+ switch (instr->op()) {
+ case Token::ROR: {
+ Register temp = ToRegister(instr->temp());
+ __ Ubfx(temp, right, kSmiShift, 5);
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), temp.W());
+ __ SmiTag(result);
+ break;
+ }
+ case Token::SAR:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Asr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsl(result, left, result);
+ break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR:
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), shift_count);
+ __ SmiTag(result);
+ break;
+ case Token::SAR:
+ __ Asr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Lsl(result, left, shift_count);
+ break;
+ case Token::SHR:
+ __ Lsr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ Debug("LDebugBreak", 0, BREAK);
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register scratch1 = x5;
+ Register scratch2 = x6;
+ ASSERT(instr->IsMarkedAsCall());
+
+ ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
+ // TODO(all): if Mov could handle object in new space then it could be used
+ // here.
+ __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
+ __ Mov(scratch2, Operand(Smi::FromInt(instr->hydrogen()->flags())));
+ __ Push(cp, scratch1, scratch2); // The context is the first argument.
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &done);
+
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+
+ __ Bind(&done);
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new(zone()) DeferredStackCheck(this, instr);
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(lo, deferred_stack_check->entry());
+
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ Bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting call and the safepoint in
+ // the deferred code.
+ }
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ Register temp = ToRegister(instr->temp());
+ __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
+ __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ Ldr(scratch, target);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
+ }
+ }
+
+ __ Str(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch,
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+ __ Bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->value());
+ Register cell = ToRegister(instr->temp1());
+
+ // Load the cell.
+ __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
+
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register payload = ToRegister(instr->temp2());
+ __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
+ DeoptimizeIfRoot(
+ payload, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+
+ // Store the value.
+ __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
+ // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ scratch = ToRegister(instr->temp());
+ }
+
+ MemOperand dst =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->additional_index());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Fcvt(dbl_scratch.S(), value);
+ __ Str(dbl_scratch.S(), dst);
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, dst);
+ } else {
+ Register value = ToRegister(instr->value());
+
+ switch (elements_kind) {
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Strb(value, dst);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Strh(value, dst);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Str(value.W(), dst);
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register store_base = ToRegister(instr->temp());
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ }
+
+ if (instr->NeedsCanonicalization()) {
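+    // Canonicalize NaN values before they are stored, so that a NaN coming
+    // from user code can never alias the bit pattern used for the hole;
+    // ordinary numbers pass through Fmaxnm unchanged.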
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Fmov(dbl_scratch,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ Fmaxnm(dbl_scratch, dbl_scratch, value);
+ __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
+ } else {
+ __ Str(value, FieldMemOperand(store_base, offset));
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register store_base = ToRegister(instr->temp());
+ Register key = no_reg;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ Representation representation = instr->hydrogen()->value()->representation();
+ if (representation.IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
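+    // Store the untagged int32 value directly into the upper half of the smi
+    // element; the lower half keeps the zero smi tag.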
+ __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
+ Representation::Integer32());
+ } else {
+ __ Store(value, FieldMemOperand(store_base, offset), representation);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Add(key, store_base, offset - kHeapObjectTag);
+ __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x2));
+ ASSERT(ToRegister(instr->key()).Is(x1));
+ ASSERT(ToRegister(instr->value()).Is(x0));
+
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+// TODO(jbramley): Once the merge is done and we're tracking bleeding_edge, try
+// to tidy up this function.
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
+ Register object = ToRegister(instr->object());
+ Register temp0 = ToRegister(instr->temp0());
+ Register temp1 = ToRegister(instr->temp1());
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register value = ToRegister(instr->value());
+ __ Store(value, MemOperand(object, offset), representation);
+ return;
+ }
+
+ Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+ if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ DeoptimizeIfSmi(value, instr->environment());
+
+      // We know now that value is not a smi, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
+ }
+ } else if (representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ FPRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ if (!transition.is_null()) {
+ // Store the new map value.
+ Register new_map_value = temp0;
+ __ Mov(new_map_value, Operand(transition));
+ __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ // Update the write barrier for the map field.
+ __ RecordWriteField(object,
+ HeapObject::kMapOffset,
+ new_map_value,
+ temp1,
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ }
+ }
+
+ // Do the store.
+ Register value = ToRegister(instr->value());
+ Register destination;
+ if (access.IsInobject()) {
+ destination = object;
+ } else {
+ __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ destination = temp0;
+ }
+
+ if (representation.IsSmi() &&
+ instr->hydrogen()->value()->representation().IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+#ifdef DEBUG
+ __ Ldr(temp1, FieldMemOperand(destination, offset));
+ __ AssertSmi(temp1);
+#endif
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
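+    // Store the untagged int32 value directly into the upper half of the
+    // existing smi field; the lower half already holds the zero smi tag.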
+ __ Store(value, UntagSmiFieldMemOperand(destination, offset),
+ Representation::Integer32());
+ } else {
+ __ Store(value, FieldMemOperand(destination, offset), representation);
+ }
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ __ RecordWriteField(destination,
+ offset,
+ value, // Clobbered.
+ temp1, // Clobbered.
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->value()).is(x0));
+ ASSERT(ToRegister(instr->object()).is(x1));
+
+ // Name must be in x2.
+ __ Mov(x2, Operand(instr->name()));
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(),
+ instr->strict_mode_flag());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ StringAddStub stub(instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new(zone()) DeferredStringCharCodeAt(this, instr);
+
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ Push(index);
+
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ instr->context());
+ __ AssertSmi(x0);
+ __ SmiUntag(x0);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new(zone()) DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ __ Cmp(char_code, Operand(String::kMaxOneByteCharCode));
+ __ B(hi, deferred->entry());
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ Add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(eq, deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(char_code);
+ __ Push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ Condition condition = TokenToCondition(op, false);
+
+ EmitCompareAndBranch(instr, condition, x0, 0);
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+ if (can_overflow) {
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoSubS(LSubS* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+ if (can_overflow) {
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Register input = ToRegister(value);
+ Register scratch1 = ToRegister(temp1);
+ DoubleRegister dbl_scratch1 = double_scratch();
+
+ Label done;
+
+ // Load heap object map.
+ __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
+
+ if (instr->truncating()) {
+ Register output = ToRegister(instr->result());
+ Register scratch2 = ToRegister(temp2);
+ Label check_bools;
+
+ // If it's not a heap number, jump to undefined check.
+ __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
+
+ // A heap number: load value and convert to int32 using truncating function.
+ __ TruncateHeapNumberToI(output, input);
+ __ B(&done);
+
+ __ Bind(&check_bools);
+
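+    // For truncating conversions, true converts to 1, false to 0 and
+    // undefined to 0; any other value deoptimizes below.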
+ Register true_root = output;
+ Register false_root = scratch2;
+ __ LoadTrueFalseRoots(true_root, false_root);
+    __ Cmp(input, true_root);
+    __ Cset(output, eq);
+    __ Ccmp(input, false_root, ZFlag, ne);
+ __ B(eq, &done);
+
+    // Output already contains zero; undefined is converted to zero for
+    // truncating conversions.
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+ } else {
+ Register output = ToRegister32(instr->result());
+
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
+ Label converted;
+
+    // Deoptimize if it's not a heap number.
+ DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // A heap number: load value and convert to int32 using non-truncating
+ // function. If the result is out of range, branch to deoptimize.
+ __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2, &converted);
+ Deoptimize(instr->environment());
+
+ __ Bind(&converted);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(output, 0);
+ __ B(ne, &done);
+ __ Fmov(scratch1, dbl_scratch1);
+ DeoptimizeIfNegative(scratch1, instr->environment());
+ }
+ }
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2());
+ }
+
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+ // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses
+ // doesn't always have enough range. Consider making a variant of it, or a
+ // TestIsSmi helper.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(input, kSmiTagMask);
+ __ B(ne, deferred->entry());
+
+ __ SmiUntag(output, input);
+ __ Bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->value()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Push(x0);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Label materialized;
+ // Registers will be used as follows:
+ // x7 = literals array.
+ // x1 = regexp literal.
+ // x0 = regexp literal clone.
+ // x10-x12 are used as temporaries.
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadObject(x7, instr->hydrogen()->literals());
+ __ Ldr(x1, FieldMemOperand(x7, literal_offset));
+ __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
+
+ // Create regexp literal using runtime function
+ // Result will be in x0.
+ __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ Mov(x11, Operand(instr->hydrogen()->pattern()));
+ __ Mov(x10, Operand(instr->hydrogen()->flags()));
+ __ Push(x7, x12, x11, x10);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ __ Mov(x1, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+
+ __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ __ Mov(x0, Operand(Smi::FromInt(size)));
+ __ Push(x1, x0);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ __ Pop(x1);
+
+ __ Bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map = ToRegister(instr->temp2());
+ __ Mov(new_map, Operand(to_map));
+ __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
+ GetLinkRegisterState(), kDontSaveFPRegs);
+ } else {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
+ __ Mov(x0, object);
+ __ Mov(x1, Operand(to_map));
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegistersAndDoubles(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ }
+ __ Bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ Label no_memento_found;
+ __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
+ Deoptimize(instr->environment());
+ __ Bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ TruncateDoubleToI(result, input);
+ if (instr->tag_result()) {
+ __ SmiTag(result, result);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ Register input = ToRegister(instr->value());
+ __ Push(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Handle<String> type_name = instr->type_literal();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+
+ if (type_name->Equals(heap()->number_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register map = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, true_label);
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->string_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(
+ value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (type_name->Equals(heap()->symbol_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->boolean_string())) {
+ __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
+ __ CompareRoot(value, Heap::kFalseValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ __ CompareRoot(value, Heap::kNullValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->undefined_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register scratch = ToRegister(instr->temp1());
+
+ __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
+ __ JumpIfSmi(value, false_label);
+ // Check for undetectable objects and jump to the true branch in this case.
+ __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (type_name->Equals(heap()->function_string())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ ASSERT(instr->temp1() != NULL);
+ Register type = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
+ // HeapObject's type has been loaded into type register by JumpIfObjectType.
+ EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
+
+ } else if (type_name->Equals(heap()->object_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ if (!FLAG_harmony_typeof) {
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
+ }
+ __ JumpIfObjectType(value, map, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
+ __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(gt, false_label);
+ // Check for undetectable objects => false.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else {
+ __ B(false_label);
+ }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
+}
+
+
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ Register value = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange() ||
+ instr->hydrogen()->value()->range()->upper() == kMaxInt) {
+ // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
+ // interval, so we treat kMaxInt as a sentinel for this entire interval.
+ DeoptimizeIfNegative(value.W(), instr->environment());
+ }
+ __ SmiTag(result, value);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ Register temp = ToRegister(instr->temp());
+ __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Cmp(map, temp);
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // If the receiver is null or undefined, we have to pass the global object as
+ // a receiver to normal functions. Values have to be passed unchanged to
+ // builtins and strict-mode functions.
+ Label global_object, done, deopt;
+
+ if (!instr->hydrogen()->known_function()) {
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // CompilerHints is an int32 field. See objects.h.
+ __ Ldr(result.W(),
+ FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
+
+ // Do not transform the receiver to object for strict mode functions.
+ __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);
+
+ // Do not transform the receiver to object for builtins.
+ __ Tbnz(result, SharedFunctionInfo::kNative, &done);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
+
+ // Deoptimize if the receiver is not a JS object.
+ __ JumpIfSmi(receiver, &deopt);
+ __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
+ __ Mov(result, receiver);
+ __ B(ge, &done);
+ // Otherwise, fall through to deopt.
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&global_object);
+ __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+
+ __ AssertSmi(index);
+
+ Label out_of_object, done;
+ __ Cmp(index, Operand(Smi::FromInt(0)));
+ __ B(lt, &out_of_object);
+
+ STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+ __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
+
+ __ B(&done);
+
+ __ Bind(&out_of_object);
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ // The index is equal to the negated out-of-object property index, plus 1.
+ __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ Bind(&done);
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/a64/lithium-codegen-a64.h b/deps/v8/src/a64/lithium-codegen-a64.h
new file mode 100644
index 0000000000..006165157f
--- /dev/null
+++ b/deps/v8/src/a64/lithium-codegen-a64.h
@@ -0,0 +1,473 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_LITHIUM_CODEGEN_A64_H_
+#define V8_A64_LITHIUM_CODEGEN_A64_H_
+
+#include "a64/lithium-a64.h"
+
+#include "a64/lithium-gap-resolver-a64.h"
+#include "deoptimizer.h"
+#include "lithium-codegen.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+#include "v8utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+class BranchGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ osr_pc_offset_(-1),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+ // Simple accessors.
+ Scope* scope() const { return scope_; }
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+ Register ToRegister32(LOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ Operand ToOperand32I(LOperand* op);
+ Operand ToOperand32U(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // TODO(jbramley): Examine these helpers and check that they make sense.
+ // IsInteger32Constant returns true for smi constants, for example.
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ // Return a double scratch register which can be used locally
+ // when generating code for a lithium instruction.
+ DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+
+ Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+ void DoGap(LGap* instr);
+
+ // Generic version of EmitBranch. It contains some code to avoid emitting a
+ // branch on the next emitted basic block where we could just fall through.
+ // You shouldn't use it directly; use one of the helpers instead, such as
+ // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
+ template<class InstrType>
+ void EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch);
+
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
+
+ template<class InstrType>
+ void EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs);
+
+ template<class InstrType>
+ void EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask);
+
+ template<class InstrType>
+ void EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch);
+
+ template<class InstrType>
+ void EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value);
+
+ template<class InstrType>
+ void EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index);
+
+ // Emits optimized code to deep-copy the contents of statically known object
+ // graphs (e.g. object literal boilerplate). Expects a pointer to the
+ // allocated destination object in the result register, and a pointer to the
+ // source object in the source register.
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ Register scratch,
+ int* offset,
+ AllocationSiteMode mode);
+
+ // Emits optimized code for %_IsString(x). Preserves the input register.
+ // Returns the condition on which a final split into the true and false
+ // labels should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+ SmiCheck check_needed);
+
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ MemOperand BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding);
+ Deoptimizer::BailoutType DeoptimizeHeader(
+ LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type);
+ void Deoptimize(LEnvironment* environment);
+ void Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
+ void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIfZero(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
+ void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+
+ MemOperand PrepareKeyedExternalArrayOperand(Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int additional_index);
+ void CalcKeyedArrayBaseRegister(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+ void Abort(BailoutReason reason);
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
+ // Code generation steps. Returns true if code generation should continue.
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void LoadContextFromDeferred(LOperand* context);
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context);
+
+ // Generate a direct call to a known function.
+ // If the function is already loaded into x1 by the caller, function_reg may
+ // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
+ // automatically load it.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ Register function_reg = NoReg);
+
+ // Support for recording safepoint and position information.
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+ bool frame_is_built_;
+
+ // Builder that keeps track of safepoints in the code. The table itself is
+ // emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+ // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ int old_position_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PushSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PushSafepointRegisters();
+ codegen_->masm_->PushSafepointFPRegisters();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PopSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PopSafepointFPRegisters();
+ codegen_->masm_->PopSafepointRegisters();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() { }
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
+};
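+// Typical usage of LDeferredCode: declare a local subclass inside a Do*
+// method (see DeferredTaggedToI in LCodeGen::DoTaggedToI for an example),
+// branch to deferred->entry() on the slow path, and bind deferred->exit()
+// at the point where execution should resume.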
+
+
+// This is the abstract class used by EmitBranchGeneric.
+// It is used to emit code for conditional branching. The Emit() function
+// emits code to branch when the condition holds and EmitInverted() emits
+// the branch when the inverted condition holds.
+//
+// For concrete examples, see the implementations in lithium-codegen-a64.cc
+// (e.g. BranchOnCondition, CompareAndBranch).
+class BranchGenerator BASE_EMBEDDED {
+ public:
+ explicit BranchGenerator(LCodeGen* codegen)
+ : codegen_(codegen) { }
+
+ virtual ~BranchGenerator() { }
+
+ virtual void Emit(Label* label) const = 0;
+ virtual void EmitInverted(Label* label) const = 0;
+
+ protected:
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ LCodeGen* codegen_;
+};
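+// As an illustration only (a sketch, not the actual V8 implementation; the
+// InvertCondition helper name is assumed here), a concrete generator for a
+// simple condition check could look like this:
+//
+//   class BranchOnCondition : public BranchGenerator {
+//    public:
+//     BranchOnCondition(LCodeGen* codegen, Condition cond)
+//         : BranchGenerator(codegen), cond_(cond) { }
+//     // Branch when the condition holds.
+//     virtual void Emit(Label* label) const { masm()->B(cond_, label); }
+//     // Branch when the condition does not hold.
+//     virtual void EmitInverted(Label* label) const {
+//       if (cond_ != al) masm()->B(InvertCondition(cond_), label);
+//     }
+//    private:
+//     Condition cond_;
+//   };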
+
+} } // namespace v8::internal
+
+#endif // V8_A64_LITHIUM_CODEGEN_A64_H_
diff --git a/deps/v8/src/a64/lithium-gap-resolver-a64.cc b/deps/v8/src/a64/lithium-gap-resolver-a64.cc
new file mode 100644
index 0000000000..3087a3e930
--- /dev/null
+++ b/deps/v8/src/a64/lithium-gap-resolver-a64.cc
@@ -0,0 +1,326 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "a64/lithium-gap-resolver-a64.h"
+#include "a64/lithium-codegen-a64.h"
+
+namespace v8 {
+namespace internal {
+
+// We use the root register to spill a value while breaking a cycle in parallel
+// moves. We don't need access to roots while resolving the move list, and
+// using the root register has two advantages:
+// - It is not in the Crankshaft allocatable registers list, so it can't
+// interfere with any of the moves we are resolving.
+// - We don't need to push it on the stack, as we can reload it with its value
+// once we have resolved a cycle.
+#define kSavedValue root
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
+ saved_destination_(NULL), need_to_restore_root_(false) { }
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // Any cycle is found when we reach this move again.
+ PerformMove(i);
+ if (in_cycle_) RestoreValue();
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ if (!move.IsEliminated()) {
+ ASSERT(move.source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ if (need_to_restore_root_) {
+ ASSERT(kSavedValue.Is(root));
+ __ InitializeRootRegister();
+ need_to_restore_root_ = false;
+ }
+
+ moves_.Rewind(0);
+}
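+// For example, resolving the parallel move {x0 -> x1, x1 -> x0} (a simple
+// swap) proceeds as follows: PerformMove(0) recurses into the second move,
+// which is blocked by the pending root move, so BreakCycle() spills its
+// source into the root register. The emitted sequence is roughly:
+//
+//   Mov(root, x1)            // BreakCycle: save the blocking value.
+//   Mov(x1, x0)              // EmitMove: the root move is now unblocked.
+//   Mov(x0, root)            // RestoreValue: complete the broken move.
+//   InitializeRootRegister() // Restore the root register afterwards.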
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+ LMoveOperands& current_move = moves_[index];
+
+ ASSERT(!current_move.IsPending());
+ ASSERT(!current_move.IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated.
+ LOperand* destination = current_move.destination();
+ current_move.set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move, it must be moves_[root_index_],
+ // and all other moves with the same source as moves_[root_index_] are
+ // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ current_move.set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+void LGapResolver::BreakCycle(int index) {
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+
+ // We use a register that is not allocatable by Crankshaft to break the
+ // cycle, so that it cannot interfere with the moves we are resolving.
+ ASSERT(!kSavedValue.IsAllocatable());
+ need_to_restore_root_ = true;
+
+ // We save the source of that move in a register and remember its
+ // destination. Then we mark this move as resolved so the cycle is
+ // broken and we can perform the other moves.
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+
+ if (source->IsRegister()) {
+ __ Mov(kSavedValue, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ // TODO(all): We should use a double register to store the value, to avoid
+ // the penalty of the mov across register banks. We are going to reserve
+ // d31 to hold the value 0.0. We could clobber that register while breaking
+ // the cycle and restore it afterwards, as we do with the root register.
+ // LGapResolver::RestoreValue() will need to be updated as well when we do
+ // that.
+ __ Fmov(kSavedValue, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+
+ // Mark this move as resolved.
+ // This move will actually be performed by moving the saved value to this
+ // move's destination in LGapResolver::RestoreValue().
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ if (saved_destination_->IsRegister()) {
+ __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedValue);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ Str(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ldr(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(dst, Operand(cgen_->ToSmi(constant_source)));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(dst, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ __ Fmov(result, cgen_->ToDouble(constant_source));
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ need_to_restore_root_ = true;
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(kSavedValue, Operand(cgen_->ToSmi(constant_source)));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
+ }
+ __ Str(kSavedValue, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ Str(src, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand src = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ Ldr(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else {
+ UNREACHABLE();
+ }
+
+ // The move has been emitted, we can eliminate it.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitStackSlotMove(int index) {
+ // We need a temp register to perform a stack slot to stack slot move, and
+ // the register must not be involved in breaking cycles.
+
+ // Use the Crankshaft double scratch register as the temporary.
+ DoubleRegister temp = crankshaft_fp_scratch;
+
+ LOperand* src = moves_[index].source();
+ LOperand* dst = moves_[index].destination();
+
+ ASSERT(src->IsStackSlot());
+ ASSERT(dst->IsStackSlot());
+ __ Ldr(temp, cgen_->ToMemOperand(src));
+ __ Str(temp, cgen_->ToMemOperand(dst));
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/a64/lithium-gap-resolver-a64.h b/deps/v8/src/a64/lithium-gap-resolver-a64.h
new file mode 100644
index 0000000000..427065933e
--- /dev/null
+++ b/deps/v8/src/a64/lithium-gap-resolver-a64.h
@@ -0,0 +1,90 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
+#define V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Emit a move from one stack slot to another.
+ void EmitStackSlotMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+
+ // We use the root register as a scratch in a few places. When that happens,
+ // this flag is set to indicate that it needs to be restored.
+ bool need_to_restore_root_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
diff --git a/deps/v8/src/a64/macro-assembler-a64-inl.h b/deps/v8/src/a64/macro-assembler-a64-inl.h
new file mode 100644
index 0000000000..0c62a8b62e
--- /dev/null
+++ b/deps/v8/src/a64/macro-assembler-a64-inl.h
@@ -0,0 +1,1647 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_MACRO_ASSEMBLER_A64_INL_H_
+#define V8_A64_MACRO_ASSEMBLER_A64_INL_H_
+
+#include <ctype.h>
+
+#include "v8globals.h"
+#include "globals.h"
+
+#include "a64/assembler-a64.h"
+#include "a64/assembler-a64-inl.h"
+#include "a64/macro-assembler-a64.h"
+#include "a64/instrument-a64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
+ return UntagSmiMemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiMemOperand(Register object, int offset) {
+ // Assumes that Smis are shifted left by 32 bits and that the target is
+ // little-endian.
+ STATIC_ASSERT(kSmiShift == 32);
+ return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
+}
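+// For example, with kSmiShift == 32 a tagged smi keeps its 32-bit payload in
+// the upper word of the 64-bit field. On a little-endian target that word
+// starts kSmiShift / kBitsPerByte == 4 bytes into the field, so a W-sized
+// load from the adjusted offset yields the untagged value directly, without
+// a separate shift.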
+
+
+Handle<Object> MacroAssembler::CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+}
+
+
+void MacroAssembler::And(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, AND);
+}
+
+
+void MacroAssembler::Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Tst(const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BIC);
+}
+
+
+void MacroAssembler::Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BICS);
+}
+
+
+void MacroAssembler::Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORR);
+}
+
+
+void MacroAssembler::Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORN);
+}
+
+
+void MacroAssembler::Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EOR);
+}
+
+
+void MacroAssembler::Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EON);
+}
+
+
+void MacroAssembler::Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
+ }
+}
+
+
+void MacroAssembler::Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
+ }
+}
+
+
+void MacroAssembler::Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
+ }
+}
+
+
+void MacroAssembler::Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, ADD);
+ }
+}
+
+
+void MacroAssembler::Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
+ }
+}
+
+
+void MacroAssembler::Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, SUB);
+ }
+}
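+// Note that Add, Adds, Sub, Subs, Ccmp and Ccmn above fold negative
+// immediates into the opposite operation: for example, Add(x0, x1, -8) emits
+// the same code as Sub(x0, x1, 8), since the A64 add/sub immediate field is
+// unsigned.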
+
+
+void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Adds(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Neg(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ if (operand.IsImmediate()) {
+ Mov(rd, -operand.immediate());
+ } else {
+ Sub(rd, AppropriateZeroRegFor(rd), operand);
+ }
+}
+
+
+void MacroAssembler::Negs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void MacroAssembler::Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void MacroAssembler::Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void MacroAssembler::Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void MacroAssembler::Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void MacroAssembler::Ngc(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbc(rd, zr, operand);
+}
+
+
+void MacroAssembler::Ngcs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbcs(rd, zr, operand);
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Mov(rd, ~imm);
+}
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
+void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ ASSERT(allow_macro_instructions_); \
+ LoadStoreMacro(REG, addr, OP); \
+}
+LS_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+
+void MacroAssembler::Adr(const Register& rd, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ adr(rd, label);
+}
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::B(Label* label) {
+ b(label);
+ CheckVeneers(false);
+}
+
+
+void MacroAssembler::B(Condition cond, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ B(label, cond);
+}
+
+
+void MacroAssembler::Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfi(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfxil(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bind(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bind(label);
+}
+
+
+void MacroAssembler::Bl(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bl(label);
+}
+
+
+void MacroAssembler::Blr(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ blr(xn);
+}
+
+
+void MacroAssembler::Br(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ br(xn);
+}
+
+
+void MacroAssembler::Brk(int code) {
+ ASSERT(allow_macro_instructions_);
+ brk(code);
+}
+
+
+void MacroAssembler::Cinc(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinc(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cinv(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinv(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cls(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ cls(rd, rn);
+}
+
+
+void MacroAssembler::Clz(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ clz(rd, rn);
+}
+
+
+void MacroAssembler::Cneg(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cneg(rd, rn, cond);
+}
+
+
+// Conditionally zero the destination register. Only X registers are supported
+// due to the truncation side-effect when used on W registers.
+void MacroAssembler::CzeroX(const Register& rd,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP() && rd.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ csel(rd, xzr, rd, cond);
+}
+
+
+// Conditionally move a value into the destination register. Only X registers
+// are supported due to the truncation side-effect when used on W registers.
+void MacroAssembler::CmovX(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP());
+ ASSERT(rd.Is64Bits() && rn.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ if (!rd.is(rn)) {
+ csel(rd, rn, rd, cond);
+ }
+}
+
+
+void MacroAssembler::Cset(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cset(rd, cond);
+}
+
+
+void MacroAssembler::Csetm(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csetm(rd, cond);
+}
+
+
+void MacroAssembler::Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dmb(domain, type);
+}
+
+
+void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dsb(domain, type);
+}
+
+
+void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
+ ASSERT(allow_macro_instructions_);
+ debug(message, code, params);
+}
+
+
+void MacroAssembler::Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ extr(rd, rn, rm, lsb);
+}
+
+
+void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fabs(fd, fn);
+}
+
+
+void MacroAssembler::Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fadd(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fccmp(fn, fm, nzcv, cond);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fcmp(fn, fm);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
+ ASSERT(allow_macro_instructions_);
+ if (value != 0.0) {
+ FPRegister tmp = AppropriateTempFor(fn);
+ Fmov(tmp, value);
+ fcmp(fn, tmp);
+ } else {
+ fcmp(fn, value);
+ }
+}
+
+
+void MacroAssembler::Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fcsel(fd, fn, fm, cond);
+}
+
+
+void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fcvt(fd, fn);
+}
+
+
+void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtas(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtau(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtms(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtmu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtns(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtnu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzs(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzu(rd, fn);
+}
+
+
+void MacroAssembler::Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fdiv(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmax(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmaxnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmin(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fminnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+ // Only skip the instruction when fd and fn are the same D register: fmov(s0,
+ // s0) is not a no-op because it clears the top word of d0. Technically,
+ // fmov(d0, d0) is not a no-op either because it clears the top of q0, but
+ // FPRegister does not currently support Q registers.
+ if (!fd.Is(fn) || !fd.Is64Bits()) {
+ fmov(fd, fn);
+ }
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, Register rn) {
+ ASSERT(allow_macro_instructions_);
+ fmov(fd, rn);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, double imm) {
+ ASSERT(allow_macro_instructions_);
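+ // Encodable FP immediates (e.g. 1.0) and positive zero can be handled by a
+ // single fmov; other values are loaded below from their raw bit pattern.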
+ if ((fd.Is64Bits() && IsImmFP64(imm)) ||
+ (fd.Is32Bits() && IsImmFP32(imm)) ||
+ ((imm == 0.0) && (copysign(1.0, imm) == 1.0))) {
+ // These cases can be handled by the Assembler.
+ fmov(fd, imm);
+ } else {
+ // TODO(all): The Assembler would try to relocate the immediate with
+ // Assembler::ldr(const FPRegister& ft, double imm) but it is not
+ // implemented yet.
+ if (fd.SizeInBits() == kDRegSize) {
+ Mov(Tmp0(), double_to_rawbits(imm));
+ Fmov(fd, Tmp0());
+ } else {
+ ASSERT(fd.SizeInBits() == kSRegSize);
+ Mov(WTmp0(), float_to_rawbits(static_cast<float>(imm)));
+ Fmov(fd, WTmp0());
+ }
+ }
+}
+
+
+void MacroAssembler::Fmov(Register rd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fmov(rd, fn);
+}
+
+
+void MacroAssembler::Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmul(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fneg(fd, fn);
+}
+
+
+void MacroAssembler::Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frinta(fd, fn);
+}
+
+
+void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintn(fd, fn);
+}
+
+
+void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintz(fd, fn);
+}
+
+
+void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fsqrt(fd, fn);
+}
+
+
+void MacroAssembler::Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fsub(fd, fn, fm);
+}
+
+
+void MacroAssembler::Hint(SystemHint code) {
+ ASSERT(allow_macro_instructions_);
+ hint(code);
+}
+
+
+void MacroAssembler::Hlt(int code) {
+ ASSERT(allow_macro_instructions_);
+ hlt(code);
+}
+
+
+void MacroAssembler::Isb() {
+ ASSERT(allow_macro_instructions_);
+ isb();
+}
+
+
+void MacroAssembler::Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldnp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ ASSERT(!rt2.IsZero());
+ ldpsw(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
+ ASSERT(allow_macro_instructions_);
+ ldr(ft, imm);
+}
+
+
+void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ ldr(rt, imm);
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsl(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lslv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ madd(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mneg(rd, rn, rm);
+}
+
+
+void MacroAssembler::Mov(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ // Emit a register move only if the registers are distinct, or if they are
+ // not X registers. Note that mov(w0, w0) is not a no-op because it clears
+ // the top word of x0.
+ if (!rd.Is(rn) || !rd.Is64Bits()) {
+ Assembler::mov(rd, rn);
+ }
+}
+
+
+void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ movk(rd, imm, shift);
+}
+
+
+void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ mrs(rt, sysreg);
+}
+
+
+void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ msr(sysreg, rt);
+}
+
+
+void MacroAssembler::Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ msub(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mul(rd, rn, rm);
+}
+
+
+void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rbit(rd, rn);
+}
+
+
+void MacroAssembler::Ret(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ ret(xn);
+ CheckVeneers(false);
+}
+
+
+void MacroAssembler::Rev(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev(rd, rn);
+}
+
+
+void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev16(rd, rn);
+}
+
+
+void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev32(rd, rn);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rs,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ror(rd, rs, shift);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rorv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ scvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sdiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smull(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smulh(rd, rn, rm);
+}
+
+
+void MacroAssembler::Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stnp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtb(rd, rn);
+}
+
+
+void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxth(rd, rn);
+}
+
+
+void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtw(rd, rn);
+}
+
+
+void MacroAssembler::Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ ucvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ udiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtb(rd, rn);
+}
+
+
+void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxth(rd, rn);
+}
+
+
+void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtw(rd, rn);
+}
+
+
+void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
+ ASSERT(!csp.Is(sp_));
+ // TODO(jbramley): Several callers rely on this not using scratch registers,
+ // so we use the assembler directly here. However, this means that large
+ // immediate values of 'space' cannot be handled. Once we merge with V8, we
+ // should try to use the new scope that controls scratch register usage.
+ InstructionAccurateScope scope(this);
+ if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
+ // The subtract instruction supports a 12-bit immediate, shifted left by
+ // zero or 12 bits. So, in two instructions, we can subtract any immediate
+ // between zero and (1 << 24) - 1.
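+ // For example, an immediate of 0x12345 would be materialised as
+ // 'sub csp, StackPointer(), #0x12000' followed by 'sub csp, csp, #0x345'.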
+ int64_t imm = space.immediate();
+ ASSERT(is_uint24(imm));
+
+ int64_t imm_top_12_bits = imm >> 12;
+ sub(csp, StackPointer(), imm_top_12_bits << 12);
+ imm -= imm_top_12_bits << 12;
+ if (imm > 0) {
+ sub(csp, csp, imm);
+ }
+ } else {
+ sub(csp, StackPointer(), space);
+ }
+}
+
+
+void MacroAssembler::InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ Mov(root, Operand(roots_array_start));
+}
+
+
+void MacroAssembler::SmiTag(Register dst, Register src) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
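+ // Tagging is a plain left shift by kSmiShift, which leaves the tag bit
+ // (bit 0, kSmiTag == 0) clear.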
+ Lsl(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
+
+
+void MacroAssembler::SmiUntag(Register dst, Register src) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(src);
+ }
+ Asr(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
+
+
+void MacroAssembler::SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is32Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ // Check if the tag bit is set.
+ if (smi_label) {
+ Tbz(value, 0, smi_label);
+ if (not_smi_label) {
+ B(not_smi_label);
+ }
+ } else {
+ ASSERT(not_smi_label);
+ Tbnz(value, 0, not_smi_label);
+ }
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ JumpIfSmi(value, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ // Check if both tag bits are clear.
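+ // The tag bit of (value1 | value2) is clear only if both tag bits are clear.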
+ Orr(Tmp0(), value1, value2);
+ JumpIfSmi(Tmp0(), both_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ // Check if either tag bit is clear.
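+ // The tag bit of (value1 & value2) is clear if at least one tag bit is clear.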
+ And(Tmp0(), value1, value2);
+ JumpIfSmi(Tmp0(), either_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfBothSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::IsObjectNameType(Register object,
+ Register type,
+ Label* fail) {
+ CompareObjectType(object, type, type, LAST_NAME_TYPE);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ // If cmp result is lt, the following ccmp will clear all flags.
+ // Z == 0, N == V implies gt condition.
+ Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
+
+ // If no fail label was provided, just fall through and leave the flags
+ // updated.
+ if (fail != NULL) {
+ B(gt, fail);
+ }
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+ Register type,
+ Label* not_string,
+ Label* string) {
+ Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
+
+ STATIC_ASSERT(kStringTag == 0);
+ ASSERT((string != NULL) || (not_string != NULL));
+ if (string == NULL) {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ } else if (not_string == NULL) {
+ TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
+ } else {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ B(string);
+ }
+}
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ Mov(Tmp0(), Operand(handle));
+ Push(Tmp0());
+}
+
+
+void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ if (csp.Is(StackPointer())) {
+ ASSERT(size % 16 == 0);
+ } else {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+
+ if (unit_size == 0) {
+ return;
+ }
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSize);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
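+ // The count is smi-tagged, so the scale factor is log2(unit_size) adjusted
+ // by kSmiShift; a negative shift means the operand must be shifted right.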
+ const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (csp.Is(StackPointer())) {
+ ASSERT(size % 16 == 0);
+ } else if (emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+
+ if (unit_size == 0) {
+ return;
+ }
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSize);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+ const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label) {
+ if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
+ ((cond == eq) || (cond == ne))) {
+ if (cond == eq) {
+ Cbz(lhs, label);
+ } else {
+ Cbnz(lhs, label);
+ }
+ } else {
+ Cmp(lhs, rhs);
+ B(cond, label);
+ }
+}
+
+
+void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbnz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(ne, label);
+ }
+}
+
+
+void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(eq, label);
+ }
+}
+
+
+void MacroAssembler::InlineData(uint64_t data) {
+ ASSERT(is_uint16(data));
+ InstructionAccurateScope scope(this, 1);
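+ // A move into xzr has no architectural effect; it is used here only to embed
+ // the 16-bit payload in the instruction stream.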
+ movz(xzr, data);
+}
+
+
+void MacroAssembler::EnableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateEnable);
+}
+
+
+void MacroAssembler::DisableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateDisable);
+}
+
+
+void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
+ ASSERT(strlen(marker_name) == 2);
+
+ // We allow only printable characters in the marker names. Unprintable
+ // characters are reserved for controlling features of the instrumentation.
+ ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, (marker_name[1] << 8) | marker_name[0]);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_A64_MACRO_ASSEMBLER_A64_INL_H_
diff --git a/deps/v8/src/a64/macro-assembler-a64.cc b/deps/v8/src/a64/macro-assembler-a64.cc
new file mode 100644
index 0000000000..14fb2fda63
--- /dev/null
+++ b/deps/v8/src/a64/macro-assembler-a64.cc
@@ -0,0 +1,4975 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "bootstrapper.h"
+#include "codegen.h"
+#include "cpu-profiler.h"
+#include "debug.h"
+#include "isolate-inl.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
+#define __
+
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate,
+ byte * buffer,
+ unsigned buffer_size)
+ : Assembler(arg_isolate, buffer, buffer_size),
+ generating_stub_(false),
+#if DEBUG
+ allow_macro_instructions_(true),
+#endif
+ has_frame_(false),
+ use_real_aborts_(true),
+ sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
+}
+
+
+void MacroAssembler::LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(Tmp0(), operand);
+ Logical(rd, rn, Tmp0(), op);
+
+ } else if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.SizeInBits();
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = ~immediate;
+ if (rd.Is32Bits()) {
+ immediate &= kWRegMask;
+ }
+ }
+
+ // Special cases for all set or all clear immediates.
+ if (immediate == 0) {
+ switch (op) {
+ case AND:
+ Mov(rd, 0);
+ return;
+ case ORR: // Fall through.
+ case EOR:
+ Mov(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if ((rd.Is64Bits() && (immediate == -1L)) ||
+ (rd.Is32Bits() && (immediate == 0xffffffffL))) {
+ switch (op) {
+ case AND:
+ Mov(rd, rn);
+ return;
+ case ORR:
+ Mov(rd, immediate);
+ return;
+ case EOR:
+ Mvn(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // Immediate can't be encoded: synthesize using move immediate.
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, immediate);
+ if (rd.Is(csp)) {
+ // If rd is the stack pointer we cannot use it as the destination
+ // register so we use the temp register as an intermediate again.
+ Logical(temp, rn, temp, op);
+ Mov(csp, temp);
+ } else {
+ Logical(rd, rn, temp, op);
+ }
+ }
+
+ } else if (operand.IsExtendedRegister()) {
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports shift <= 4. We want to support exactly the
+ // same modes here.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = AppropriateTempFor(rn, operand.reg());
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ Logical(rd, rn, temp, op);
+
+ } else {
+ // The operand can be encoded in the instruction.
+ ASSERT(operand.IsShiftedRegister());
+ Logical(rd, rn, operand, op);
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ ASSERT(!rd.IsZero());
+
+ // TODO(all) extend to support more immediates.
+ //
+ // Immediates on AArch64 can be produced using an initial value, and zero to
+ // three move-keep (movk) operations.
+ //
+ // Initial values can be generated with:
+ // 1. 64-bit move zero (movz).
+ // 2. 32-bit move inverted (movn).
+ // 3. 64-bit move inverted.
+ // 4. 32-bit orr immediate.
+ // 5. 64-bit orr immediate.
+ // Move-keep may then be used to modify each of the 16-bit half-words.
+ //
+ // The code below supports all five initial value generators, and
+ // applying move-keep operations to move-zero and move-inverted initial
+ // values.
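+ //
+ // For example (illustrative values): 0x0000000000001234 fits a single movz,
+ // 0xffffffffffff5678 fits a single movn, and 0x0000123400005678 needs a movz
+ // plus one movk.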
+
+ unsigned reg_size = rd.SizeInBits();
+ unsigned n, imm_s, imm_r;
+ if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't
+ // write to the stack pointer.
+ movz(rd, imm);
+ } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move inverted instruction. Movn can't
+ // write to the stack pointer.
+ movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
+ } else {
+ // Generic immediate case. Imm will be represented by
+ // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
+ // A move-zero or move-inverted is generated for the first non-zero or
+ // non-0xffff immX, and a move-keep for subsequent non-zero immX.
+
+ uint64_t ignored_halfword = 0;
+ bool invert_move = false;
+ // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // halfwords, it's more efficient to use move-inverted.
+ if (CountClearHalfWords(~imm, reg_size) >
+ CountClearHalfWords(imm, reg_size)) {
+ ignored_halfword = 0xffffL;
+ invert_move = true;
+ }
+
+ // Mov instructions can't move value into the stack pointer, so set up a
+ // temporary register, if needed.
+ Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd;
+
+ // Iterate through the halfwords. Use movn/movz for the first non-ignored
+ // halfword, and movk for subsequent halfwords.
+ ASSERT((reg_size % 16) == 0);
+ bool first_mov_done = false;
+ for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+ if (imm16 != ignored_halfword) {
+ if (!first_mov_done) {
+ if (invert_move) {
+ movn(temp, (~imm16) & 0xffffL, 16 * i);
+ } else {
+ movz(temp, imm16, 16 * i);
+ }
+ first_mov_done = true;
+ } else {
+ // Construct a wider constant.
+ movk(temp, imm16, 16 * i);
+ }
+ }
+ }
+ ASSERT(first_mov_done);
+
+ // Move the temporary if the original destination register was the stack
+ // pointer.
+ if (rd.IsSP()) {
+ mov(rd, temp);
+ }
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ // Provide a swap register for instructions that need to write into the
+ // system stack pointer (and can't do this inherently).
+ Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd);
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(dst, operand);
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(dst, operand.immediate());
+
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Emit a shift instruction if moving a shifted register. This operation
+ // could also be achieved using an orr instruction (like orn used by Mvn),
+ // but using a shift instruction makes the disassembly clearer.
+ EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit an extend instruction if moving an extended register. This handles
+ // extend with post-shift operations, too.
+ EmitExtendShift(dst, operand.reg(), operand.extend(),
+ operand.shift_amount());
+
+ } else {
+ // Otherwise, emit a register move only if the registers are distinct, or
+ // if they are not X registers.
+ //
+ // Note that mov(w0, w0) is not a no-op because it clears the top word of
+ // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
+ // registers is not required to clear the top word of the X register. In
+ // this case, the instruction is discarded.
+ //
+ // If csp is an operand, add #0 is emitted, otherwise, orr #0.
+ if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
+ (discard_mode == kDontDiscardForSameWReg))) {
+ Assembler::mov(rd, operand.reg());
+ }
+ // This case can handle writes into the system stack pointer directly.
+ dst = rd;
+ }
+
+ // Copy the result to the system stack pointer.
+ if (!dst.Is(rd)) {
+ ASSERT(rd.IsSP());
+ ASSERT(dst.Is(Tmp1()));
+ Assembler::mov(rd, dst);
+ }
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(Tmp0(), operand);
+ Mvn(rd, Tmp0());
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(rd, ~operand.immediate());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit two instructions for the extend case. This differs from Mov, as
+ // the extend and invert can't be achieved in one instruction.
+ Register temp = AppropriateTempFor(rd, operand.reg());
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ mvn(rd, temp);
+
+ } else {
+ // Otherwise, emit a register move only if the registers are distinct.
+ // If the jssp is an operand, add #0 is emitted, otherwise, orr #0.
+ mvn(rd, operand);
+ }
+}
+
+
+unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size % 8) == 0);
+ int count = 0;
+ for (unsigned i = 0; i < (reg_size / 16); i++) {
+ if ((imm & 0xffff) == 0) {
+ count++;
+ }
+ imm >>= 16;
+ }
+ return count;
+}
+
+
+// The movz instruction can generate immediates containing an arbitrary 16-bit
+ // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
+bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+ return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+}
+
+
+// The movn instruction can generate immediates containing an arbitrary 16-bit
+ // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
+bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+ return IsImmMovz(~imm, reg_size);
+}
+
+
+void MacroAssembler::ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(Tmp0(), operand);
+ ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op);
+
+ } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
+ (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ // The immediate can be encoded in the instruction, or the operand is an
+ // unshifted register: call the assembler.
+ ConditionalCompare(rn, operand, nzcv, cond, op);
+
+ } else {
+ // The operand isn't directly supported by the instruction: perform the
+ // operation on a temporary register.
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand);
+ ConditionalCompare(rn, temp, nzcv, cond, op);
+ }
+}
+
+
+void MacroAssembler::Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.IsImmediate()) {
+ // Immediate argument. Handle special cases of 0, 1 and -1 using zero
+ // register.
+ int64_t imm = operand.immediate();
+ Register zr = AppropriateZeroRegFor(rn);
+ if (imm == 0) {
+ csel(rd, rn, zr, cond);
+ } else if (imm == 1) {
+ csinc(rd, rn, zr, cond);
+ } else if (imm == -1) {
+ csinv(rd, rn, zr, cond);
+ } else {
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand.immediate());
+ csel(rd, rn, temp, cond);
+ }
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
+ // Unshifted register argument.
+ csel(rd, rn, operand.reg(), cond);
+ } else {
+ // All other arguments.
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand);
+ csel(rd, rn, temp, cond);
+ }
+}
+
+
+void MacroAssembler::AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
+ !operand.NeedsRelocation() && (S == LeaveFlags)) {
+ // The instruction would be a nop. Avoid generating useless code.
+ return;
+ }
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(Tmp0(), operand);
+ AddSubMacro(rd, rn, Tmp0(), S, op);
+ } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ } else {
+ AddSub(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(Tmp0(), operand);
+ AddSubWithCarryMacro(rd, rn, Tmp0(), S, op);
+
+ } else if (operand.IsImmediate() ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ // Add/sub with carry (immediate or ROR shifted register).
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand);
+ AddSubWithCarry(rd, rn, temp, S, op);
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Add/sub with carry (shifted register).
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+ ASSERT(is_uintn(operand.shift_amount(),
+ rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2));
+ Register temp = AppropriateTempFor(rn, operand.reg());
+ EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else if (operand.IsExtendedRegister()) {
+ // Add/sub with carry (extended register).
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports a shift <= 4. We want to support exactly the
+ // same modes.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = AppropriateTempFor(rn, operand.reg());
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else {
+ // The addressing mode is directly supported by the instruction.
+ AddSubWithCarry(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ int64_t offset = addr.offset();
+ LSDataSize size = CalcLSDataSize(op);
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
+ !IsImmLSUnscaled(offset)) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ Register temp = AppropriateTempFor(addr.base());
+ Mov(temp, addr.offset());
+ LoadStore(rt, MemOperand(addr.base(), temp), op);
+ } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
+ // Post-index beyond unscaled addressing range.
+ LoadStore(rt, MemOperand(addr.base()), op);
+ add(addr.base(), addr.base(), offset);
+ } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
+ // Pre-index beyond unscaled addressing range.
+ add(addr.base(), addr.base(), offset);
+ LoadStore(rt, MemOperand(addr.base()), op);
+ } else {
+ // Encodable in one load/store instruction.
+ LoadStore(rt, addr, op);
+ }
+}
+
+
+void MacroAssembler::Load(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8()) {
+ Ldrsb(rt, addr);
+ } else if (r.IsUInteger8()) {
+ Ldrb(rt, addr);
+ } else if (r.IsInteger16()) {
+ Ldrsh(rt, addr);
+ } else if (r.IsUInteger16()) {
+ Ldrh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Ldr(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ Ldr(rt, addr);
+ }
+}
+
+
+void MacroAssembler::Store(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ Strb(rt, addr);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ Strh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Str(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ Str(rt, addr);
+ }
+}
+
+
+bool MacroAssembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+ // Account for the branch around the veneers and the guard.
+ int protection_offset = 2 * kInstructionSize;
+ return pc_offset() > max_reachable_pc - margin - protection_offset -
+ static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+}
+
+
+void MacroAssembler::EmitVeneers(bool need_protection) {
+ RecordComment("[ Veneers");
+
+ Label end;
+ if (need_protection) {
+ B(&end);
+ }
+
+ EmitVeneersGuard();
+
+ {
+ InstructionAccurateScope scope(this);
+ Label size_check;
+
+ std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
+
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ if (ShouldEmitVeneer(it->first)) {
+ Instruction* branch = InstructionAt(it->second.pc_offset_);
+ Label* label = it->second.label_;
+
+#ifdef DEBUG
+ __ bind(&size_check);
+#endif
+ // Patch the branch to point to the current position, and emit a branch
+ // to the label.
+ Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
+ RemoveBranchFromLabelLinkChain(branch, label, veneer);
+ branch->SetImmPCOffsetTarget(veneer);
+ b(label);
+#ifdef DEBUG
+ ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
+ static_cast<uint64_t>(kMaxVeneerCodeSize));
+ size_check.Unuse();
+#endif
+
+ it_to_delete = it++;
+ unresolved_branches_.erase(it_to_delete);
+ } else {
+ ++it;
+ }
+ }
+ }
+
+ Bind(&end);
+
+ RecordComment("]");
+}
+
+
+void MacroAssembler::EmitVeneersGuard() {
+ if (emit_debug_code()) {
+ Unreachable();
+ }
+}
+
+
+void MacroAssembler::CheckVeneers(bool need_protection) {
+ if (unresolved_branches_.empty()) {
+ return;
+ }
+
+ CHECK(pc_offset() < unresolved_branches_first_limit());
+ int margin = kVeneerDistanceMargin;
+ if (!need_protection) {
+ // Prefer emitting veneers protected by an existing instruction.
+ // The 4 divisor is a finger in the air guess. With a default margin of 2KB,
+ // that leaves 512B = 128 instructions of extra margin to avoid requiring a
+ // protective branch.
+ margin += margin / 4;
+ }
+ if (ShouldEmitVeneer(unresolved_branches_first_limit(), margin)) {
+ EmitVeneers(need_protection);
+ }
+}
+
+
+bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
+ Label *label, ImmBranchType b_type) {
+ bool need_longer_range = false;
+ // There are two situations in which we care about the offset being out of
+ // range:
+ // - The label is bound but too far away.
+ // - The label is not bound but linked, and the previous branch
+ // instruction in the chain is too far away.
+ if (label->is_bound() || label->is_linked()) {
+ need_longer_range =
+ !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
+ }
+ if (!need_longer_range && !label->is_bound()) {
+ int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
+ unresolved_branches_.insert(
+ std::pair<int, FarBranchInfo>(max_reachable_pc,
+ FarBranchInfo(pc_offset(), label)));
+ }
+ return need_longer_range;
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
+
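+ // A conditional branch can only reach +/-1MB. For targets that may be out of
+ // range, branch over an unconditional branch (+/-128MB) using the inverted
+ // condition.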
+ if (need_extra_instructions) {
+ b(&done, InvertCondition(cond));
+ b(label);
+ } else {
+ b(label, cond);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
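+ // tbnz can only reach +/-32KB. For targets that may be out of range, branch
+ // over an unconditional branch using the opposite test (tbz).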
+ if (need_extra_instructions) {
+ tbz(rt, bit_pos, &done);
+ b(label);
+ } else {
+ tbnz(rt, bit_pos, label);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
+ if (need_extra_instructions) {
+ tbnz(rt, bit_pos, &done);
+ b(label);
+ } else {
+ tbz(rt, bit_pos, label);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbz(rt, &done);
+ b(label);
+ } else {
+ cbnz(rt, label);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbnz(rt, &done);
+ b(label);
+ } else {
+ cbz(rt, label);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
+// Pseudo-instructions.
+
+
+void MacroAssembler::Abs(const Register& rd, const Register& rm,
+ Label* is_not_representable,
+ Label* is_representable) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(AreSameSizeAndType(rd, rm));
+
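+ // Comparing with 1 makes 'lt' true for zero and negative inputs, and sets
+ // the V flag when rm holds the most negative representable value.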
+ Cmp(rm, 1);
+ Cneg(rd, rm, lt);
+
+ // If the comparison sets the v flag, the input was the smallest value
+ // representable by rm, and the mathematical result of abs(rm) is not
+ // representable using two's complement.
+ if ((is_not_representable != NULL) && (is_representable != NULL)) {
+ B(is_not_representable, vs);
+ B(is_representable);
+ } else if (is_not_representable != NULL) {
+ B(is_not_representable, vs);
+ } else if (is_representable != NULL) {
+ B(is_representable, vc);
+ }
+}
+
+
+// Abstracted stack operations.
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3) {
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ ASSERT(src0.IsValid());
+
+ int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
+ int size = src0.SizeInBytes();
+
+ PrepareForPush(count, size);
+ PushHelper(count, size, src0, src1, src2, src3);
+}
+
+
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(dst0.IsValid());
+
+ int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PrepareForPop(count, size);
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::PushCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+
+ PrepareForPush(registers.Count(), size);
+ // Push up to four registers at a time because if the current stack pointer is
+ // csp and reg_size is 32, registers must be pushed in blocks of four in order
+ // to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& src0 = registers.PopHighestIndex();
+ const CPURegister& src1 = registers.PopHighestIndex();
+ const CPURegister& src2 = registers.PopHighestIndex();
+ const CPURegister& src3 = registers.PopHighestIndex();
+ int count = count_before - registers.Count();
+ PushHelper(count, size, src0, src1, src2, src3);
+ }
+}
+
+
+void MacroAssembler::PopCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+
+ PrepareForPop(registers.Count(), size);
+ // Pop up to four registers at a time because if the current stack pointer is
+ // csp and reg_size is 32, registers must be popped in blocks of four in
+ // order to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ const CPURegister& dst2 = registers.PopLowestIndex();
+ const CPURegister& dst3 = registers.PopLowestIndex();
+ int count = count_before - registers.Count();
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+ }
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::PushMultipleTimes(int count, Register src) {
+ int size = src.SizeInBytes();
+
+ PrepareForPush(count, size);
+
+ if (FLAG_optimize_for_size && count > 8) {
+ Label loop;
+ __ Mov(Tmp0(), count / 2);
+ __ Bind(&loop);
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ __ Subs(Tmp0(), Tmp0(), 1);
+ __ B(ne, &loop);
+
+ count %= 2;
+ }
+
+ // Push up to four registers at a time if possible because if the current
+ // stack pointer is csp and the register size is 32, registers must be pushed
+ // in blocks of four in order to maintain the 16-byte alignment for csp.
+ while (count >= 4) {
+ PushHelper(4, size, src, src, src, src);
+ count -= 4;
+ }
+ if (count >= 2) {
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ count -= 2;
+ }
+ if (count == 1) {
+ PushHelper(1, size, src, NoReg, NoReg, NoReg);
+ count -= 1;
+ }
+ ASSERT(count == 0);
+}
+
+
+void MacroAssembler::PushHelper(int count, int size,
+ const CPURegister& src0,
+ const CPURegister& src1,
+ const CPURegister& src2,
+ const CPURegister& src3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ ASSERT(size == src0.SizeInBytes());
+
+ // When pushing multiple registers, the store order is chosen such that
+ // Push(a, b) is equivalent to Push(a) followed by Push(b).
+ switch (count) {
+ case 1:
+ ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
+ break;
+ case 2:
+ ASSERT(src2.IsNone() && src3.IsNone());
+ stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
+ break;
+ case 3:
+ ASSERT(src3.IsNone());
+ stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
+ str(src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ case 4:
+ // Skip over 4 * size, then fill in the gap. This allows four W registers
+ // to be pushed using csp, whilst maintaining 16-byte alignment for csp
+ // at all times.
+ stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PopHelper(int count, int size,
+ const CPURegister& dst0,
+ const CPURegister& dst1,
+ const CPURegister& dst2,
+ const CPURegister& dst3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(size == dst0.SizeInBytes());
+
+ // When popping multiple registers, the load order is chosen such that
+ // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
+ switch (count) {
+ case 1:
+ ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
+ break;
+ case 2:
+ ASSERT(dst2.IsNone() && dst3.IsNone());
+ ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
+ break;
+ case 3:
+ ASSERT(dst3.IsNone());
+ ldr(dst2, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
+ break;
+ case 4:
+ // Load the higher addresses first, then load the lower addresses and
+ // skip the whole block in the second instruction. This allows four W
+ // registers to be popped using csp, whilst maintaining 16-byte alignment
+ // for csp at all times.
+ ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PrepareForPush(int count, int size) {
+ // TODO(jbramley): Use AssertStackConsistency here, if possible. See the
+ // AssertStackConsistency for details of why we can't at the moment.
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ ASSERT((count * size) % 16 == 0);
+ } else {
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // the system stack pointer will still be modified in order to comply with
+ // ABI rules about accessing memory below the system stack pointer.
+ BumpSystemStackPointer(count * size);
+ }
+}
+
+
+void MacroAssembler::PrepareForPop(int count, int size) {
+ AssertStackConsistency();
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ ASSERT((count * size) % 16 == 0);
+ }
+}
+
+
+void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.immediate() >= 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Str(src, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.immediate() >= 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Ldr(dst, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PokePair(const CPURegister& src1,
+ const CPURegister& src2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(src1, src2));
+ ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
+ Stp(src1, src2, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PeekPair(const CPURegister& dst1,
+ const CPURegister& dst2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(dst1, dst2));
+ ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
+ Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PushCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ MemOperand tos(csp, -2 * kXRegSizeInBytes, PreIndex);
+
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
+
+ stp(x29, x30, tos);
+ stp(x27, x28, tos); // x28 = jssp
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
+}
+
+
+void MacroAssembler::PopCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ MemOperand tos(csp, 2 * kXRegSizeInBytes, PostIndex);
+
+ ldp(x19, x20, tos);
+ ldp(x21, x22, tos);
+ ldp(x23, x24, tos);
+ ldp(x25, x26, tos);
+ ldp(x27, x28, tos); // x28 = jssp
+ ldp(x29, x30, tos);
+
+ ldp(d8, d9, tos);
+ ldp(d10, d11, tos);
+ ldp(d12, d13, tos);
+ ldp(d14, d15, tos);
+}
+
+
+void MacroAssembler::AssertStackConsistency() {
+ if (emit_debug_code() && !csp.Is(StackPointer())) {
+ if (csp.Is(StackPointer())) {
+ // TODO(jbramley): Check for csp alignment if it is the stack pointer.
+ } else {
+ // TODO(jbramley): Currently we cannot use this assertion in Push because
+ // some calling code assumes that the flags are preserved. For an example,
+ // look at Builtins::Generate_ArgumentsAdaptorTrampoline.
+ Cmp(csp, StackPointer());
+ Check(ls, kTheCurrentStackPointerIsBelowCsp);
+ }
+ }
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index) {
+ // TODO(jbramley): Most root values are constants, and can be synthesized
+ // without a load. Refer to the ARM back end for details.
+ Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index) {
+ Str(source, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadTrueFalseRoots(Register true_root,
+ Register false_root) {
+ STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
+ Ldp(true_root, false_root,
+ MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ Mov(result, Operand(cell));
+ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ } else {
+ Mov(result, Operand(object));
+ }
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Map::EnumLengthBits::kMask);
+}
+
+
+void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
+}
+
+
+void MacroAssembler::CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime) {
+ ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
+ scratch3));
+
+ Register empty_fixed_array_value = scratch0;
+ Register current_object = scratch1;
+
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Label next, start;
+
+ Mov(current_object, object);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ Register map = scratch2;
+ Register enum_length = scratch3;
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ EnumLengthUntagged(enum_length, map);
+ Cmp(enum_length, kInvalidEnumCacheSentinel);
+ B(eq, call_runtime);
+
+ B(&start);
+
+ Bind(&next);
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLengthUntagged(enum_length, map);
+ Cbnz(enum_length, call_runtime);
+
+ Bind(&start);
+
+ // Check that there are no elements. Register current_object contains the
+ // current JS object we've reached through the prototype chain.
+ Label no_elements;
+ Ldr(current_object, FieldMemOperand(current_object,
+ JSObject::kElementsOffset));
+ Cmp(current_object, empty_fixed_array_value);
+ B(eq, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
+ B(ne, call_runtime);
+
+ Bind(&no_elements);
+ Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
+ Cmp(current_object, null_value);
+ B(ne, &next);
+}
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ Add(scratch1, receiver,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
+ Cmp(scratch1, Operand(new_space_start));
+ B(lt, no_memento_found);
+
+ Mov(scratch2, Operand(new_space_allocation_top));
+ Ldr(scratch2, MemOperand(scratch2));
+ Cmp(scratch1, scratch2);
+ B(gt, no_memento_found);
+
+ Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
+ Cmp(scratch1,
+ Operand(isolate()->factory()->allocation_memento_map()));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2) {
+ // Handler expects argument in x0.
+ ASSERT(exception.Is(x0));
+
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
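+  // A sketch of the address computed below (the table holds smi-tagged byte
+  // offsets into the code object):
+  //   entry = object + Code::kHeaderSize - kHeapObjectTag
+  //           + Untag(handler_table[state >> StackHandler::kKindWidth])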
+ Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
+ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
+ Lsr(scratch2, state, StackHandler::kKindWidth);
+ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
+ Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
+ Br(scratch1);
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+ Condition cond,
+ Label* branch) {
+ ASSERT(cond == eq || cond == ne);
+ // Use Tmp1() to have a different destination register, as Tmp0() will be used
+ // for relocation.
+ And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate())));
+ Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate())));
+ B(cond, branch);
+}
+
+
+void MacroAssembler::Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+ // Restore the next handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Restore the context and frame pointer.
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ // If the handler is a JS frame, restore the context to the frame.
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
+ Label not_js_frame;
+ Cbz(cp, &not_js_frame);
+ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ Bind(&not_js_frame);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top stack handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ B(&check_kind);
+ Bind(&fetch_next);
+ Peek(jssp, StackHandlerConstants::kNextOffset);
+
+ Bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ Peek(scratch2, StackHandlerConstants::kStateOffset);
+ TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Clear the context and frame pointer (0 was
+ // saved in the handler).
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+void MacroAssembler::Throw(BailoutReason reason) {
+ Label throw_start;
+ Bind(&throw_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ RecordComment("Throw message: ");
+ RecordComment((msg != NULL) ? msg : "UNKNOWN");
+#endif
+
+ Mov(x0, Operand(Smi::FromInt(reason)));
+ Push(x0);
+
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // ThrowMessage should not return here.
+ Unreachable();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label ok;
+ B(InvertCondition(cc), &ok);
+ Throw(reason);
+ Bind(&ok);
+}
+
+
+void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
+ Label ok;
+ JumpIfNotSmi(value, &ok);
+ Throw(reason);
+ Bind(&ok);
+}
+
+
+void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
+ ASSERT(smi.Is64Bits());
+ Abs(smi, smi, slow);
+}
+
+
+void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(eq, reason);
+ }
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, reason);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ // TODO(jbramley): Add AbortIfSmi and related functions.
+ Label not_smi;
+ JumpIfNotSmi(object, &not_smi);
+ Abort(kOperandIsASmiAndNotAName);
+ Bind(&not_smi);
+
+ Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE);
+ Check(ls, kOperandIsNotAName);
+ }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ Register temp = Tmp1();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, kOperandIsASmiAndNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All arguments must be on the stack before this function is called.
+ // x0 holds the return value after the call.
+
+ // Check that the number of arguments matches what the function expects.
+ // If f->nargs is -1, the function can accept a variable number of arguments.
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
+ // Illegal operation: drop the stack arguments and return undefined.
+ if (num_arguments > 0) {
+ Drop(num_arguments);
+ }
+ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ return;
+ }
+
+ // Place the necessary arguments.
+ Mov(x0, num_arguments);
+ Mov(x1, Operand(ExternalReference(f, isolate())));
+
+ CEntryStub stub(1, save_doubles);
+ CallStub(&stub);
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
+ ASM_LOCATION("CallApiFunctionAndReturn");
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate());
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate()),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate()),
+ next_address);
+
+ ASSERT(function_address.is(x1) || function_address.is(x2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
+ Ldrb(w10, MemOperand(x10));
+ Cbz(w10, &profiler_disabled);
+ Mov(x3, Operand(thunk_ref));
+ B(&end_profiler_check);
+
+ Bind(&profiler_disabled);
+ Mov(x3, function_address);
+ Bind(&end_profiler_check);
+
+ // Save the callee-save registers we are going to use.
+ // TODO(all): Is this necessary? ARM doesn't do it.
+ STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
+ Poke(x19, (spill_offset + 0) * kXRegSizeInBytes);
+ Poke(x20, (spill_offset + 1) * kXRegSizeInBytes);
+ Poke(x21, (spill_offset + 2) * kXRegSizeInBytes);
+ Poke(x22, (spill_offset + 3) * kXRegSizeInBytes);
+
+ // Allocate HandleScope in callee-save registers.
+  // We will need to restore the HandleScope after the call to the API
+  // function; by allocating it in callee-saved registers it will be preserved
+  // by C code.
+ Register handle_scope_base = x22;
+ Register next_address_reg = x19;
+ Register limit_reg = x20;
+ Register level_reg = w21;
+
+ Mov(handle_scope_base, Operand(next_address));
+ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ Add(level_reg, level_reg, 1);
+ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+  // The native call returns to the DirectCEntry stub, which redirects to the
+  // return address pushed on the stack (it could have moved after GC).
+  // The DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub;
+ stub.GenerateCall(this, x3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ Ldr(x0, return_value_operand);
+ Bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ if (emit_debug_code()) {
+ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
+ Cmp(w1, level_reg);
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ Sub(level_reg, level_reg, 1);
+ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
+ Cmp(limit_reg, x1);
+ B(ne, &delete_allocated_handles);
+
+ Bind(&leave_exit_frame);
+ // Restore callee-saved registers.
+ Peek(x19, (spill_offset + 0) * kXRegSizeInBytes);
+ Peek(x20, (spill_offset + 1) * kXRegSizeInBytes);
+ Peek(x21, (spill_offset + 2) * kXRegSizeInBytes);
+ Peek(x22, (spill_offset + 3) * kXRegSizeInBytes);
+
+ // Check if the function scheduled an exception.
+ Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate())));
+ Ldr(x5, MemOperand(x5));
+ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
+ Bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ Ldr(cp, *context_restore_operand);
+ }
+
+ LeaveExitFrame(false, x1, !restore_context);
+ Drop(stack_space);
+ Ret();
+
+ Bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
+ }
+ B(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ Bind(&delete_allocated_handles);
+ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ // Save the return value in a callee-save register.
+ Register saved_result = x19;
+ Mov(saved_result, x0);
+ Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(
+ ExternalReference::delete_handle_scope_extensions(isolate()), 1);
+ Mov(x0, saved_result);
+ B(&leave_exit_frame);
+}
+
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ Mov(x0, num_arguments);
+ Mov(x1, Operand(ext));
+
+ CEntryStub stub(1);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+ Mov(x1, Operand(builtin));
+ CEntryStub stub(1);
+ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the builtins object into target register.
+ Ldr(target, GlobalObjectMemOperand());
+ Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ Ldr(target, FieldMemOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ ASSERT(!target.is(x1));
+ GetBuiltinFunction(x1, id);
+ // Load the code entry point from the builtins object.
+ Ldr(target, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ ASM_LOCATION("MacroAssembler::InvokeBuiltin");
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ GetBuiltinEntry(x2, id);
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(x2));
+ Call(x2);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(x2);
+ }
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Mov(x0, num_arguments);
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
+}
+
+
+void MacroAssembler::InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(string, length, scratch1, scratch2));
+ LoadRoot(scratch2, map_index);
+ SmiTag(scratch1, length);
+ Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+
+ Mov(scratch2, String::kEmptyHashField);
+ Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_A64
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one ARM
+ // platform for another ARM platform with a different alignment.
+ return OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_A64
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // V8_HOST_ARCH_A64
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args) {
+ CallCFunction(function, num_of_reg_args, 0);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args,
+ int num_of_double_args) {
+ Mov(Tmp0(), Operand(function));
+ CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_of_reg_args,
+ int num_of_double_args) {
+ ASSERT(has_frame());
+ // We can pass 8 integer arguments in registers. If we need to pass more than
+ // that, we'll need to implement support for passing them on the stack.
+ ASSERT(num_of_reg_args <= 8);
+
+ // If we're passing doubles, we're limited to the following prototypes
+ // (defined by ExternalReference::Type):
+ // BUILTIN_COMPARE_CALL: int f(double, double)
+ // BUILTIN_FP_FP_CALL: double f(double, double)
+ // BUILTIN_FP_CALL: double f(double)
+ // BUILTIN_FP_INT_CALL: double f(double, int)
+ if (num_of_double_args > 0) {
+ ASSERT(num_of_reg_args <= 1);
+ ASSERT((num_of_double_args + num_of_reg_args) <= 2);
+ }
+
+
+ // If the stack pointer is not csp, we need to derive an aligned csp from the
+ // current stack pointer.
+ const Register old_stack_pointer = StackPointer();
+ if (!csp.Is(old_stack_pointer)) {
+ AssertStackConsistency();
+
+ int sp_alignment = ActivationFrameAlignment();
+ // The ABI mandates at least 16-byte alignment.
+ ASSERT(sp_alignment >= 16);
+ ASSERT(IsPowerOf2(sp_alignment));
+
+    // The current stack pointer is a callee-saved register, and is preserved
+    // across the call.
+ ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
+
+ // Align and synchronize the system stack pointer with jssp.
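+    // For example, with 16-byte alignment, Bic clears the low four bits, so a
+    // jssp value ending in 0x...38 yields a csp ending in 0x...30.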
+ Bic(csp, old_stack_pointer, sp_alignment - 1);
+ SetStackPointer(csp);
+ }
+
+ // Call directly. The function called cannot cause a GC, or allow preemption,
+ // so the return address in the link register stays correct.
+ Call(function);
+
+ if (!csp.Is(old_stack_pointer)) {
+ if (emit_debug_code()) {
+ // Because the stack pointer must be aligned on a 16-byte boundary, the
+ // aligned csp can be up to 12 bytes below the jssp. This is the case
+ // where we only pushed one W register on top of an aligned jssp.
+ Register temp = Tmp1();
+ ASSERT(ActivationFrameAlignment() == 16);
+ Sub(temp, csp, old_stack_pointer);
+ // We want temp <= 0 && temp >= -12.
+ Cmp(temp, 0);
+ Ccmp(temp, -12, NFlag, le);
+ Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
+ }
+ SetStackPointer(old_stack_pointer);
+ }
+}
+
+
+void MacroAssembler::Jump(Register target) {
+ Br(target);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
+ Mov(Tmp0(), Operand(target, rmode));
+ Br(Tmp0());
+}
+
+
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ AllowDeferredHandleDereference embedding_raw_address;
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+void MacroAssembler::Call(Register target) {
+ BlockConstPoolScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ Blr(target);
+
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+void MacroAssembler::Call(Label* target) {
+ BlockConstPoolScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ Bl(target);
+
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+// MacroAssembler::CallSize is sensitive to changes in this function, as it
+// needs to know how many instructions are used to branch to the target.
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
+ BlockConstPoolScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+ // Statement positions are expected to be recorded when the target
+ // address is loaded.
+ positions_recorder()->WriteRecordedPositions();
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ if (rmode == RelocInfo::NONE64) {
+ uint64_t imm = reinterpret_cast<uint64_t>(target);
+ movz(Tmp0(), (imm >> 0) & 0xffff, 0);
+ movk(Tmp0(), (imm >> 16) & 0xffff, 16);
+ movk(Tmp0(), (imm >> 32) & 0xffff, 32);
+ movk(Tmp0(), (imm >> 48) & 0xffff, 48);
+ } else {
+ LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode));
+ }
+ Blr(Tmp0());
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
+#endif
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
+ SetRecordedAstId(ast_id);
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
+
+ AllowDeferredHandleDereference embedding_raw_address;
+ Call(reinterpret_cast<Address>(code.location()), rmode);
+
+#ifdef DEBUG
+ // Check the size of the code generated.
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
+#endif
+}
+
+
+int MacroAssembler::CallSize(Register target) {
+ USE(target);
+ return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Label* target) {
+ USE(target);
+ return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
+ USE(target);
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ if (rmode == RelocInfo::NONE64) {
+ return kCallSizeWithoutRelocation;
+ } else {
+ return kCallSizeWithRelocation;
+ }
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+ USE(code);
+ USE(ast_id);
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ if (rmode == RelocInfo::NONE64) {
+ return kCallSizeWithoutRelocation;
+ } else {
+ return kCallSizeWithRelocation;
+ }
+}
+
+
+
+void MacroAssembler::JumpForHeapNumber(Register object,
+ Register heap_number_map,
+ Label* on_heap_number,
+ Label* on_not_heap_number) {
+ ASSERT(on_heap_number || on_not_heap_number);
+ // Tmp0() is used as a scratch register.
+ ASSERT(!AreAliased(Tmp0(), heap_number_map));
+ AssertNotSmi(object);
+
+ // Load the HeapNumber map if it is not passed.
+ if (heap_number_map.Is(NoReg)) {
+ heap_number_map = Tmp1();
+ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ } else {
+ // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map.
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ }
+
+ Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ Cmp(Tmp0(), heap_number_map);
+
+ if (on_heap_number) {
+ B(eq, on_heap_number);
+ }
+ if (on_not_heap_number) {
+ B(ne, on_not_heap_number);
+ }
+}
+
+
+void MacroAssembler::JumpIfHeapNumber(Register object,
+ Label* on_heap_number,
+ Register heap_number_map) {
+ JumpForHeapNumber(object,
+ heap_number_map,
+ on_heap_number,
+ NULL);
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Label* on_not_heap_number,
+ Register heap_number_map) {
+ JumpForHeapNumber(object,
+ heap_number_map,
+ NULL,
+ on_not_heap_number);
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
+
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
+ FixedArray::kLengthOffset));
+ Asr(mask, mask, 1); // Divide length by two.
+ Sub(mask, mask, 1); // Make mask.
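+  // For example, a cache of length 128 holds 64 (number, string) pairs, so
+  // the mask is 63 (0x3f).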
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(kDoubleSize == (kWRegSizeInBytes * 2));
+ Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
+ Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
+ Eor(scratch1, scratch1, scratch2);
+ And(scratch1, scratch1, mask);
+
+ // Calculate address of entry in string cache: each entry consists of two
+ // pointer sized fields.
+ Add(scratch1, number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
+  if (emit_debug_code()) {
+ Fcmp(d0, d1);
+ B(ne, not_found);
+ B(&load_result_from_cache);
+
+ Bind(&is_smi);
+ Register scratch = scratch1;
+ And(scratch, mask, Operand::UntagSmi(object));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ Add(scratch, number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Cmp(object, probe);
+ B(ne, not_found);
+
+ // Get the result from the cache.
+ Bind(&load_result_from_cache);
+ Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
+ scratch1, scratch2);
+}
+
+
+void MacroAssembler::TryConvertDoubleToInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion) {
+ // Convert to an int and back again, then compare with the original value.
+ Fcvtzs(as_int, value);
+ Scvtf(scratch_d, as_int);
+ Fcmp(value, scratch_d);
+
+ if (on_successful_conversion) {
+ B(on_successful_conversion, eq);
+ }
+ if (on_failed_conversion) {
+ B(on_failed_conversion, ne);
+ }
+}
+
+
+void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
+ Label* on_negative_zero) {
+  // Floating point -0.0 has the bit pattern 0x8000000000000000, which is
+  // INT64_MIN when reinterpreted as a signed integer, so subtracting 1 (cmp)
+  // will cause signed overflow for exactly that input.
+ Fmov(Tmp0(), input);
+ Cmp(Tmp0(), 1);
+ B(vs, on_negative_zero);
+}
+
+
+void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
+ // Clamp the value to [0..255].
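+  // A sketch of the cases handled below: 300 compares greater than its low
+  // byte (44) and saturates to 255; -5 compares less than its low byte (251)
+  // and saturates to 0; values already in [0..255] equal their low byte and
+  // pass through unchanged.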
+ Cmp(input.W(), Operand(input.W(), UXTB));
+ // If input < input & 0xff, it must be < 0, so saturate to 0.
+ Csel(output.W(), wzr, input.W(), lt);
+ // Create a constant 0xff.
+ Mov(WTmp0(), 255);
+ // If input > input & 0xff, it must be > 255, so saturate to 255.
+ Csel(output.W(), WTmp0(), output.W(), gt);
+}
+
+
+void MacroAssembler::ClampInt32ToUint8(Register in_out) {
+ ClampInt32ToUint8(in_out, in_out);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register output,
+ DoubleRegister input,
+ DoubleRegister dbl_scratch) {
+ // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
+ // - Inputs lower than 0 (including -infinity) produce 0.
+ // - Inputs higher than 255 (including +infinity) produce 255.
+ // Also, it seems that PIXEL types use round-to-nearest rather than
+ // round-towards-zero.
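+  // For example: -3.5 produces 0, 0.6 rounds to 1, and 1000.25 (or +infinity)
+  // is clamped to 255 by the Fmin below.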
+
+ // Squash +infinity before the conversion, since Fcvtnu will normally
+ // convert it to 0.
+ Fmov(dbl_scratch, 255);
+ Fmin(dbl_scratch, dbl_scratch, input);
+
+ // Convert double to unsigned integer. Values less than zero become zero.
+ // Values greater than 255 have already been clamped to 255.
+ Fcvtnu(output, dbl_scratch);
+}
+
+
+void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in a tight loop.
+ ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1()));
+ ASSERT(count >= 2);
+
+ const Register& remaining = scratch3;
+ Mov(remaining, count / 2);
+
+ // Only use the Assembler, so we can use Tmp0() and Tmp1().
+ InstructionAccurateScope scope(this);
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ sub(dst_untagged, dst, kHeapObjectTag);
+ sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields in pairs.
+ Label loop;
+ bind(&loop);
+ ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
+ PostIndex));
+ stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
+ PostIndex));
+ sub(remaining, remaining, 1);
+ cbnz(remaining, &loop);
+
+ // Handle the leftovers.
+ if (count & 1) {
+ ldr(Tmp0(), MemOperand(src_untagged));
+ str(Tmp0(), MemOperand(dst_untagged));
+ }
+}
+
+
+void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1()));
+
+ // Only use the Assembler, so we can use Tmp0() and Tmp1().
+ InstructionAccurateScope scope(this);
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ sub(dst_untagged, dst, kHeapObjectTag);
+ sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields in pairs.
+ for (unsigned i = 0; i < count / 2; i++) {
+ ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
+ PostIndex));
+ stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
+ PostIndex));
+ }
+
+ // Handle the leftovers.
+ if (count & 1) {
+ ldr(Tmp0(), MemOperand(src_untagged));
+ str(Tmp0(), MemOperand(dst_untagged));
+ }
+}
+
+
+void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1()));
+
+ // Only use the Assembler, so we can use Tmp0() and Tmp1().
+ InstructionAccurateScope scope(this);
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = Tmp1();
+ sub(dst_untagged, dst, kHeapObjectTag);
+ sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields one by one.
+ for (unsigned i = 0; i < count; i++) {
+ ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex));
+ str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex));
+ }
+}
+
+
+void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
+ unsigned count) {
+ // One of two methods is used:
+ //
+ // For high 'count' values where many scratch registers are available:
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in a tight loop.
+ //
+ // For low 'count' values or where few scratch registers are available:
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ //
+ // In both cases, fields are copied in pairs if possible, and left-overs are
+ // handled separately.
+ ASSERT(!temps.IncludesAliasOf(dst));
+ ASSERT(!temps.IncludesAliasOf(src));
+ ASSERT(!temps.IncludesAliasOf(Tmp0()));
+ ASSERT(!temps.IncludesAliasOf(Tmp1()));
+ ASSERT(!temps.IncludesAliasOf(xzr));
+ ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1()));
+
+ if (emit_debug_code()) {
+ Cmp(dst, src);
+ Check(ne, kTheSourceAndDestinationAreTheSame);
+ }
+
+ // The value of 'count' at which a loop will be generated (if there are
+ // enough scratch registers).
+ static const unsigned kLoopThreshold = 8;
+
+ ASSERT(!temps.IsEmpty());
+ Register scratch1 = Register(temps.PopLowestIndex());
+ Register scratch2 = Register(temps.PopLowestIndex());
+ Register scratch3 = Register(temps.PopLowestIndex());
+
+ if (scratch3.IsValid() && (count >= kLoopThreshold)) {
+ CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3);
+ } else if (scratch2.IsValid()) {
+ CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2);
+ } else if (scratch1.IsValid()) {
+ CopyFieldsUnrolledHelper(dst, src, count, scratch1);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::CopyBytes(Register dst,
+ Register src,
+ Register length,
+ Register scratch,
+ CopyHint hint) {
+ ASSERT(!AreAliased(src, dst, length, scratch));
+
+ // TODO(all): Implement a faster copy function, and use hint to determine
+ // which algorithm to use for copies.
+ if (emit_debug_code()) {
+ // Check copy length.
+ Cmp(length, 0);
+ Assert(ge, kUnexpectedNegativeValue);
+
+ // Check src and dst buffers don't overlap.
+ Add(scratch, src, length); // Calculate end of src buffer.
+ Cmp(scratch, dst);
+ Add(scratch, dst, length); // Calculate end of dst buffer.
+ Ccmp(scratch, src, ZFlag, gt);
+ Assert(le, kCopyBuffersOverlap);
+ }
+
+ Label loop, done;
+ Cbz(length, &done);
+
+ Bind(&loop);
+ Sub(length, length, 1);
+ Ldrb(scratch, MemOperand(src, 1, PostIndex));
+ Strb(scratch, MemOperand(dst, 1, PostIndex));
+ Cbnz(length, &loop);
+ Bind(&done);
+}
+
+
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ B(&entry);
+ Bind(&loop);
+ // TODO(all): consider using stp here.
+ Str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+ Bind(&entry);
+ Cmp(start_offset, end_offset);
+ B(lt, &loop);
+}
+
+
+void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure,
+ SmiCheckType smi_check) {
+
+ if (smi_check == DO_SMI_CHECK) {
+ JumpIfEitherSmi(first, second, failure);
+ } else if (emit_debug_code()) {
+ ASSERT(smi_check == DONT_DO_SMI_CHECK);
+ Label not_smi;
+ JumpIfEitherSmi(first, second, NULL, &not_smi);
+
+ // At least one input is a smi, but the flags indicated a smi check wasn't
+ // needed.
+ Abort(kUnexpectedSmi);
+
+ Bind(&not_smi);
+ }
+
+ // Test that both first and second are sequential ASCII strings.
+ Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
+ scratch2,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ ASSERT(!AreAliased(scratch1, second));
+ ASSERT(!AreAliased(scratch1, scratch2));
+ static const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
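+  // The Ccmp below chains the two checks: if the first comparison finds a
+  // sequential ASCII string, the second comparison is performed; otherwise
+  // the flags are forced to NoFlag (not equal), so the single B(ne) branches
+  // to failure if either string fails the check.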
+ And(scratch1, first, kFlatAsciiStringMask);
+ And(scratch2, second, kFlatAsciiStringMask);
+ Cmp(scratch1, kFlatAsciiStringTag);
+ Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch, type, kFlatAsciiStringMask);
+ Cmp(scratch, kFlatAsciiStringTag);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ ASSERT(!AreAliased(first, second, scratch1, scratch2));
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch1, first, kFlatAsciiStringMask);
+ And(scratch2, second, kFlatAsciiStringMask);
+ Cmp(scratch1, kFlatAsciiStringTag);
+ Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Register type,
+ Label* not_unique_name) {
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
+ // continue
+ // } else {
+ // goto not_unique_name
+ // }
+ Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
+ Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
+ B(ne, not_unique_name);
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag,
+ bool* definitely_mismatches,
+ const CallWrapper& call_wrapper) {
+ bool definitely_matches = false;
+ *definitely_mismatches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual arguments count match. If not,
+ // setup registers according to contract with ArgumentsAdaptorTrampoline:
+ // x0: actual arguments count.
+ // x1: function (passed through to callee).
+ // x2: expected arguments count.
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+ ASSERT(actual.is_immediate() || actual.reg().is(x0));
+ ASSERT(expected.is_immediate() || expected.reg().is(x2));
+ ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
+
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+
+ } else {
+ Mov(x0, actual.immediate());
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+        // Don't worry about adapting arguments for builtins that don't want
+        // that done. Skip the adaptation code by making it look like we have
+        // a match between the expected and actual number of arguments.
+ definitely_matches = true;
+ } else {
+ *definitely_mismatches = true;
+ // Set up x2 for the argument adaptor.
+ Mov(x2, expected.immediate());
+ }
+ }
+
+ } else { // expected is a register.
+ Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
+ : Operand(actual.reg());
+ // If actual == expected perform a regular invocation.
+ Cmp(expected.reg(), actual_op);
+ B(eq, &regular_invoke);
+ // Otherwise set up x0 for the argument adaptor.
+ Mov(x0, actual_op);
+ }
+
+ // If the argument counts may mismatch, generate a call to the argument
+ // adaptor.
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ Mov(x3, Operand(code_constant));
+ Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
+ }
+
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor));
+ Call(adaptor);
+ call_wrapper.AfterCall();
+ if (!*definitely_mismatches) {
+ // If the arg counts don't match, no extra code is emitted by
+ // MAsm::InvokeCode and we can just fall through.
+ B(done);
+ }
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ }
+ Bind(&regular_invoke);
+}
+
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ Label done;
+
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ &definitely_mismatches, call_wrapper);
+
+ // If we are certain that actual != expected, then we know InvokePrologue will
+ // have handled the call through the argument adaptor mechanism.
+ // The called function expects the call kind in x5.
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ Call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code);
+ }
+ }
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ Bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ ASSERT(function.is(x1));
+
+ Register expected_reg = x2;
+ Register code_reg = x3;
+
+ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // The number of arguments is stored as an int32_t, and -1 is a marker
+ // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+ // extension to correctly handle it.
+ Ldr(expected_reg, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+ Ldrsw(expected_reg,
+ FieldMemOperand(expected_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ Ldr(code_reg,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ ASSERT(function.Is(x1));
+
+ Register code_reg = x3;
+
+ // Set up the context.
+ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ __ LoadObject(x1, function);
+ InvokeFunction(x1, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiValueSize == 32);
+
+  // Try to convert with an FPU convert instruction. It's trivial to compute
+  // the modulo operation on an integer register, so we convert to a 64-bit
+  // integer and then find the 32-bit result from that.
+ //
+ // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
+ // when the double is out of range. NaNs and infinities will be converted to 0
+ // (as ECMA-262 requires).
+ Fcvtzs(result, double_input);
+
+ // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
+ // representable using a double, so if the result is one of those then we know
+  // that saturation occurred, and we need to manually handle the conversion.
+ //
+ // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
+ // 1 will cause signed overflow.
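+  // A sketch of the check below: Cmp(result, 1) overflows (sets V) only for
+  // INT64_MIN; if it did not overflow (vc), the Ccmp compares against -1,
+  // which overflows only for INT64_MAX, otherwise it forces V via VFlag.
+  // B(vc, done) therefore only branches when neither saturated value was
+  // produced.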
+ Cmp(result, 1);
+ Ccmp(result, -1, VFlag, vc);
+
+ B(vc, done);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+ ASSERT(jssp.Is(StackPointer()));
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+  // If we fell through, the inline version didn't succeed, so call the stub
+  // instead.
+ Push(lr);
+ Push(double_input); // Put input on stack.
+
+ DoubleToIStub stub(jssp,
+ result,
+ 0,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+
+ Drop(1, kDoubleSize); // Drop the double input on the stack.
+ Pop(lr);
+
+ Bind(&done);
+
+ // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed:
+ // https://code.google.com/p/v8/issues/detail?id=3149
+ Sxtw(result, result.W());
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result,
+ Register object) {
+ Label done;
+ ASSERT(!result.is(object));
+ ASSERT(jssp.Is(StackPointer()));
+
+ Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+ TryInlineTruncateDoubleToI(result, fp_scratch, &done);
+
+  // If we fell through, the inline version didn't succeed, so call the stub
+  // instead.
+ Push(lr);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+ Pop(lr);
+
+ Bind(&done);
+
+ // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed:
+ // https://code.google.com/p/v8/issues/detail?id=3149
+ Sxtw(result, result.W());
+}
+
+
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ ASSERT(StackPointer().Is(jssp));
+ // TODO(jbramley): Does x1 contain a JSFunction here, or does it already
+ // have the special STUB smi?
+ __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ // Compiled stubs don't age, and so they don't need the predictable code
+ // ageing sequence.
+ __ Push(lr, fp, cp, Tmp0());
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ if (isolate()->IsCodePreAgingActive()) {
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ __ EmitCodeAgeSequence(stub);
+ } else {
+ __ EmitFrameSetupForCodeAgePatching();
+ }
+ }
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ ASSERT(jssp.Is(StackPointer()));
+ Push(lr, fp, cp);
+ Mov(Tmp1(), Operand(Smi::FromInt(type)));
+ Mov(Tmp0(), Operand(CodeObject()));
+ Push(Tmp1(), Tmp0());
+ // jssp[4] : lr
+ // jssp[3] : fp
+ // jssp[2] : cp
+ // jssp[1] : type
+ // jssp[0] : code object
+
+ // Adjust FP to point to saved FP.
+ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ ASSERT(jssp.Is(StackPointer()));
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer and return address.
+ Mov(jssp, fp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+}
+
+
+void MacroAssembler::ExitFramePreserveFPRegs() {
+ PushCPURegList(kCallerSavedFP);
+}
+
+
+void MacroAssembler::ExitFrameRestoreFPRegs() {
+ // Read the registers from the stack without popping them. The stack pointer
+ // will be reset as part of the unwinding process.
+ CPURegList saved_fp_regs = kCallerSavedFP;
+ ASSERT(saved_fp_regs.Count() % 2 == 0);
+
+ int offset = ExitFrameConstants::kLastExitFrameField;
+ while (!saved_fp_regs.IsEmpty()) {
+ const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
+ const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
+ offset -= 2 * kDRegSizeInBytes;
+ Ldp(dst1, dst0, MemOperand(fp, offset));
+ }
+}
+
+
+// TODO(jbramley): Check that we're handling the frame pointer correctly.
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+ const Register& scratch,
+ int extra_space) {
+ ASSERT(jssp.Is(StackPointer()));
+
+ // Set up the new stack frame.
+ Mov(scratch, Operand(CodeObject()));
+ Push(lr, fp);
+ Mov(fp, StackPointer());
+ Push(xzr, scratch);
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // jssp -> fp[-16]: CodeObject()
+ STATIC_ASSERT((2 * kPointerSize) ==
+ ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
+ STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
+ STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
+
+ // Save the frame pointer and context pointer in the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ isolate())));
+ Str(fp, MemOperand(scratch));
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Str(cp, MemOperand(scratch));
+
+ STATIC_ASSERT((-2 * kPointerSize) ==
+ ExitFrameConstants::kLastExitFrameField);
+ if (save_doubles) {
+ ExitFramePreserveFPRegs();
+ }
+
+ // Reserve space for the return address and for user requested memory.
+ // We do this before aligning to make sure that we end up correctly
+ // aligned with the minimum of wasted space.
+ Claim(extra_space + 1, kXRegSizeInBytes);
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // jssp[8]: Extra space reserved for caller (if extra_space != 0).
+ // jssp -> jssp[0]: Space reserved for the return address.
+
+ // Align and synchronize the system stack pointer with jssp.
+ AlignAndSetCSPForFrame();
+ ASSERT(csp.Is(StackPointer()));
+
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // csp[...]: Saved doubles, if save_doubles is true.
+ // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // Alignment padding, if necessary.
+ // csp -> csp[0]: Space reserved for the return address.
+
+ // ExitFrame::GetStateForFramePointer expects to find the return address at
+ // the memory address immediately below the pointer stored in SPOffset.
+ // It is not safe to derive much else from SPOffset, because the size of the
+ // padding can vary.
+ Add(scratch, csp, kXRegSizeInBytes);
+ Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+
+// Leave the current exit frame.
+void MacroAssembler::LeaveExitFrame(bool restore_doubles,
+ const Register& scratch,
+ bool restore_context) {
+ ASSERT(csp.Is(StackPointer()));
+
+ if (restore_doubles) {
+ ExitFrameRestoreFPRegs();
+ }
+
+ // Restore the context pointer from the top frame.
+ if (restore_context) {
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Ldr(cp, MemOperand(scratch));
+ }
+
+ if (emit_debug_code()) {
+ // Also emit debug code to clear the cp in the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Str(xzr, MemOperand(scratch));
+ }
+ // Clear the frame pointer from the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ isolate())));
+ Str(xzr, MemOperand(scratch));
+
+ // Pop the exit frame.
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[...]: The rest of the frame.
+ Mov(jssp, fp);
+ SetStackPointer(jssp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Mov(scratch1, value);
+ Mov(scratch2, Operand(ExternalReference(counter)));
+ Str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ ASSERT(value != 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Mov(scratch2, Operand(ExternalReference(counter)));
+ Ldr(scratch1, MemOperand(scratch2));
+ Add(scratch1, scratch1, value);
+ Str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ IncrementCounter(counter, -value, scratch1, scratch2);
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in cp).
+ Mov(dst, cp);
+ }
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::DebugBreak() {
+ Mov(x0, 0);
+ Mov(x1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+}
+#endif
+
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+ int handler_index) {
+ ASSERT(jssp.Is(StackPointer()));
+ // Adjust this code if the asserts don't hold.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve the live registers x0-x4.
+ // (See JSEntryStub::GenerateBody().)
+
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+
+ // Set up the code object and the state for pushing.
+ Mov(x10, Operand(CodeObject()));
+ Mov(x11, state);
+
+ // Push the frame pointer, context, state, and code object.
+ if (kind == StackHandler::JS_ENTRY) {
+ ASSERT(Smi::FromInt(0) == 0);
+ Push(xzr, xzr, x11, x10);
+ } else {
+ Push(fp, cp, x11, x10);
+ }
+
+ // Link the current handler as the next handler.
+ Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ Ldr(x10, MemOperand(x11));
+ Push(x10);
+ // Set this new handler as the current one.
+ Str(jssp, MemOperand(x11));
+}
+
+
+void MacroAssembler::PopTryHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ Pop(x10);
+ Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ Drop(StackHandlerConstants::kSize - kXRegSizeInBytes, kByteSizeInBytes);
+ Str(x10, MemOperand(x11));
+}
+
+
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ // We apply salt to the original zap value to easily spot the values.
+ Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+ Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
+ }
+ B(gc_required);
+ return;
+ }
+
+ ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1()));
+ ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() &&
+ Tmp0().Is64Bits() && Tmp1().Is64Bits());
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ ASSERT(0 == (object_size & kObjectAlignmentMask));
+
+ // Check relative positions of allocation top and limit addresses.
+ // The values must be adjacent in memory to allow the use of LDP.
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference heap_allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register top_address = scratch1;
+ Register allocation_limit = scratch2;
+ Mov(top_address, Operand(heap_allocation_top));
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and the allocation limit.
+ Ldp(result, allocation_limit, MemOperand(top_address));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ Ldr(Tmp0(), MemOperand(top_address));
+ Cmp(result, Tmp0());
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load the allocation limit. 'result' already contains the allocation top.
+ Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ }
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on A64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ // Calculate new top and bail out if new space is exhausted.
+ Adds(Tmp1(), result, object_size);
+ B(vs, gc_required);
+ Cmp(Tmp1(), allocation_limit);
+ B(hi, gc_required);
+ Str(Tmp1(), MemOperand(top_address));
+
+ // Tag the object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Orr(result, result, kHeapObjectTag);
+ }
+}
+
+
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ // We apply salt to the original zap value to easily spot the values.
+ Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+ Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
+ }
+ B(gc_required);
+ return;
+ }
+
+ ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1()));
+ ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() &&
+ scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits());
+
+ // Check relative positions of allocation top and limit addresses.
+ // The values must be adjacent in memory to allow the use of LDP.
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference heap_allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register top_address = scratch1;
+ Register allocation_limit = scratch2;
+ Mov(top_address, Operand(heap_allocation_top));
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and the allocation limit.
+ Ldp(result, allocation_limit, MemOperand(top_address));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ Ldr(Tmp0(), MemOperand(top_address));
+ Cmp(result, Tmp0());
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load the allocation limit. 'result' already contains the allocation top.
+ Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ }
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on A64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ // Calculate new top and bail out if new space is exhausted.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2));
+ } else {
+ Adds(Tmp1(), result, object_size);
+ }
+
+ if (emit_debug_code()) {
+ Tst(Tmp1(), kObjectAlignmentMask);
+ Check(eq, kUnalignedAllocationInNewSpace);
+ }
+
+ B(vs, gc_required);
+ Cmp(Tmp1(), allocation_limit);
+ B(hi, gc_required);
+ Str(Tmp1(), MemOperand(top_address));
+
+ // Tag the object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Orr(result, result, kHeapObjectTag);
+ }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ Bic(object, object, kHeapObjectTagMask);
+#ifdef DEBUG
+ // Check that the object un-allocated is below the current top.
+ Mov(scratch, Operand(new_space_allocation_top));
+ Ldr(scratch, MemOperand(scratch));
+ Cmp(object, scratch);
+ Check(lt, kUndoAllocationOfNonAllocatedMemory);
+#endif
+ // Write the address of the object to un-allocate as the current top.
+ Mov(scratch, Operand(new_space_allocation_top));
+ Str(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
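+ // For illustration, with length == 3 the sequence below computes
+ // 2 * 3 + SeqTwoByteString::kHeaderSize bytes and then rounds up to a
+ // kObjectAlignment boundary (add the mask, clear the low bits).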
+ Add(scratch1, length, length); // Length in bytes, not chars.
+ Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+ Bic(scratch1, scratch1, kObjectAlignmentMask);
+
+ // Allocate two-byte string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT(kCharSize == 1);
+ Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
+ Bic(scratch1, scratch1, kObjectAlignmentMask);
+
+ // Allocate ASCII string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+ Mov(scratch1, Operand(high_promotion_mode));
+ Ldr(scratch1, MemOperand(scratch1));
+ Cbz(scratch1, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ B(&install_map);
+
+ Bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ Bind(&install_map);
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+// Allocates a heap number or jumps to the gc_required label if the young
+// space is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map) {
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Store heap number map in the allocated object.
+ if (heap_number_map.Is(NoReg)) {
+ heap_number_map = scratch1;
+ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ }
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+ DoubleRegister value,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map) {
+ // TODO(all): Check if it would be more efficient to use STP to store both
+ // the map and the value.
+ AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map);
+ Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+void MacroAssembler::JumpIfObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_cond_pass,
+ Condition cond) {
+ CompareObjectType(object, map, type_reg, type);
+ B(cond, if_cond_pass);
+}
+
+
+void MacroAssembler::JumpIfNotObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_not_object) {
+ JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+void MacroAssembler::CompareObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type) {
+ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(map, type_reg, type);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+void MacroAssembler::CompareInstanceType(Register map,
+ Register type_reg,
+ InstanceType type) {
+ Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Cmp(type_reg, type);
+}
+
+
+void MacroAssembler::CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* early_success) {
+ // TODO(jbramley): The early_success label isn't used. Remove it.
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareMap(scratch, map, early_success);
+}
+
+
+void MacroAssembler::CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success) {
+ // TODO(jbramley): The early_success label isn't used. Remove it.
+ Cmp(obj_map, Operand(map));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+
+ Label success;
+ CompareMap(obj, scratch, map, &success);
+ B(ne, fail);
+ Bind(&success);
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ JumpIfNotRoot(scratch, index, fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj_map,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj_map, fail);
+ }
+ Label success;
+ CompareMap(obj_map, map, &success);
+ B(ne, fail);
+ Bind(&success);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Cmp(scratch, Operand(map));
+ B(ne, &fail);
+ Jump(success, RelocInfo::CODE_TARGET);
+ Bind(&fail);
+}
+
+
+void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
+ Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset));
+ Tst(Tmp0(), mask);
+}
+
+
+void MacroAssembler::LoadElementsKind(Register result, Register object) {
+ // Load map.
+ __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ // Load the map's "bit field 2".
+ __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ BoundFunctionAction action) {
+ ASSERT(!AreAliased(function, result, scratch));
+
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
+
+ if (action == kMissOnBoundFunction) {
+ Register scratch_w = scratch.W();
+ Ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ // On 64-bit platforms, the compiler hints field is not a smi. See the
+ // definition of kCompilerHintsOffset in src/objects.h.
+ Ldr(scratch_w,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
+ }
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ Ldr(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and simply
+ // miss the cache instead. This will allow us to allocate a prototype object
+ // on-demand in the runtime system.
+ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
+
+ // Get the prototype from the initial map.
+ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ Bind(&non_instance);
+ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ Bind(&done);
+}
+
+
+void MacroAssembler::CompareRoot(const Register& obj,
+ Heap::RootListIndex index) {
+ ASSERT(!AreAliased(obj, Tmp0()));
+ LoadRoot(Tmp0(), index);
+ Cmp(obj, Tmp0());
+}
+
+
+void MacroAssembler::JumpIfRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_equal) {
+ CompareRoot(obj, index);
+ B(eq, if_equal);
+}
+
+
+void MacroAssembler::JumpIfNotRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_not_equal) {
+ CompareRoot(obj, index);
+ B(ne, if_not_equal);
+}
+
+
+void MacroAssembler::CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if ((if_true == if_false) && (if_false == fall_through)) {
+ // Fall through.
+ } else if (if_true == if_false) {
+ B(if_true);
+ } else if (if_false == fall_through) {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ } else if (if_true == fall_through) {
+ CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
+ } else {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ B(if_false);
+ }
+}
+
+
+void MacroAssembler::TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through) {
+ if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
+ // Fall through.
+ } else if (if_all_clear == if_any_set) {
+ B(if_all_clear);
+ } else if (if_all_clear == fall_through) {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ } else if (if_any_set == fall_through) {
+ TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
+ } else {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ B(if_all_clear);
+ }
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ // If cond==ls, set cond=hi, otherwise compare.
+ Ccmp(scratch,
+ Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Map::kMaximumBitField2FastHoleySmiElementValue);
+ B(hi, fail);
+}
+
+
+// Note: The ARM version of this clobbers elements_reg, but this version does
+// not. Some uses of this in A64 assume that elements_reg will be preserved.
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ FPRegister fpscratch2,
+ Label* fail,
+ int elements_offset) {
+ ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+ Label store_num;
+
+ // Speculatively convert the smi to a double - all smis can be exactly
+ // represented as a double.
+ SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
+
+ // If value_reg is a smi, we're done.
+ JumpIfSmi(value_reg, &store_num);
+
+ // Ensure that the object is a heap number.
+ CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
+ fail, DONT_DO_SMI_CHECK);
+
+ Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+ // Check for NaN by comparing the number to itself: NaN comparison will
+ // report unordered, indicated by the overflow flag being set.
+ Fcmp(fpscratch1, fpscratch1);
+ Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);
+
+ // Store the result.
+ Bind(&store_num);
+ Add(scratch1, elements_reg,
+ Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
+ Str(fpscratch1,
+ FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+}
+
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // If the hash field contains an array index, pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it do not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in the 'index' register.
+ // kArrayIndexValueMask has zeros in the low kHashShift bits.
+ STATIC_ASSERT(kSmiTag == 0);
+ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ SmiTag(index, hash);
+}
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(
+ Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask) {
+ ASSERT(!AreAliased(string, index, scratch));
+
+ if (index_type == kIndexIsSmi) {
+ AssertSmi(index);
+ }
+
+ // Check that string is an object.
+ AssertNotSmi(string, kNonObject);
+
+ // Check that string has an appropriate map.
+ Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
+ Cmp(scratch, encoding_mask);
+ Check(eq, kUnexpectedStringType);
+
+ Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
+ Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
+ Check(lt, kIndexIsTooLarge);
+
+ ASSERT_EQ(0, Smi::FromInt(0));
+ Cmp(index, 0);
+ Check(ge, kIndexIsNegative);
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss) {
+ // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function.
+ // The ARM version takes two scratch registers, and that should be enough for
+ // all of the checks.
+
+ Label same_contexts;
+
+ ASSERT(!AreAliased(holder_reg, scratch));
+
+ // Load current lexical context from the stack frame.
+ Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ Cmp(scratch, 0);
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+#endif
+
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ Ldr(scratch, FieldMemOperand(scratch, offset));
+ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Read the first word and compare to the global_context_map.
+ Register temp = Tmp1();
+ Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+ }
+
+ // Check if both contexts are the same.
+ ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmp(scratch, Tmp0());
+ b(&same_contexts, eq);
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Move Tmp0() into a different register, as CompareRoot will use it.
+ Register temp = Tmp1();
+ mov(temp, Tmp0());
+ CompareRoot(temp, Heap::kNullValueRootIndex);
+ Check(ne, kExpectedNonNullContext);
+
+ Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset));
+ CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+
+ // Let's consider that Tmp0() has been clobbered by the MacroAssembler.
+ // We reload it with its value.
+ ldr(Tmp0(), FieldMemOperand(holder_reg,
+ JSGlobalProxy::kNativeContextOffset));
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ ldr(scratch, FieldMemOperand(scratch, token_offset));
+ ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset));
+ cmp(scratch, Tmp0());
+ b(miss, ne);
+
+ bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and with KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
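+// As a rough C-level sketch of the sequence emitted below (illustrative only;
+// the authoritative definition is ComputeIntegerHash in utils.h):
+//   uint32_t hash = key ^ seed;
+//   hash = ~hash + (hash << 15);
+//   hash = hash ^ (hash >> 12);
+//   hash = hash + (hash << 2);
+//   hash = hash ^ (hash >> 4);
+//   hash = hash * 2057;  // Emitted as hash + (hash << 3) + (hash << 11).
+//   hash = hash ^ (hash >> 16);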
+void MacroAssembler::GetNumberHash(Register key, Register scratch) {
+ ASSERT(!AreAliased(key, scratch));
+
+ // Xor original key with a seed.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ Eor(key, key, Operand::UntagSmi(scratch));
+
+ // The algorithm uses 32-bit integer values.
+ key = key.W();
+ scratch = scratch.W();
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ Mvn(scratch, key);
+ Add(key, scratch, Operand(key, LSL, 15));
+ // hash = hash ^ (hash >> 12);
+ Eor(key, key, Operand(key, LSR, 12));
+ // hash = hash + (hash << 2);
+ Add(key, key, Operand(key, LSL, 2));
+ // hash = hash ^ (hash >> 4);
+ Eor(key, key, Operand(key, LSR, 4));
+ // hash = hash * 2057;
+ Mov(scratch, Operand(key, LSL, 11));
+ Add(key, key, Operand(key, LSL, 3));
+ Add(key, key, scratch);
+ // hash = hash ^ (hash >> 16);
+ Eor(key, key, Operand(key, LSR, 16));
+}
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
+
+ Label done;
+
+ SmiUntag(scratch0, key);
+ GetNumberHash(scratch0, scratch1);
+
+ // Compute the capacity mask.
+ Ldrsw(scratch1,
+ UntagSmiFieldMemOperand(elements,
+ SeededNumberDictionary::kCapacityOffset));
+ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
+ } else {
+ Mov(scratch2, scratch0);
+ }
+ And(scratch2, scratch2, scratch1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ Ldr(scratch3,
+ FieldMemOperand(scratch2,
+ SeededNumberDictionary::kElementsStartOffset));
+ Cmp(key, scratch3);
+ if (i != (kNumberDictionaryProbes - 1)) {
+ B(eq, &done);
+ } else {
+ B(ne, miss);
+ }
+ }
+
+ Bind(&done);
+ // Check that the value is a normal property.
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ Ldr(result, FieldMemOperand(scratch2, kValueOffset));
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ ASSERT(!AreAliased(object, address, scratch));
+ Label done, store_buffer_overflow;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, &ok);
+ Abort(kRememberedSetPointerInNewSpace);
+ bind(&ok);
+ }
+ // Load store buffer top.
+ Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate())));
+ Ldr(scratch, MemOperand(Tmp0()));
+ // Store pointer to buffer and increment buffer top.
+ Str(address, MemOperand(scratch, kPointerSize, PostIndex));
+ // Write back new top of buffer.
+ Str(scratch, MemOperand(Tmp0()));
+ // Call stub on end of buffer.
+ // Check for end of buffer.
+ ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
+ (1 << (14 + kPointerSizeLog2)));
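+ // In other words, with 8-byte pointers the overflow bit is bit 17, which is
+ // why the Tbz/Tbnz below test bit (14 + kPointerSizeLog2) directly.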
+ if (and_then == kFallThroughAtEnd) {
+ Tbz(scratch, (14 + kPointerSizeLog2), &done);
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow);
+ Ret();
+ }
+
+ Bind(&store_buffer_overflow);
+ Push(lr);
+ StoreBufferOverflowStub store_buffer_overflow_stub =
+ StoreBufferOverflowStub(fp_mode);
+ CallStub(&store_buffer_overflow_stub);
+ Pop(lr);
+
+ Bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
+ }
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ PopXRegList(kSafepointSavedRegisters);
+ Drop(num_unsaved);
+}
+
+
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
+ // adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ ASSERT(num_unsaved >= 0);
+ Claim(num_unsaved);
+ PushXRegList(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PushSafepointFPRegisters() {
+ PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
+ FPRegister::kAllocatableFPRegisters));
+}
+
+
+void MacroAssembler::PopSafepointFPRegisters() {
+ PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
+ FPRegister::kAllocatableFPRegisters));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // Make sure the safepoint registers list is what we expect.
+ ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+
+ // Safepoint registers are stored contiguously on the stack, but not all the
+ // registers are saved. The following registers are excluded:
+ // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
+ // the macro assembler.
+ // - x28 (jssp) because the JS stack pointer doesn't need to be included in
+ // safepoint registers.
+ // - x31 (csp) because the system stack pointer doesn't need to be included
+ // in safepoint registers.
+ //
+ // This function implements the mapping of register code to index into the
+ // safepoint register slots.
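+ // For example (derived from the cases below): x0-x15 keep their own codes,
+ // x18 maps to slot 16, x27 to slot 25, and fp (x29) and lr (x30) map to
+ // slots 26 and 27.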
+ if ((reg_code >= 0) && (reg_code <= 15)) {
+ return reg_code;
+ } else if ((reg_code >= 18) && (reg_code <= 27)) {
+ // Skip ip0 and ip1.
+ return reg_code - 2;
+ } else if ((reg_code == 29) || (reg_code == 30)) {
+ // Also skip jssp.
+ return reg_code - 3;
+ } else {
+ // This register has no safepoint register slot.
+ UNREACHABLE();
+ return -1;
+ }
+}
+
+
+void MacroAssembler::CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set) {
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAnySet(scratch, mask, if_any_set);
+}
+
+
+void MacroAssembler::CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear) {
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAllClear(scratch, mask, if_all_clear);
+}
+
+
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip the barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ Add(scratch, object, offset - kHeapObjectTag);
+ if (emit_debug_code()) {
+ Label ok;
+ Tst(scratch, (1 << kPointerSizeLog2) - 1);
+ B(eq, &ok);
+ Abort(kUnalignedCellInWriteBarrier);
+ Bind(&ok);
+ }
+
+ RecordWrite(object,
+ scratch,
+ value,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK);
+
+ Bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
+ Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber: object, address, value, Tmp0(), Tmp1().
+// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
+//
+// The register 'object' contains a heap object pointer. The heap object tag is
+// shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASM_LOCATION("MacroAssembler::RecordWrite");
+ ASSERT(!AreAliased(object, value));
+
+ if (emit_debug_code()) {
+ Ldr(Tmp0(), MemOperand(address));
+ Cmp(Tmp0(), value);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ CheckPageFlagClear(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+ CheckPageFlagClear(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ Push(lr);
+ }
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ Pop(lr);
+ }
+
+ Bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::AssertHasValidColor(const Register& reg) {
+ if (emit_debug_code()) {
+ // The bit sequence is backward. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label color_is_valid;
+ Tbnz(reg, 0, &color_is_valid);
+ Tbz(reg, 1, &color_is_valid);
+ Abort(kUnexpectedColorFound);
+ Bind(&color_is_valid);
+ }
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg));
+ // addr_reg is divided into fields:
+ // |63 page base 20|19 high 8|7 shift 3|2 0|
+ // 'high' gives the index of the cell holding color bits for the object.
+ // 'shift' gives the offset in the cell for this object's color.
+ const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
+ Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
+ Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2));
+ // bitmap_reg:
+ // |63 page base 20|19 zeros 15|14 high 3|2 0|
+ Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ // See mark-compact.h for color definitions.
+ ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
+
+ GetMarkBits(object, bitmap_scratch, shift_scratch);
+ Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Shift the bitmap down to get the color of the object in bits [1:0].
+ Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
+
+ AssertHasValidColor(bitmap_scratch);
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ // Check for the color.
+ if (first_bit == 0) {
+ // Checking for white.
+ ASSERT(second_bit == 0);
+ // We only need to test the first bit.
+ Tbz(bitmap_scratch, 0, has_color);
+ } else {
+ Label other_color;
+ // Checking for grey or black.
+ Tbz(bitmap_scratch, 0, &other_color);
+ if (second_bit == 0) {
+ Tbz(bitmap_scratch, 1, has_color);
+ } else {
+ Tbnz(bitmap_scratch, 1, has_color);
+ }
+ Bind(&other_color);
+ }
+
+ // Fall through if it does not have the right color.
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ Mov(scratch, Operand(map));
+ Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
+ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
+ }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!AreAliased(object, scratch0, scratch1));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // Scratch contains elements pointer.
+ Mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ Bind(&loop_again);
+ Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
+ Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
+}
+
+
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+ Register result) {
+ ASSERT(!result.Is(ldr_location));
+ const uint32_t kLdrLitOffset_lsb = 5;
+ const uint32_t kLdrLitOffset_width = 19;
+ Ldr(result, MemOperand(ldr_location));
+ if (emit_debug_code()) {
+ And(result, result, LoadLiteralFMask);
+ Cmp(result, LoadLiteralFixed);
+ Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
+ // The instruction was clobbered. Reload it.
+ Ldr(result, MemOperand(ldr_location));
+ }
+ Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
+ Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Register load_scratch,
+ Register length_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(
+ value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ GetMarkBits(value, bitmap_scratch, shift_scratch);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Lsr(load_scratch, load_scratch, shift_scratch);
+
+ AssertHasValidColor(load_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ Label done;
+ Tbnz(load_scratch, 0, &done);
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ Register map = load_scratch; // Holds map while checking type.
+ Label is_data_object;
+
+ // Check for heap-number.
+ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ Mov(length_scratch, HeapNumber::kSize);
+ JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ TestAndBranchIfAnySet(instance_type,
+ kIsIndirectStringMask | kIsNotStringMask,
+ value_is_white_and_not_data);
+
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ Mov(length_scratch, ExternalString::kSize);
+ TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
+
+ // Sequential string, either ASCII or UC16.
+ // For ASCII (char-size of 1) the untagged length is already the length in
+ // bytes. For UC16 (char-size of 2) the untagged length is shifted left by
+ // one below, giving the length multiplied by 2.
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
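+ // For illustration of the flag trick below: Tst/Cset sets load_scratch to 1
+ // when the encoding bit is clear (a two-byte string) and to 0 for ASCII, so
+ // the Lsl scales the length by the character size.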
+ Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
+ String::kLengthOffset));
+ Tst(instance_type, kStringEncodingMask);
+ Cset(load_scratch, eq);
+ Lsl(length_scratch, length_scratch, load_scratch);
+ Add(length_scratch,
+ length_scratch,
+ SeqString::kHeaderSize + kObjectAlignmentMask);
+ Bic(length_scratch, length_scratch, kObjectAlignmentMask);
+
+ Bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ Register mask = shift_scratch;
+ Mov(load_scratch, 1);
+ Lsl(mask, load_scratch, shift_scratch);
+
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Orr(load_scratch, load_scratch, mask);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ Add(load_scratch, load_scratch, length_scratch);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
+ if (emit_debug_code()) {
+ Check(cond, reason);
+ }
+}
+
+
+void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
+ if (emit_debug_code()) {
+ CheckRegisterIsClear(reg, reason);
+ }
+}
+
+
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason) {
+ // CompareRoot uses Tmp0().
+ ASSERT(!reg.Is(Tmp0()));
+ if (emit_debug_code()) {
+ CompareRoot(reg, index);
+ Check(eq, reason);
+ }
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ Register temp = Tmp1();
+ Label ok;
+ Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
+ JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ Bind(&ok);
+ }
+}
+
+
+void MacroAssembler::AssertIsString(const Register& object) {
+ if (emit_debug_code()) {
+ Register temp = Tmp1();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::Check(Condition cond, BailoutReason reason) {
+ Label ok;
+ B(cond, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
+ Label ok;
+ Cbz(reg, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(GetBailoutReason(reason));
+
+ if (FLAG_trap_on_abort) {
+ Brk(0);
+ return;
+ }
+#endif
+
+ // Abort is used in some contexts where csp is the stack pointer. In order to
+ // simplify the CallRuntime code, make sure that jssp is the stack pointer.
+ // There is no risk of register corruption here because Abort doesn't return.
+ Register old_stack_pointer = StackPointer();
+ SetStackPointer(jssp);
+ Mov(jssp, old_stack_pointer);
+
+ if (use_real_aborts()) {
+ Mov(x0, Operand(Smi::FromInt(reason)));
+ Push(x0);
+
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 1);
+ } else {
+ CallRuntime(Runtime::kAbort, 1);
+ }
+ } else {
+ // Load the string to pass to Printf.
+ Label msg_address;
+ Adr(x0, &msg_address);
+
+ // Call Printf directly to report the error.
+ CallPrintf();
+
+ // We need a way to stop execution on both the simulator and real hardware,
+ // and Unreachable() is the best option.
+ Unreachable();
+
+ // Emit the message string directly in the instruction stream.
+ {
+ BlockConstPoolScope scope(this);
+ Bind(&msg_address);
+ EmitStringData(GetBailoutReason(reason));
+ }
+ }
+
+ SetStackPointer(old_stack_pointer);
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ Ldr(scratch, GlobalObjectMemOperand());
+ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX));
+ size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(Tmp0(), FieldMemOperand(scratch, offset));
+ Cmp(map_in_out, Tmp0());
+ B(ne, no_map_match);
+
+ // Use the transitioned cached map.
+ offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(map_in_out, FieldMemOperand(scratch, offset));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(Register function_in,
+ Register scratch,
+ Register map_out,
+ ArrayHasHoles holes) {
+ ASSERT(!AreAliased(function_in, scratch, map_out));
+ Label done;
+ Ldr(map_out, FieldMemOperand(function_in,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ if (!FLAG_smi_only_arrays) {
+ ElementsKind kind = (holes == kArrayCanHaveHoles) ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, kind, map_out,
+ scratch, &done);
+ } else if (holes == kArrayCanHaveHoles) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_HOLEY_SMI_ELEMENTS, map_out,
+ scratch, &done);
+ }
+ Bind(&done);
+}
+
+
+void MacroAssembler::LoadArrayFunction(Register function) {
+ // Load the global or builtins object from the current context.
+ Ldr(function, GlobalObjectMemOperand());
+ // Load the global context from the global or builtins object.
+ Ldr(function,
+ FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the array function from the native context.
+ Ldr(function, ContextMemOperand(function, Context::ARRAY_FUNCTION_INDEX));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ Ldr(function, GlobalObjectMemOperand());
+ // Load the native context from the global or builtins object.
+ Ldr(function, FieldMemOperand(function,
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ Ldr(function, ContextMemOperand(function, index));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+ B(&ok);
+ Bind(&fail);
+ Abort(kGlobalFunctionsMustHaveInitialMap);
+ Bind(&ok);
+ }
+}
+
+
+// This is the main Printf implementation. All other Printf variants call
+// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
+void MacroAssembler::PrintfNoPreserve(const char * format,
+ const CPURegister& arg0,
+ const CPURegister& arg1,
+ const CPURegister& arg2,
+ const CPURegister& arg3) {
+ // We cannot handle a caller-saved stack pointer. It doesn't make much sense
+ // in most cases anyway, so this restriction shouldn't be too serious.
+ ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
+
+ // We cannot print Tmp0() or Tmp1() as they're used internally by the macro
+ // assembler. We cannot print the stack pointer because it is typically used
+ // to preserve caller-saved registers (using other Printf variants which
+ // depend on this helper).
+ ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0));
+ ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1));
+ ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2));
+ ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3));
+
+ static const int kMaxArgCount = 4;
+ // Assume that we have the maximum number of arguments until we know
+ // otherwise.
+ int arg_count = kMaxArgCount;
+
+ // The provided arguments.
+ CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
+
+ // The PCS registers where the arguments need to end up.
+ CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};
+
+ // Promote FP arguments to doubles, and integer arguments to X registers.
+ // Note that FP and integer arguments cannot be mixed, but we'll check
+ // AreSameSizeAndType once we've processed these promotions.
+ for (int i = 0; i < kMaxArgCount; i++) {
+ if (args[i].IsRegister()) {
+ // Note that we use x1 onwards, because x0 will hold the format string.
+ pcs[i] = Register::XRegFromCode(i + 1);
+ // For simplicity, we handle all integer arguments as X registers. An X
+ // register argument takes the same space as a W register argument in the
+ // PCS anyway. The only limitation is that we must explicitly clear the
+ // top word for W register arguments as the callee will expect it to be
+ // clear.
+ if (!args[i].Is64Bits()) {
+ const Register& as_x = args[i].X();
+ And(as_x, as_x, 0x00000000ffffffff);
+ args[i] = as_x;
+ }
+ } else if (args[i].IsFPRegister()) {
+ pcs[i] = FPRegister::DRegFromCode(i);
+ // C and C++ varargs functions (such as printf) implicitly promote float
+ // arguments to doubles.
+ if (!args[i].Is64Bits()) {
+ FPRegister s(args[i]);
+ const FPRegister& as_d = args[i].D();
+ Fcvt(as_d, s);
+ args[i] = as_d;
+ }
+ } else {
+ // This is the first empty (NoCPUReg) argument, so use it to set the
+ // argument count and bail out.
+ arg_count = i;
+ break;
+ }
+ }
+ ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
+ // Check that every remaining argument is NoCPUReg.
+ for (int i = arg_count; i < kMaxArgCount; i++) {
+ ASSERT(args[i].IsNone());
+ }
+ ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
+ args[2], args[3],
+ pcs[0], pcs[1],
+ pcs[2], pcs[3]));
+
+ // Move the arguments into the appropriate PCS registers.
+ //
+ // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
+ // surprisingly complicated.
+ //
+ // * For even numbers of registers, we push the arguments and then pop them
+ // into their final registers. This maintains 16-byte stack alignment in
+ // case csp is the stack pointer, since we're only handling X or D
+ // registers at this point.
+ //
+ // * For odd numbers of registers, we push and pop all but one register in
+ // the same way, but the left-over register is moved directly, since we
+ // can always safely move one register without clobbering any source.
+ if (arg_count >= 4) {
+ Push(args[3], args[2], args[1], args[0]);
+ } else if (arg_count >= 2) {
+ Push(args[1], args[0]);
+ }
+
+ if ((arg_count % 2) != 0) {
+ // Move the left-over register directly.
+ const CPURegister& leftover_arg = args[arg_count - 1];
+ const CPURegister& leftover_pcs = pcs[arg_count - 1];
+ if (leftover_arg.IsRegister()) {
+ Mov(Register(leftover_pcs), Register(leftover_arg));
+ } else {
+ Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
+ }
+ }
+
+ if (arg_count >= 4) {
+ Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
+ } else if (arg_count >= 2) {
+ Pop(pcs[0], pcs[1]);
+ }
+
+ // Load the format string into x0, as per the procedure-call standard.
+ //
+ // To make the code as portable as possible, the format string is encoded
+ // directly in the instruction stream. It might be cleaner to encode it in a
+ // literal pool, but since Printf is usually used for debugging, it is
+ // beneficial for it to be minimally dependent on other features.
+ Label format_address;
+ Adr(x0, &format_address);
+
+ // Emit the format string directly in the instruction stream.
+ { BlockConstPoolScope scope(this);
+ Label after_data;
+ B(&after_data);
+ Bind(&format_address);
+ EmitStringData(format);
+ Unreachable();
+ Bind(&after_data);
+ }
+
+ // We don't pass any arguments on the stack, but we still need to align the C
+ // stack pointer to a 16-byte boundary for PCS compliance.
+ if (!csp.Is(StackPointer())) {
+ Bic(csp, StackPointer(), 0xf);
+ }
+
+ CallPrintf(pcs[0].type());
+}
+
+
+void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
+ // A call to printf needs special handling for the simulator, since the system
+ // printf function will use a different instruction set and the procedure-call
+ // standard will not be compatible.
+#ifdef USE_SIMULATOR
+ { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
+ hlt(kImmExceptionIsPrintf);
+ dc32(type);
+ }
+#else
+ Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
+#endif
+}
+
+
+void MacroAssembler::Printf(const char * format,
+ const CPURegister& arg0,
+ const CPURegister& arg1,
+ const CPURegister& arg2,
+ const CPURegister& arg3) {
+ // Preserve all caller-saved registers as well as NZCV.
+ // If csp is the stack pointer, PushCPURegList asserts that the size of each
+ // list is a multiple of 16 bytes.
+ PushCPURegList(kCallerSaved);
+ PushCPURegList(kCallerSavedFP);
+ // Use Tmp0() as a scratch register. It is not accepted by Printf so it will
+ // never overlap an argument register.
+ Mrs(Tmp0(), NZCV);
+ Push(Tmp0(), xzr);
+
+ PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+
+ Pop(xzr, Tmp0());
+ Msr(NZCV, Tmp0());
+ PopCPURegList(kCallerSavedFP);
+ PopCPURegList(kCallerSaved);
+}
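+
+// An illustrative Printf call site (a sketch, not code from this file): with
+// the usual ACCESS_MASM '__' macro in place, debug code can emit, e.g.,
+//   __ Printf("x10 = %" PRId64 "\n", x10);
+// Note that integer and floating-point arguments cannot be mixed, as asserted
+// in PrintfNoPreserve above.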
+
+
+void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
+ // TODO(jbramley): Other architectures use the internal memcpy to copy the
+ // sequence. If this is a performance bottleneck, we should consider caching
+ // the sequence and copying it in the same way.
+ InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
+ ASSERT(jssp.Is(StackPointer()));
+ EmitFrameSetupForCodeAgePatching(this);
+}
+
+
+void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
+ InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
+ ASSERT(jssp.Is(StackPointer()));
+ EmitCodeAgeSequence(this, stub);
+}
+
+
+#undef __
+#define __ assm->
+
+
+void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
+ Label start;
+ __ bind(&start);
+
+ // We can do this sequence using four instructions, but the code ageing
+ // sequence that patches it needs five, so we use the extra space to try to
+ // simplify some addressing modes and remove some dependencies (compared to
+ // using two stp instructions with write-back).
+ __ sub(jssp, jssp, 4 * kXRegSizeInBytes);
+ __ sub(csp, csp, 4 * kXRegSizeInBytes);
+ __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSizeInBytes));
+ __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSizeInBytes));
+ __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+}
+
+
+void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
+ Code * stub) {
+ Label start;
+ __ bind(&start);
+ // When the stub is called, the sequence is replaced with the young sequence
+ // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
+ // stub jumps to &start, stored in x0. The young sequence does not call the
+ // stub so there is no infinite loop here.
+ //
+ // A branch (br) is used rather than a call (blr) because this code replaces
+ // the frame setup code that would normally preserve lr.
+ __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
+ __ adr(x0, &start);
+ __ br(ip0);
+ // IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up
+ // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
+ if (stub) {
+ __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+ }
+}
+
+
+bool MacroAssembler::IsYoungSequence(byte* sequence) {
+ // Generate a young sequence to compare with.
+ const int length = kCodeAgeSequenceSize / kInstructionSize;
+ static bool initialized = false;
+ static byte young[kCodeAgeSequenceSize];
+ if (!initialized) {
+ PatchingAssembler patcher(young, length);
+ // The young sequence is the frame setup code for FUNCTION code types. It is
+ // generated by FullCodeGenerator::Generate.
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+ initialized = true;
+ }
+
+ bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
+ ASSERT(is_young || IsCodeAgeSequence(sequence));
+ return is_young;
+}
+
+
+#ifdef DEBUG
+bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
+ // The old sequence varies depending on the code age. However, the code up
+ // until kCodeAgeStubEntryOffset does not change, so we can check that part to
+ // get a reasonable level of verification.
+ const int length = kCodeAgeStubEntryOffset / kInstructionSize;
+ static bool initialized = false;
+ static byte old[kCodeAgeStubEntryOffset];
+ if (!initialized) {
+ PatchingAssembler patcher(old, length);
+ MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
+ initialized = true;
+ }
+ return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
+}
+#endif
+
+
+#undef __
+#define __ masm->
+
+
+void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
+ const Label* smi_check) {
+ Assembler::BlockConstPoolScope scope(masm);
+ if (reg.IsValid()) {
+ ASSERT(smi_check->is_bound());
+ ASSERT(reg.Is64Bits());
+
+ // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
+ // 'check' in the other bits. The possible offset is limited in that we
+ // use BitField to pack the data, and the underlying data type is a
+ // uint32_t.
+ uint32_t delta = __ InstructionsGeneratedSince(smi_check);
+ __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
+ } else {
+ ASSERT(!smi_check->is_bound());
+
+ // An offset of 0 indicates that there is no patch site.
+ __ InlineData(0);
+ }
+}
+
+
+InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
+ : reg_(NoReg), smi_check_(NULL) {
+ InstructionSequence* inline_data = InstructionSequence::At(info);
+ ASSERT(inline_data->IsInlineData());
+ if (inline_data->IsInlineData()) {
+ uint64_t payload = inline_data->InlineData();
+ // We use BitField to decode the payload, and BitField can only handle
+ // 32-bit values.
+ ASSERT(is_uint32(payload));
+ if (payload != 0) {
+ int reg_code = RegisterBits::decode(payload);
+ reg_ = Register::XRegFromCode(reg_code);
+ uint64_t smi_check_delta = DeltaBits::decode(payload);
+ ASSERT(smi_check_delta != 0);
+ smi_check_ = inline_data - (smi_check_delta * kInstructionSize);
+ }
+ }
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/macro-assembler-a64.h b/deps/v8/src/a64/macro-assembler-a64.h
new file mode 100644
index 0000000000..7b8dd3f806
--- /dev/null
+++ b/deps/v8/src/a64/macro-assembler-a64.h
@@ -0,0 +1,2238 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_MACRO_ASSEMBLER_A64_H_
+#define V8_A64_MACRO_ASSEMBLER_A64_H_
+
+#include "v8globals.h"
+#include "globals.h"
+
+#include "a64/assembler-a64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define LS_MACRO_LIST(V) \
+ V(Ldrb, Register&, rt, LDRB_w) \
+ V(Strb, Register&, rt, STRB_w) \
+ V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
+ V(Ldrh, Register&, rt, LDRH_w) \
+ V(Strh, Register&, rt, STRH_w) \
+ V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
+ V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
+ V(Str, CPURegister&, rt, StoreOpFor(rt)) \
+ V(Ldrsw, Register&, rt, LDRSW_x)
+
+
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset);
+inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
+
+// Generate a MemOperand for loading a SMI from memory.
+inline MemOperand UntagSmiMemOperand(Register object, int offset);
+
+
+// ----------------------------------------------------------------------------
+// MacroAssembler
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+enum TargetAddressStorageMode {
+ CAN_INLINE_TARGET_ADDRESS,
+ NEVER_INLINE_TARGET_ADDRESS
+};
+enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
+enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
+enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
+enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
+enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
+
+class MacroAssembler : public Assembler {
+ public:
+ MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
+
+ inline Handle<Object> CodeObject();
+
+ // Instruction set functions ------------------------------------------------
+ // Logical macros.
+ inline void And(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Tst(const Register& rn, const Operand& operand);
+ void LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+
+ // Add and sub macros.
+ inline void Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Cmn(const Register& rn, const Operand& operand);
+ inline void Cmp(const Register& rn, const Operand& operand);
+ inline void Neg(const Register& rd,
+ const Operand& operand);
+ inline void Negs(const Register& rd,
+ const Operand& operand);
+
+ void AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ // Add/sub with carry macros.
+ inline void Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ngc(const Register& rd,
+ const Operand& operand);
+ inline void Ngcs(const Register& rd,
+ const Operand& operand);
+ void AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Move macros.
+ void Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+ void Mov(const Register& rd, uint64_t imm);
+ inline void Mvn(const Register& rd, uint64_t imm);
+ void Mvn(const Register& rd, const Operand& operand);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+ static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+ // Conditional macros.
+ inline void Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ void Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond);
+
+ // Load/store macros.
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
+ inline void FN(const REGTYPE REG, const MemOperand& addr);
+ LS_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+
+ // V8-specific load/store helpers.
+ void Load(const Register& rt, const MemOperand& addr, Representation r);
+ void Store(const Register& rt, const MemOperand& addr, Representation r);
+
+ // Remaining instructions are simple pass-through calls to the assembler.
+ inline void Adr(const Register& rd, Label* label);
+ inline void Asr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Asr(const Register& rd, const Register& rn, const Register& rm);
+ inline void B(Label* label);
+ inline void B(Condition cond, Label* label);
+ void B(Label* label, Condition cond);
+ inline void Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bind(Label* label);
+ inline void Bl(Label* label);
+ inline void Blr(const Register& xn);
+ inline void Br(const Register& xn);
+ inline void Brk(int code);
+ void Cbnz(const Register& rt, Label* label);
+ void Cbz(const Register& rt, Label* label);
+ inline void Cinc(const Register& rd, const Register& rn, Condition cond);
+ inline void Cinv(const Register& rd, const Register& rn, Condition cond);
+ inline void Cls(const Register& rd, const Register& rn);
+ inline void Clz(const Register& rd, const Register& rn);
+ inline void Cneg(const Register& rd, const Register& rn, Condition cond);
+ inline void CzeroX(const Register& rd, Condition cond);
+ inline void CmovX(const Register& rd, const Register& rn, Condition cond);
+ inline void Cset(const Register& rd, Condition cond);
+ inline void Csetm(const Register& rd, Condition cond);
+ inline void Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Dmb(BarrierDomain domain, BarrierType type);
+ inline void Dsb(BarrierDomain domain, BarrierType type);
+ inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
+ inline void Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+ inline void Fabs(const FPRegister& fd, const FPRegister& fn);
+ inline void Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
+ inline void Fcmp(const FPRegister& fn, double value);
+ inline void Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+ inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fcvtas(const Register& rd, const FPRegister& fn);
+ inline void Fcvtau(const Register& rd, const FPRegister& fn);
+ inline void Fcvtms(const Register& rd, const FPRegister& fn);
+ inline void Fcvtmu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtns(const Register& rd, const FPRegister& fn);
+ inline void Fcvtnu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzs(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzu(const Register& rd, const FPRegister& fn);
+ inline void Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmov(FPRegister fd, FPRegister fn);
+ inline void Fmov(FPRegister fd, Register rn);
+ inline void Fmov(FPRegister fd, double imm);
+ inline void Fmov(Register rd, FPRegister fn);
+ inline void Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fneg(const FPRegister& fd, const FPRegister& fn);
+ inline void Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Frinta(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintn(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintz(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Hint(SystemHint code);
+ inline void Hlt(int code);
+ inline void Isb();
+ inline void Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src);
+ inline void Ldr(const FPRegister& ft, double imm);
+ inline void Ldr(const Register& rt, uint64_t imm);
+ inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
+ inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
+ inline void Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
+ inline void Mov(const Register& rd, const Register& rm);
+ inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
+ inline void Mrs(const Register& rt, SystemRegister sysreg);
+ inline void Msr(SystemRegister sysreg, const Register& rt);
+ inline void Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mul(const Register& rd, const Register& rn, const Register& rm);
+ inline void Nop() { nop(); }
+ inline void Rbit(const Register& rd, const Register& rn);
+ inline void Ret(const Register& xn = lr);
+ inline void Rev(const Register& rd, const Register& rn);
+ inline void Rev16(const Register& rd, const Register& rn);
+ inline void Rev32(const Register& rd, const Register& rn);
+ inline void Ror(const Register& rd, const Register& rs, unsigned shift);
+ inline void Ror(const Register& rd, const Register& rn, const Register& rm);
+ inline void Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Sxtb(const Register& rd, const Register& rn);
+ inline void Sxth(const Register& rd, const Register& rn);
+ inline void Sxtw(const Register& rd, const Register& rn);
+ void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbz(const Register& rt, unsigned bit_pos, Label* label);
+ inline void Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Uxtb(const Register& rd, const Register& rn);
+ inline void Uxth(const Register& rd, const Register& rn);
+ inline void Uxtw(const Register& rd, const Register& rn);
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Compute rd = abs(rm).
+ // This function clobbers the condition flags.
+ //
+ // If rm is the minimum representable value, the result is not representable.
+ // Handlers for each case can be specified using the relevant labels.
+ void Abs(const Register& rd, const Register& rm,
+ Label * is_not_representable = NULL,
+ Label * is_representable = NULL);
+
+ // Push or pop up to 4 registers of the same width to or from the stack,
+ // using the current stack pointer as set by SetStackPointer.
+ //
+ // If an argument register is 'NoReg', all further arguments are also assumed
+ // to be 'NoReg', and are thus not pushed or popped.
+ //
+ // Arguments are ordered such that "Push(a, b);" is functionally equivalent
+ // to "Push(a); Push(b);".
+ //
+ // It is valid to push the same register more than once, and there is no
+ // restriction on the order in which registers are specified.
+ //
+ // It is not valid to pop into the same register more than once in one
+ // operation, not even into the zero register.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is csp, then it
+ // must be aligned to 16 bytes on entry and the total size of the specified
+ // registers must also be a multiple of 16 bytes.
+ //
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // Push (and derived methods) will still modify the system stack pointer in
+ // order to comply with ABI rules about accessing memory below the system
+ // stack pointer.
+ //
+ // Other than the registers passed into Pop, the stack pointer and (possibly)
+ // the system stack pointer, these methods do not modify any other registers.
+ // Scratch registers such as Tmp0() and Tmp1() are preserved.
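+  //
+  // For example (an illustrative sketch of the ordering rule): the sequence
+  //   Push(x0, x1); ... Pop(x1, x0);
+  // saves and later restores both registers, because arguments are pushed in
+  // the order given and must be popped in the reverse order (as Printf does
+  // with Tmp0() and xzr).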
+ void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
+ const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
+ const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+
+ // Alternative forms of Push and Pop, taking a RegList or CPURegList that
+ // specifies the registers that are to be pushed or popped. Higher-numbered
+ // registers are associated with higher memory addresses (as in the A32 push
+ // and pop instructions).
+ //
+ // (Push|Pop)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
+ // supported.
+ //
+ // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
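+  //
+  // For example (an illustrative sketch, assuming Register::Bit() from
+  // assembler-a64.h yields the register's RegList bit):
+  //   PushXRegList(x0.Bit() | x1.Bit());
+  // pushes x0 and x1 as 64-bit registers.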
+ void PushCPURegList(CPURegList registers);
+ void PopCPURegList(CPURegList registers);
+
+ inline void PushSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PushCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PopSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PopCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PushXRegList(RegList regs) {
+ PushSizeRegList(regs, kXRegSize);
+ }
+ inline void PopXRegList(RegList regs) {
+ PopSizeRegList(regs, kXRegSize);
+ }
+ inline void PushWRegList(RegList regs) {
+ PushSizeRegList(regs, kWRegSize);
+ }
+ inline void PopWRegList(RegList regs) {
+ PopSizeRegList(regs, kWRegSize);
+ }
+ inline void PushDRegList(RegList regs) {
+ PushSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
+ }
+ inline void PopDRegList(RegList regs) {
+ PopSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
+ }
+ inline void PushSRegList(RegList regs) {
+ PushSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
+ }
+ inline void PopSRegList(RegList regs) {
+ PopSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
+ }
+
+ // Push the specified register 'count' times.
+ void PushMultipleTimes(int count, Register src);
+
+ // This is a convenience method for pushing a single Handle<Object>.
+ inline void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ // Aliases of Push and Pop, required for V8 compatibility.
+ inline void push(Register src) {
+ Push(src);
+ }
+ inline void pop(Register dst) {
+ Pop(dst);
+ }
+
+ // Poke 'src' onto the stack. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Poke(const CPURegister& src, const Operand& offset);
+
+ // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Peek(const CPURegister& dst, const Operand& offset);
+
+ // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
+ // with 'src2' at a higher address than 'src1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
+
+ // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
+ // values peeked will be adjacent, with the value in 'dst2' being from a
+ // higher address than 'dst1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
+
+ // Claim or drop stack space without actually accessing memory.
+ //
+ // In debug mode, both of these will write invalid data into the claimed or
+ // dropped space.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then it
+ // must be aligned to 16 bytes and the size claimed or dropped must be a
+ // multiple of 16 bytes.
+ //
+ // Note that unit_size must be specified in bytes. For variants which take a
+ // Register count, the unit size must be a power of two.
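+  //
+  // A minimal usage sketch (illustrative only):
+  //   Claim(2);                    // Reserve two X-sized slots (16 bytes).
+  //   Poke(x0, 0);
+  //   Poke(x1, kXRegSizeInBytes);
+  //   ...
+  //   Peek(x1, kXRegSizeInBytes);
+  //   Peek(x0, 0);
+  //   Drop(2);                     // Release the same 16 bytes.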
+ inline void Claim(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
+ inline void Claim(const Register& count,
+ uint64_t unit_size = kXRegSizeInBytes);
+ inline void Drop(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
+ inline void Drop(const Register& count,
+ uint64_t unit_size = kXRegSizeInBytes);
+
+ // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
+ // register.
+ inline void ClaimBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSizeInBytes);
+ inline void DropBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSizeInBytes);
+
+ // Compare a register with an operand, and branch to label depending on the
+ // condition. May corrupt the status flags.
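+  // For example (illustrative): CompareAndBranch(x0, 42, eq, &is_42) branches
+  // to 'is_42' when x0 holds the value 42.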
+ inline void CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ANY of
+ // those bits are set. May corrupt the status flags.
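+  // For example (illustrative): TestAndBranchIfAnySet(x0, kSmiTagMask, &slow)
+  // branches to 'slow' when x0 holds a heap object rather than a smi.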
+ inline void TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ALL of
+  // those bits are clear (i.e. not set). May corrupt the status flags.
+ inline void TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
+
+ // Insert one or more instructions into the instruction stream that encode
+ // some caller-defined data. The instructions used will be executable with no
+ // side effects.
+ inline void InlineData(uint64_t data);
+
+ // Insert an instrumentation enable marker into the instruction stream.
+ inline void EnableInstrumentation();
+
+ // Insert an instrumentation disable marker into the instruction stream.
+ inline void DisableInstrumentation();
+
+ // Insert an instrumentation event marker into the instruction stream. These
+ // will be picked up by the instrumentation system to annotate an instruction
+  // profile. The argument marker_name must be a printable two-character string;
+ // it will be encoded in the event marker.
+ inline void AnnotateInstrumentation(const char* marker_name);
+
+ // If emit_debug_code() is true, emit a run-time check to ensure that
+ // StackPointer() does not point below the system stack pointer.
+ //
+ // Whilst it is architecturally legal for StackPointer() to point below csp,
+ // it can be evidence of a potential bug because the ABI forbids accesses
+ // below csp.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ //
+ // If StackPointer() is the system stack pointer, this emits no code.
+ void AssertStackConsistency();
+
+ // Preserve the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are pushed before lower-numbered registers, and
+ // thus get higher addresses.
+ // Floating-point registers are pushed before general-purpose registers, and
+ // thus get higher addresses.
+ //
+ // Note that registers are not checked for invalid values. Use this method
+ // only if you know that the GC won't try to examine the values on the stack.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PushCalleeSavedRegisters();
+
+ // Restore the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are popped after lower-numbered registers, and
+ // thus come from higher addresses.
+ // Floating-point registers are popped after general-purpose registers, and
+ // thus come from higher addresses.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PopCalleeSavedRegisters();
+
+ // Set the current stack pointer, but don't generate any code.
+ inline void SetStackPointer(const Register& stack_pointer) {
+ ASSERT(!AreAliased(stack_pointer, Tmp0(), Tmp1()));
+ sp_ = stack_pointer;
+ }
+
+ // Return the current stack pointer, as set by SetStackPointer.
+ inline const Register& StackPointer() const {
+ return sp_;
+ }
+
+ // Align csp for a frame, as per ActivationFrameAlignment, and make it the
+ // current stack pointer.
+ inline void AlignAndSetCSPForFrame() {
+ int sp_alignment = ActivationFrameAlignment();
+ // AAPCS64 mandates at least 16-byte alignment.
+ ASSERT(sp_alignment >= 16);
+ ASSERT(IsPowerOf2(sp_alignment));
+ Bic(csp, StackPointer(), sp_alignment - 1);
+ SetStackPointer(csp);
+ }
+
+ // Push the system stack pointer (csp) down to allow the same to be done to
+ // the current stack pointer (according to StackPointer()). This must be
+ // called _before_ accessing the memory.
+ //
+ // This is necessary when pushing or otherwise adding things to the stack, to
+ // satisfy the AAPCS64 constraint that the memory below the system stack
+ // pointer is not accessed.
+ //
+ // This method asserts that StackPointer() is not csp, since the call does
+ // not make sense in that context.
+ //
+ // TODO(jbramley): Currently, this method can only accept values of 'space'
+ // that can be encoded in one instruction. Refer to the implementation for
+ // details.
+ inline void BumpSystemStackPointer(const Operand& space);
+
+ // Helpers ------------------------------------------------------------------
+ // Root register.
+ inline void InitializeRootRegister();
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination,
+ Heap::RootListIndex index);
+ // Store an object to the root table.
+ void StoreRoot(Register source,
+ Heap::RootListIndex index);
+
+ // Load both TrueValue and FalseValue roots.
+ void LoadTrueFalseRoots(Register true_root, Register false_root);
+
+ void LoadHeapObject(Register dst, Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ ASSERT(object->IsSmi());
+ Mov(result, Operand(object));
+ }
+ }
+
+ static int SafepointRegisterStackIndex(int reg_code);
+
+  // This is required for compatibility with architecture-independent code.
+ // Remove if not needed.
+ inline void Move(Register dst, Register src) { Mov(dst, src); }
+
+ void LoadInstanceDescriptors(Register map,
+ Register descriptors);
+ void EnumLengthUntagged(Register dst, Register map);
+ void EnumLengthSmi(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
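+  // Decode a bit field from a smi-tagged value held in 'reg', leaving the
+  // result in 'reg'. As a worked example (with a hypothetical
+  // BitField<T, 2, 3> Field and a kSmiShift of 32), this emits
+  // Ubfx(reg, reg, 34, 3): the field's three bits sit 34 bits up once the smi
+  // shift is included.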
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const uint64_t shift = Field::kShift + kSmiShift;
+ static const uint64_t setbits = CountSetBits(Field::kMask, 32);
+ Ubfx(reg, reg, shift, setbits);
+ }
+
+ // ---- SMI and Number Utilities ----
+
+ inline void SmiTag(Register dst, Register src);
+ inline void SmiTag(Register smi);
+ inline void SmiUntag(Register dst, Register src);
+ inline void SmiUntag(Register smi);
+ inline void SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+ inline void SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+
+ // Compute the absolute value of 'smi' and leave the result in 'smi'
+ // register. If 'smi' is the most negative SMI, the absolute value cannot
+ // be represented as a SMI and a jump to 'slow' is done.
+ void SmiAbs(const Register& smi, Label* slow);
+
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label);
+ inline void JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
+ inline void JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
+ void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ void JumpForHeapNumber(Register object,
+ Register heap_number_map,
+ Label* on_heap_number,
+ Label* on_not_heap_number = NULL);
+ void JumpIfHeapNumber(Register object,
+ Label* on_heap_number,
+ Register heap_number_map = NoReg);
+ void JumpIfNotHeapNumber(Register object,
+ Label* on_not_heap_number,
+ Register heap_number_map = NoReg);
+
+ // Jump to label if the input double register contains -0.0.
+ void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
+ // output.
+ void ClampInt32ToUint8(Register in_out);
+ void ClampInt32ToUint8(Register output, Register input);
+
+ // Saturate a double in input to an unsigned 8-bit integer in output.
+ void ClampDoubleToUint8(Register output,
+ DoubleRegister input,
+ DoubleRegister dbl_scratch);
+
+ // Try to convert a double to a signed 32-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are converted to 0 and handled as a success.
+ void TryConvertDoubleToInt32(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is32Bits());
+ TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
+
+ // Try to convert a double to a signed 64-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are converted to 0 and handled as a success.
+ void TryConvertDoubleToInt64(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is64Bits());
+ TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
+
+ // ---- Object Utilities ----
+
+ // Copy fields from 'src' to 'dst', where both are tagged objects.
+ // The 'temps' list is a list of X registers which can be used for scratch
+ // values. The temps list must include at least one register, and it must not
+ // contain Tmp0() or Tmp1().
+ //
+ // Currently, CopyFields cannot make use of more than three registers from
+ // the 'temps' list.
+ //
+ // As with several MacroAssembler methods, Tmp0() and Tmp1() will be used.
+ void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
+
+ // Copies a number of bytes from src to dst. All passed registers are
+ // clobbered. On exit src and dst will point to the place just after where the
+ // last byte was read or written and length will be zero. Hint may be used to
+ // determine which is the most efficient algorithm to use for copying.
+ void CopyBytes(Register dst,
+ Register src,
+ Register length,
+ Register scratch,
+ CopyHint hint = kCopyUnknown);
+
+ // Initialize fields with filler values. Fields starting at start_offset not
+ // including end_offset are overwritten with the value in filler. At the end
+ // of the loop, start_offset takes the value of end_offset.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
+ // ---- String Utilities ----
+
+
+ // Jump to label if either object is not a sequential ASCII string.
+ // Optionally perform a smi check on the objects first.
+ void JumpIfEitherIsNotSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure,
+ SmiCheckType smi_check = DO_SMI_CHECK);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfEitherInstanceTypeIsNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+
+ // ---- Calling / Jumping helpers ----
+
+  // This is required for compatibility with architecture-independent code.
+ inline void jmp(Label* L) { B(L); }
+
+ // Passes thrown value to the handler of top of the try handler chain.
+ // Register value must be x0.
+ void Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain. Register value must be x0.
+ void ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
+ // Throw a message string as an exception if the value is a smi.
+ void ThrowIfSmi(const Register& value, BailoutReason reason);
+
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+ void TailCallStub(CodeStub* stub);
+
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
+
+ // TODO(all): Why does this variant save FP regs unconditionally?
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
+
+ void TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size);
+
+ int ActivationFrameAlignment();
+
+ // Calls a C function.
+ // The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments);
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments);
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions.
+ // 'stack_space' is the space to be unwound on exit (includes the call JS
+ // arguments space and the additional space allocated for the fast call).
+ // 'spill_offset' is the offset from the stack pointer where
+ // CallApiFunctionAndReturn can spill registers.
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
+
+  // The number of registers that CallApiFunctionAndReturn will need to save on
+  // the stack. The space for these registers needs to be allocated in the
+ // ExitFrame before calling CallApiFunctionAndReturn.
+ static const int kCallApiFunctionSpillSpace = 4;
+
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& builtin);
+ // Tail call of a runtime routine (jump).
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ // Store the code object for the given builtin in the target register and
+ // setup the function in x1.
+ // TODO(all): Can we use another register than x1?
+ void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+ void Jump(Register target);
+ void Jump(Address target, RelocInfo::Mode rmode);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode);
+ void Jump(intptr_t target, RelocInfo::Mode rmode);
+
+ void Call(Register target);
+ void Call(Label* target);
+ void Call(Address target, RelocInfo::Mode rmode);
+ void Call(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // For every Call variant, there is a matching CallSize function that returns
+ // the size (in bytes) of the call sequence.
+ static int CallSize(Register target);
+ static int CallSize(Label* target);
+ static int CallSize(Address target, RelocInfo::Mode rmode);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // Registers used through the invocation chain are hard-coded.
+ // We force passing the parameters to ensure the contracts are correctly
+ // honoured by the caller.
+ // 'function' must be x1.
+ // 'actual' must use an immediate or x0.
+ // 'expected' must use an immediate or x2.
+ // 'call_kind' must be x5.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag,
+ bool* definitely_mismatches,
+ const CallWrapper& call_wrapper);
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ // Invoke the JavaScript function in the given register.
+ // Changes the current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+
+ // ---- Floating point helpers ----
+
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-a64.cc.
+ void TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript 9.5: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must
+  // be different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Label* not_int32);
+
+ // ---- Code generation helpers ----
+
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() const { return generating_stub_; }
+#if DEBUG
+ void set_allow_macro_instructions(bool value) {
+ allow_macro_instructions_ = value;
+ }
+ bool allow_macro_instructions() const { return allow_macro_instructions_; }
+#endif
+ bool use_real_aborts() const { return use_real_aborts_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
+ bool AllowThisStubCall(CodeStub* stub);
+
+ class NoUseRealAbortsScope {
+ public:
+ explicit NoUseRealAbortsScope(MacroAssembler* masm) :
+ saved_(masm->use_real_aborts_), masm_(masm) {
+ masm_->use_real_aborts_ = false;
+ }
+ ~NoUseRealAbortsScope() {
+ masm_->use_real_aborts_ = saved_;
+ }
+ private:
+ bool saved_;
+ MacroAssembler* masm_;
+ };
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void DebugBreak();
+#endif
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. The allocated object is returned in result.
+ //
+ // If the new space is exhausted control continues at the gc_required label.
+ // In this case, the result and scratch registers may still be clobbered.
+  // If flags includes TAG_OBJECT, the result is tagged as a heap object.
+ void Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
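+
+  // Illustrative usage sketch (not part of the original patch): allocating a
+  // fixed-size, tagged object and bailing out when new space is exhausted.
+  // Register and label names are example placeholders.
+  //
+  //   Label gc_required, done;
+  //   masm->Allocate(JSValue::kSize, x0, x1, x2, &gc_required, TAG_OBJECT);
+  //   // ... initialize the fields of the new object held in x0 ...
+  //   masm->B(&done);
+  //   masm->Bind(&gc_required);
+  //   // ... fall back to a runtime allocation ...
+  //   masm->Bind(&done);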
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed.
+ // All registers are clobbered.
+ // If no heap_number_map register is provided, the function will take care of
+ // loading it.
+ void AllocateHeapNumber(Register result,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map = NoReg);
+ void AllocateHeapNumberWithValue(Register result,
+ DoubleRegister value,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map = NoReg);
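+
+  // Illustrative usage sketch (not part of the original patch): boxing the
+  // double in d0 as a new HeapNumber, with x10 and x11 as example scratch
+  // registers and a bail-out label for when a GC is required.
+  //
+  //   Label gc_required;
+  //   masm->AllocateHeapNumberWithValue(x0, d0, &gc_required, x10, x11);
+  //   // x0 now holds a tagged HeapNumber whose value is d0.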
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+  // Try to get the function prototype of a function and put the value in the
+ // result register. Checks that the function really is a function and jumps
+ // to the miss label if the fast checks fail. The function register will be
+ // untouched; the other registers may be clobbered.
+ enum BoundFunctionAction {
+ kMissOnBoundFunction,
+ kDontMissOnBoundFunction
+ };
+
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ BoundFunctionAction action =
+ kDontMissOnBoundFunction);
+
+ // Compare object type for heap object. heap_object contains a non-Smi
+ // whose object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ // It leaves the map in the map register (unless the type_reg and map register
+ // are the same register). It leaves the heap object in the heap_object
+ // register unless the heap_object register is the same register as one of the
+ // other registers.
+ void CompareObjectType(Register heap_object,
+ Register map,
+ Register type_reg,
+ InstanceType type);
+
+
+ // Compare object type for heap object, and branch if equal (or not.)
+ // heap_object contains a non-Smi whose object type should be compared with
+ // the given type. This both sets the flags and leaves the object type in
+ // the type_reg register. It leaves the map in the map register (unless the
+ // type_reg and map register are the same register). It leaves the heap
+ // object in the heap_object register unless the heap_object register is the
+ // same register as one of the other registers.
+ void JumpIfObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_cond_pass,
+ Condition cond = eq);
+
+ void JumpIfNotObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_not_object);
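+
+  // Illustrative usage sketch (not part of the original patch): taking a fast
+  // path only when the object in x1 is a JSFunction. x10 and x11 are example
+  // scratch registers for the map and the instance type.
+  //
+  //   Label not_a_function;
+  //   masm->JumpIfNotObjectType(x1, x10, x11, JS_FUNCTION_TYPE,
+  //                             &not_a_function);
+  //   // ... x1 is known to be a JSFunction here; x11 holds its type ...
+  //   masm->Bind(&not_a_function);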
+
+ // Compare instance type in a map. map contains a valid map object whose
+ // object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ void CompareInstanceType(Register map,
+ Register type_reg,
+ InstanceType type);
+
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
+ // set with result of map compare. If multiple map compares are required, the
+  // compare sequence branches to early_success.
+ void CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* early_success = NULL);
+
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success = NULL);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // As above, but the map of the object is already loaded into obj_map, and is
+ // preserved.
+ void CheckMap(Register obj_map,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Test the bitfield of the heap object map with mask and set the condition
+ // flags. The object register is preserved.
+ void TestMapBitfield(Register object, uint64_t mask);
+
+ // Load the elements kind field of an object, and return it in the result
+ // register.
+ void LoadElementsKind(Register result, Register object);
+
+ // Compare the object in a register to a value from the root list.
+ // Uses the Tmp0() register as scratch.
+ void CompareRoot(const Register& obj, Heap::RootListIndex index);
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_equal);
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_not_equal);
+
+ // Load and check the instance type of an object for being a unique name.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+  // Falls through if the object is a unique name and jumps to fail otherwise.
+ inline void IsObjectNameType(Register object, Register type, Label* fail);
+
+ inline void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check the instance type in the given map to see if it corresponds to a
+ // JS object type. Jump to the fail label if this is not the case and fall
+  // through otherwise. However, if the fail label is NULL, no branch will be
+  // performed and only the condition flags will be updated. You can then test
+  // the "le" condition to check whether it is a valid JS object type.
+ inline void IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+  // Jumps to not_string or string as appropriate. If the appropriate label is
+  // NULL, falls through.
+ inline void IsObjectJSStringType(Register object, Register type,
+ Label* not_string, Label* string = NULL);
+
+ // Compare the contents of a register with an operand, and branch to true,
+ // false or fall through, depending on condition.
+ void CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
+ // Test the bits of register defined by bit_pattern, and branch to
+ // if_any_set, if_all_clear or fall_through accordingly.
+ void TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through);
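+
+  // Illustrative usage sketch (not part of the original patch): the
+  // fall-through label lets callers avoid emitting a branch for the case that
+  // continues with the next instruction. Here the non-smi case falls through.
+  //
+  //   Label if_smi, not_smi;
+  //   masm->TestAndSplit(x0, kSmiTagMask, &if_smi, &not_smi, &not_smi);
+  //   masm->Bind(&not_smi);
+  //   // ... non-smi case; the smi case continues at if_smi ...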
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiElements(Register map, Register scratch, Label* fail);
+
+ // Check to see if number can be stored as a double in FastDoubleElements.
+ // If it can, store it at the index specified by key_reg in the array,
+ // otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ FPRegister fpscratch2,
+ Label* fail,
+ int elements_offset = 0);
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support.
+
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask);
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss);
+
+  // Hash the integer value in the 'key' register.
+ // It uses the same algorithm as ComputeIntegerHash in utils.h.
+ void GetNumberHash(Register key, Register scratch);
+
+ // Load value from the dictionary.
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+  //           Allowed to be the same as 'key' or 'elements'.
+  //           Unchanged on bailout so 'key' or 'elements' can be used
+ // in further computation.
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
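+
+  // Illustrative usage sketch (not part of the original patch): a keyed-load
+  // slow path probing the receiver's number dictionary with a smi key, and
+  // bailing out to 'miss' if the probe fails. Register choices are examples.
+  //
+  //   Label miss;
+  //   masm->LoadFromNumberDictionary(&miss, x3 /* elements */, x0 /* key */,
+  //                                  x0 /* result */, x10, x11, x12, x13);
+  //   masm->Ret();
+  //   masm->Bind(&miss);
+  //   // ... fall back to the runtime ...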
+
+ // ---------------------------------------------------------------------------
+ // Frames.
+
+ // Activation support.
+  // Note that Tmp0() and Tmp1() are used as scratch registers. This is safe
+ // because these methods are not used in Crankshaft.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Returns map with validated enum cache in object register.
+ void CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver should point to the array object.
+ // If allocation info is present, the Z flag is set (so that the eq
+ // condition will pass).
+ void TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
+ &no_memento_found);
+ B(eq, memento_found);
+ Bind(&no_memento_found);
+ }
+
+ // The stack pointer has to switch between csp and jssp when setting up and
+ // destroying the exit frame. Hence preserving/restoring the registers is
+ // slightly more complicated than simple push/pop operations.
+ void ExitFramePreserveFPRegs();
+ void ExitFrameRestoreFPRegs();
+
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
+ // Enter exit frame. Exit frames are used when calling C code from generated
+ // (JavaScript) code.
+ //
+ // The stack pointer must be jssp on entry, and will be set to csp by this
+ // function. The frame pointer is also configured, but the only other
+ // registers modified by this function are the provided scratch register, and
+ // jssp.
+ //
+ // The 'extra_space' argument can be used to allocate some space in the exit
+ // frame that will be ignored by the GC. This space will be reserved in the
+ // bottom of the frame immediately above the return address slot.
+ //
+ // Set up a stack frame and registers as follows:
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: SPOffset (new csp)
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
+ // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // Alignment padding, if necessary.
+ // csp -> csp[0]: Space reserved for the return address.
+ //
+ // This function also stores the new frame information in the top frame, so
+ // that the new frame becomes the current frame.
+ void EnterExitFrame(bool save_doubles,
+ const Register& scratch,
+ int extra_space = 0);
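+
+  // Illustrative usage sketch (not part of the original patch): bracketing a
+  // call to C code with an exit frame. The scratch register (x10) and the
+  // save_doubles flag are example choices.
+  //
+  //   masm->EnterExitFrame(false /* save_doubles */, x10);
+  //   // ... set up AAPCS64 arguments and call the C function ...
+  //   masm->LeaveExitFrame(false /* save_doubles */, x10,
+  //                        true /* restore_context */);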
+
+ // Leave the current exit frame, after a C function has returned to generated
+ // (JavaScript) code.
+ //
+ // This effectively unwinds the operation of EnterExitFrame:
+ // * Preserved doubles are restored (if restore_doubles is true).
+ // * The frame information is removed from the top frame.
+ // * The exit frame is dropped.
+ // * The stack pointer is reset to jssp.
+ //
+ // The stack pointer must be csp on entry.
+ void LeaveExitFrame(bool save_doubles,
+ const Register& scratch,
+ bool restore_context);
+
+ void LoadContext(Register dst, int context_chain_length);
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // ---------------------------------------------------------------------------
+ // Garbage collector support (GC).
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+
+ void PushSafepointFPRegisters();
+ void PopSafepointFPRegisters();
+
+ // Store value in register src in the safepoint stack slot for register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst) {
+ Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+ }
+
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+ }
+
+ void CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set);
+
+ void CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear);
+
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
+ // Check if object is in new space and jump accordingly.
+ // Register 'object' is preserved.
+ void JumpIfNotInNewSpace(Register object,
+ Label* branch) {
+ InNewSpace(object, ne, branch);
+ }
+
+ void JumpIfInNewSpace(Register object,
+ Label* branch) {
+ InNewSpace(object, eq, branch);
+ }
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
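+
+  // Illustrative usage sketch (not part of the original patch): after storing
+  // a tagged value into a field, emit the write barrier so the remembered set
+  // and the incremental marker stay consistent. Register choices are examples;
+  // kLRHasNotBeenSaved and kDontSaveFPRegs are the existing enumerators used
+  // by these methods.
+  //
+  //   masm->Str(x1, FieldMemOperand(x0, JSObject::kPropertiesOffset));
+  //   masm->RecordWriteField(x0, JSObject::kPropertiesOffset, x1, x10,
+  //                          kLRHasNotBeenSaved, kDontSaveFPRegs);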
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Helper for finding the mark bits for an address.
+ // Note that the behaviour slightly differs from other architectures.
+ // On exit:
+ // - addr_reg is unchanged.
+ // - The bitmap register points at the word with the mark bits.
+ // - The shift register contains the index of the first color bit for this
+ // object in the bitmap.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg);
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black);
+
+
+ // Get the location of a relocated constant (its address in the constant pool)
+ // from its load site.
+ void GetRelocatedValueLocation(Register ldr_location,
+ Register result);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging.
+
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason);
+ void AssertRegisterIsClear(Register reg, BailoutReason reason);
+ void AssertRegisterIsRoot(
+ Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
+ void AssertFastElements(Register elements);
+
+ // Abort if the specified register contains the invalid color bit pattern.
+ // The pattern must be in bits [1:0] of 'reg' register.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertHasValidColor(const Register& reg);
+
+ // Abort if 'object' register doesn't point to a string object.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertIsString(const Register& object);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason);
+ void CheckRegisterIsClear(Register reg, BailoutReason reason);
+
+ // Print a message to stderr and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ // Load the initial map for new Arrays from a JSFunction.
+ void LoadInitialArrayMap(Register function_in,
+ Register scratch,
+ Register map_out,
+ ArrayHasHoles holes);
+
+ void LoadArrayFunction(Register function);
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers function and
+ // map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch);
+
+ // --------------------------------------------------------------------------
+ // Set the registers used internally by the MacroAssembler as scratch
+ // registers. These registers are used to implement behaviours which are not
+ // directly supported by A64, and where an intermediate result is required.
+ //
+ // Both tmp0 and tmp1 may be set to any X register except for xzr, sp,
+ // and StackPointer(). Also, they must not be the same register (though they
+ // may both be NoReg).
+ //
+ // It is valid to set either or both of these registers to NoReg if you don't
+ // want the MacroAssembler to use any scratch registers. In a debug build, the
+ // Assembler will assert that any registers it uses are valid. Be aware that
+ // this check is not present in release builds. If this is a problem, use the
+ // Assembler directly.
+ void SetScratchRegisters(const Register& tmp0, const Register& tmp1) {
+ // V8 assumes the macro assembler uses ip0 and ip1 as temp registers.
+ ASSERT(tmp0.IsNone() || tmp0.Is(ip0));
+ ASSERT(tmp1.IsNone() || tmp1.Is(ip1));
+
+ ASSERT(!AreAliased(xzr, csp, tmp0, tmp1));
+ ASSERT(!AreAliased(StackPointer(), tmp0, tmp1));
+ tmp0_ = tmp0;
+ tmp1_ = tmp1;
+ }
+
+ const Register& Tmp0() const {
+ return tmp0_;
+ }
+
+ const Register& Tmp1() const {
+ return tmp1_;
+ }
+
+ const Register WTmp0() const {
+ return Register::Create(tmp0_.code(), kWRegSize);
+ }
+
+ const Register WTmp1() const {
+ return Register::Create(tmp1_.code(), kWRegSize);
+ }
+
+ void SetFPScratchRegister(const FPRegister& fptmp0) {
+ fptmp0_ = fptmp0;
+ }
+
+ const FPRegister& FPTmp0() const {
+ return fptmp0_;
+ }
+
+ const Register AppropriateTempFor(
+ const Register& target,
+ const CPURegister& forbidden = NoCPUReg) const {
+ Register candidate = forbidden.Is(Tmp0()) ? Tmp1() : Tmp0();
+ ASSERT(!candidate.Is(target));
+ return Register::Create(candidate.code(), target.SizeInBits());
+ }
+
+ const FPRegister AppropriateTempFor(
+ const FPRegister& target,
+ const CPURegister& forbidden = NoCPUReg) const {
+ USE(forbidden);
+ FPRegister candidate = FPTmp0();
+ ASSERT(!candidate.Is(forbidden));
+ ASSERT(!candidate.Is(target));
+ return FPRegister::Create(candidate.code(), target.SizeInBits());
+ }
+
+ // Like printf, but print at run-time from generated code.
+ //
+ // The caller must ensure that arguments for floating-point placeholders
+ // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
+ // placeholders are Registers.
+ //
+ // A maximum of four arguments may be given to any single Printf call. The
+ // arguments must be of the same type, but they do not need to have the same
+ // size.
+ //
+ // The following registers cannot be printed:
+ // Tmp0(), Tmp1(), StackPointer(), csp.
+ //
+ // This function automatically preserves caller-saved registers so that
+ // calling code can use Printf at any point without having to worry about
+ // corruption. The preservation mechanism generates a lot of code. If this is
+ // a problem, preserve the important registers manually and then call
+ // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
+ // implicitly preserved.
+ //
+ // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be
+ // preserved, and can be printed. This allows Printf to be used during debug
+ // code.
+ //
+ // This function assumes (and asserts) that the current stack pointer is
+ // callee-saved, not caller-saved. This is most likely the case anyway, as a
+ // caller-saved stack pointer doesn't make a lot of sense.
+ void Printf(const char * format,
+ const CPURegister& arg0 = NoCPUReg,
+ const CPURegister& arg1 = NoCPUReg,
+ const CPURegister& arg2 = NoCPUReg,
+ const CPURegister& arg3 = NoCPUReg);
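+
+  // Illustrative usage sketch (not part of the original patch): all arguments
+  // of a single Printf call must be of the same type, so integer and
+  // floating-point values are printed with separate calls. The placeholders
+  // shown are examples of integer and floating-point format specifiers.
+  //
+  //   masm->Printf("w0 = %d\n", w0);
+  //   masm->Printf("d0 = %f, d1 = %f\n", d0, d1);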
+
+ // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
+ //
+ // The return code from the system printf call will be returned in x0.
+ void PrintfNoPreserve(const char * format,
+ const CPURegister& arg0 = NoCPUReg,
+ const CPURegister& arg1 = NoCPUReg,
+ const CPURegister& arg2 = NoCPUReg,
+ const CPURegister& arg3 = NoCPUReg);
+
+ // Code ageing support functions.
+
+ // Code ageing on A64 works similarly to on ARM. When V8 wants to mark a
+ // function as old, it replaces some of the function prologue (generated by
+ // FullCodeGenerator::Generate) with a call to a special stub (ultimately
+ // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
+ // function prologue to its initial young state (indicating that it has been
+ // recently run) and continues. A young function is therefore one which has a
+ // normal frame setup sequence, and an old function has a code age sequence
+ // which calls a code ageing stub.
+
+ // Set up a basic stack frame for young code (or code exempt from ageing) with
+ // type FUNCTION. It may be patched later for code ageing support. This is
+  // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
+ //
+ // This function takes an Assembler so it can be called from either a
+ // MacroAssembler or a PatchingAssembler context.
+ static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
+
+ // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
+ void EmitFrameSetupForCodeAgePatching();
+
+ // Emit a code age sequence that calls the relevant code age stub. The code
+ // generated by this sequence is expected to replace the code generated by
+ // EmitFrameSetupForCodeAgePatching, and represents an old function.
+ //
+ // If stub is NULL, this function generates the code age sequence but omits
+ // the stub address that is normally embedded in the instruction stream. This
+ // can be used by debug code to verify code age sequences.
+ static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
+
+ // Call EmitCodeAgeSequence from a MacroAssembler context.
+ void EmitCodeAgeSequence(Code* stub);
+
+  // Return true if the sequence is a young sequence generated by
+ // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
+ // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
+ static bool IsYoungSequence(byte* sequence);
+
+#ifdef DEBUG
+ // Return true if the sequence is a code age sequence generated by
+ // EmitCodeAgeSequence.
+ static bool IsCodeAgeSequence(byte* sequence);
+#endif
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ private:
+ // Helpers for CopyFields.
+ // These each implement CopyFields in a different way.
+ void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3);
+ void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2);
+ void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
+ Register scratch1);
+
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+ // block of registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size,
+ const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3);
+ void PopHelper(int count, int size,
+ const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3);
+
+ // Perform necessary maintenance operations before a push or pop.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PrepareForPush(int count, int size);
+ void PrepareForPop(int count, int size);
+
+ // Call Printf. On a native build, a simple call will be generated, but if the
+ // simulator is being used then a suitable pseudo-instruction is used. The
+ // arguments and stack (csp) must be prepared by the caller as for a normal
+ // AAPCS64 call to 'printf'.
+ //
+ // The 'type' argument specifies the type of the optional arguments.
+ void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Try to convert a double to an int so that integer fast-paths may be
+ // used. Not every valid integer value is guaranteed to be caught.
+  // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
+ // is a W or X register.
+ //
+ // This does not distinguish between +0 and -0, so if this distinction is
+ // important it must be checked separately.
+ void TryConvertDoubleToInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion = NULL);
+
+ bool generating_stub_;
+#if DEBUG
+  // Tells whether any of the macro instructions can be used. When false, the
+ // MacroAssembler will assert if a method which can emit a variable number
+ // of instructions is called.
+ bool allow_macro_instructions_;
+#endif
+ bool has_frame_;
+
+ // The Abort method should call a V8 runtime function, but the CallRuntime
+ // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
+ // use a simpler abort mechanism that doesn't depend on CEntryStub.
+ //
+ // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
+ // being generated.
+ bool use_real_aborts_;
+
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // The register to use as a stack pointer for stack operations.
+ Register sp_;
+
+ // Scratch registers used internally by the MacroAssembler.
+ Register tmp0_;
+ Register tmp1_;
+ FPRegister fptmp0_;
+
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+
+ public:
+ // Far branches resolving.
+ //
+ // The various classes of branch instructions with immediate offsets have
+ // different ranges. While the Assembler will fail to assemble a branch
+ // exceeding its range, the MacroAssembler offers a mechanism to resolve
+ // branches to too distant targets, either by tweaking the generated code to
+ // use branch instructions with wider ranges or generating veneers.
+ //
+ // Currently branches to distant targets are resolved using unconditional
+  // branch instructions with a range of +-128MB. If that becomes too little
+ // (!), the mechanism can be extended to generate special veneers for really
+ // far targets.
+
+ // Returns true if we should emit a veneer as soon as possible for a branch
+  // which can at most reach the specified pc.
+ bool ShouldEmitVeneer(int max_reachable_pc,
+ int margin = kVeneerDistanceMargin);
+
+ // The maximum code size generated for a veneer. Currently one branch
+ // instruction. This is for code size checking purposes, and can be extended
+ // in the future for example if we decide to add nops between the veneers.
+ static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
+
+ // Emits veneers for branches that are approaching their maximum range.
+ // If need_protection is true, the veneers are protected by a branch jumping
+ // over the code.
+ void EmitVeneers(bool need_protection);
+ void EmitVeneersGuard();
+  // Checks whether veneers need to be emitted at this point.
+ void CheckVeneers(bool need_protection);
+
+ // Helps resolve branching to labels potentially out of range.
+ // If the label is not bound, it registers the information necessary to later
+ // be able to emit a veneer for this branch if necessary.
+ // If the label is bound, it returns true if the label (or the previous link
+ // in the label chain) is out of range. In that case the caller is responsible
+ // for generating appropriate code.
+ // Otherwise it returns false.
+  // This function also checks whether veneers need to be emitted.
+ bool NeedExtraInstructionsOrRegisterBranch(Label *label,
+ ImmBranchType branch_type);
+
+ private:
+ // We generate a veneer for a branch if we reach within this distance of the
+ // limit of the range.
+ static const int kVeneerDistanceMargin = 2 * KB;
+ int unresolved_branches_first_limit() const {
+ ASSERT(!unresolved_branches_.empty());
+ return unresolved_branches_.begin()->first;
+ }
+};
+
+
+// Use this scope when you need a one-to-one mapping between methods and
+// instructions. This scope prevents the MacroAssembler from being called and
+// literal pools from being emitted. It also asserts the number of instructions
+// emitted is what you specified when creating the scope.
+class InstructionAccurateScope BASE_EMBEDDED {
+ public:
+ InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
+ : masm_(masm), size_(count * kInstructionSize) {
+ masm_->StartBlockConstPool();
+#ifdef DEBUG
+ if (count != 0) {
+ masm_->bind(&start_);
+ }
+ previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
+ masm_->set_allow_macro_instructions(false);
+#endif
+ }
+
+ ~InstructionAccurateScope() {
+ masm_->EndBlockConstPool();
+#ifdef DEBUG
+ if (start_.is_bound()) {
+ ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
+ }
+ masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
+#endif
+ }
+
+ private:
+ MacroAssembler* masm_;
+ size_t size_;
+#ifdef DEBUG
+ Label start_;
+ bool previous_allow_macro_instructions_;
+#endif
+};
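+
+// Illustrative usage sketch (not part of the original patch): emitting a
+// fixed-size sequence whose exact length must be known, e.g. for patching.
+// The scope blocks the constant pool and, in debug builds, checks that
+// exactly the requested number of instructions was emitted. Only raw
+// assembler instructions may be used inside the scope.
+//
+//   {
+//     InstructionAccurateScope scope(masm, 2);
+//     masm->ldr(x10, MemOperand(x11));
+//     masm->br(x10);
+//   }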
+
+
+inline MemOperand ContextMemOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+inline MemOperand GlobalObjectMemOperand() {
+ return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+}
+
+
+// Encode and decode information about patchable inline SMI checks.
+class InlineSmiCheckInfo {
+ public:
+ explicit InlineSmiCheckInfo(Address info);
+
+ bool HasSmiCheck() const {
+ return smi_check_ != NULL;
+ }
+
+ const Register& SmiRegister() const {
+ return reg_;
+ }
+
+ Instruction* SmiCheck() const {
+ return smi_check_;
+ }
+
+ // Use MacroAssembler::InlineData to emit information about patchable inline
+ // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
+ // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
+ //
+ // The generated patch information can be read using the InlineSMICheckInfo
+ // class.
+ static void Emit(MacroAssembler* masm, const Register& reg,
+ const Label* smi_check);
+
+ // Emit information to indicate that there is no inline SMI check.
+ static void EmitNotInlined(MacroAssembler* masm) {
+ Label unbound;
+ Emit(masm, NoReg, &unbound);
+ }
+
+ private:
+ Register reg_;
+ Instruction* smi_check_;
+
+ // Fields in the data encoded by InlineData.
+
+  // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
+ // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
+ // used in a patchable check. The Emit() method checks this.
+ //
+ // Note that the total size of the fields is restricted by the underlying
+ // storage size handled by the BitField class, which is a uint32_t.
+ class RegisterBits : public BitField<unsigned, 0, 5> {};
+ class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
+};
+
+} } // namespace v8::internal
+
+#ifdef GENERATED_CODE_COVERAGE
+#error "Unsupported option"
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+#endif // V8_A64_MACRO_ASSEMBLER_A64_H_
diff --git a/deps/v8/src/a64/regexp-macro-assembler-a64.cc b/deps/v8/src/a64/regexp-macro-assembler-a64.cc
new file mode 100644
index 0000000000..00558c017b
--- /dev/null
+++ b/deps/v8/src/a64/regexp-macro-assembler-a64.cc
@@ -0,0 +1,1730 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "cpu-profiler.h"
+#include "unicode.h"
+#include "log.h"
+#include "code-stubs.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "a64/regexp-macro-assembler-a64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention:
+ * - w19     : Used to temporarily store a value before a call to C code.
+ * See CheckNotBackReferenceIgnoreCase.
+ * - x20 : Pointer to the current code object (Code*),
+ * it includes the heap object tag.
+ * - w21 : Current position in input, as negative offset from
+ * the end of the string. Please notice that this is
+ * the byte offset, not the character offset!
+ * - w22 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - x23 : Points to tip of backtrack stack.
+ * - w24 : Position of the first character minus one: non_position_value.
+ * Used to initialize capture registers.
+ * - x25 : Address at the end of the input string: input_end.
+ * Points to byte after last character in input.
+ * - x26 : Address at the start of the input string: input_start.
+ * - w27 : Where to start in the input string.
+ * - x28 : Output array pointer.
+ * - x29/fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - x16/x17 : IP registers, used by assembler. Very volatile.
+ * - csp : Points to tip of C stack.
+ *
+ * - x0-x7 : Used as a cache to store 32 bit capture registers. These
+ * registers need to be retained every time a call to C code
+ * is done.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * Location Name Description
+ * (as referred to in
+ * the code)
+ *
+ * - fp[104] isolate Address of the current isolate.
+ * - fp[96] return_address Secondary link/return address
+ * used by an exit frame if this is a
+ * native call.
+ * ^^^ csp when called ^^^
+ * - fp[88] lr Return from the RegExp code.
+ * - fp[80] r29 Old frame pointer (CalleeSaved).
+ * - fp[0..72] r19-r28 Backup of CalleeSaved registers.
+ * - fp[-8] direct_call 1 => Direct call from JavaScript code.
+ * 0 => Call through the runtime system.
+ * - fp[-16] stack_base High end of the memory area to use as
+ * the backtracking stack.
+ * - fp[-24] output_size Output may fit multiple sets of matches.
+ * - fp[-32] input Handle containing the input string.
+ * - fp[-40] success_counter
+ * ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
+ * - fp[-44] register N Capture registers initialized with
+ * - fp[-48] register N + 1 non_position_value.
+ * ... The first kNumCachedRegisters (N) registers
+ * ... are cached in x0 to x7.
+ * ... Only positions must be stored in the first
+ * - ... num_saved_registers_ registers.
+ * - ...
+ * - register N + num_registers - 1
+ * ^^^^^^^^^ csp ^^^^^^^^^
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input,
+ * int start_offset,
+ * Address input_start,
+ * Address input_end,
+ * int* output,
+ * int output_size,
+ * Address stack_base,
+ * bool direct_call = false,
+ * Address secondary_return_address, // Only used by native call.
+ * Isolate* isolate)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in a64/simulator-a64.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerA64::RegExpMacroAssemblerA64(
+ Mode mode,
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_() {
+ __ SetStackPointer(csp);
+ ASSERT_EQ(0, registers_to_save % 2);
+ // We can cache at most 16 W registers in x0-x7.
+ STATIC_ASSERT(kNumCachedRegisters <= 16);
+ STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
+ __ B(&entry_label_); // We'll write the entry code later.
+ __ Bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerA64::~RegExpMacroAssemblerA64() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+}
+
+int RegExpMacroAssemblerA64::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerA64::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Add(current_input_offset(),
+ current_input_offset(), by * char_size());
+ }
+}
+
+
+void RegExpMacroAssemblerA64::AdvanceRegister(int reg, int by) {
+ ASSERT((reg >= 0) && (reg < num_registers_));
+ if (by != 0) {
+ Register to_advance;
+ RegisterState register_state = GetRegisterState(reg);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(w10, register_location(reg));
+ __ Add(w10, w10, by);
+ __ Str(w10, register_location(reg));
+ break;
+ case CACHED_LSW:
+ to_advance = GetCachedRegister(reg);
+ __ Add(to_advance, to_advance, by);
+ break;
+ case CACHED_MSW:
+ to_advance = GetCachedRegister(reg);
+ __ Add(to_advance, to_advance, static_cast<int64_t>(by) << kWRegSize);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void RegExpMacroAssemblerA64::Backtrack() {
+ CheckPreemption();
+ Pop(w10);
+ __ Add(x10, code_pointer(), Operand(w10, UXTW));
+ __ Br(x10);
+}
+
+
+void RegExpMacroAssemblerA64::Bind(Label* label) {
+ __ Bind(label);
+}
+
+
+void RegExpMacroAssemblerA64::CheckCharacter(uint32_t c, Label* on_equal) {
+ CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerA64::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
+}
+
+
+void RegExpMacroAssemblerA64::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the input string?
+ CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
+ // If we did, are we still at the start of the input string?
+ __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, input_start());
+ BranchOrBacktrack(eq, on_at_start);
+ __ Bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerA64::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the input string?
+ CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
+ // If we did, are we still at the start of the input string?
+ __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, input_start());
+ BranchOrBacktrack(ne, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerA64::CheckCharacterLT(uc16 limit, Label* on_less) {
+ CompareAndBranchOrBacktrack(current_character(), limit, lo, on_less);
+}
+
+
+void RegExpMacroAssemblerA64::CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string) {
+ // This method is only ever called from the cctests.
+
+ if (check_end_of_string) {
+    // Is the last character of the required match inside the string?
+ CheckPosition(cp_offset + str.length() - 1, on_failure);
+ }
+
+ Register characters_address = x11;
+
+ __ Add(characters_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+ if (cp_offset != 0) {
+ __ Add(characters_address, characters_address, cp_offset * char_size());
+ }
+
+ for (int i = 0; i < str.length(); i++) {
+ if (mode_ == ASCII) {
+ __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
+ ASSERT(str[i] <= String::kMaxOneByteCharCode);
+ } else {
+ __ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
+ }
+ CompareAndBranchOrBacktrack(w10, str[i], ne, on_failure);
+ }
+}
+
+
+void RegExpMacroAssemblerA64::CheckGreedyLoop(Label* on_equal) {
+ __ Ldr(w10, MemOperand(backtrack_stackpointer()));
+ __ Cmp(current_input_offset(), w10);
+ __ Cset(x11, eq);
+ __ Add(backtrack_stackpointer(),
+ backtrack_stackpointer(), Operand(x11, LSL, kWRegSizeInBytesLog2));
+ BranchOrBacktrack(eq, on_equal);
+}
+
+void RegExpMacroAssemblerA64::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ Register capture_start_offset = w10;
+ // Save the capture length in a callee-saved register so it will
+ // be preserved if we call a C helper.
+ Register capture_length = w19;
+ ASSERT(kCalleeSaved.IncludesAliasOf(capture_length));
+
+ // Find length of back-referenced capture.
+ ASSERT((start_reg % 2) == 0);
+ if (start_reg < kNumCachedRegisters) {
+ __ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
+ __ Lsr(x11, GetCachedRegister(start_reg), kWRegSize);
+ } else {
+ __ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
+ }
+ __ Sub(capture_length, w11, capture_start_offset); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Cbz(capture_length, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ Register capture_start_address = x12;
+    Register capture_end_address = x13;
+ Register current_position_address = x14;
+
+ __ Add(capture_start_address,
+ input_end(),
+ Operand(capture_start_offset, SXTW));
+    __ Add(capture_end_address,
+ capture_start_address,
+ Operand(capture_length, SXTW));
+ __ Add(current_position_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+
+ Label loop;
+ __ Bind(&loop);
+ __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+ __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+ __ Cmp(w10, w11);
+ __ B(eq, &loop_check);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Orr(w10, w10, 0x20); // Convert capture character to lower-case.
+ __ Orr(w11, w11, 0x20); // Also convert input character.
+ __ Cmp(w11, w10);
+ __ B(ne, &fail);
+ __ Sub(w10, w10, 'a');
+ __ Cmp(w10, 'z' - 'a'); // Is w10 a lowercase letter?
+ __ B(ls, &loop_check); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Sub(w10, w10, 224 - 'a');
+ // TODO(jbramley): Use Ccmp here.
+ __ Cmp(w10, 254 - 224);
+ __ B(hi, &fail); // Weren't Latin-1 letters.
+ __ Cmp(w10, 247 - 224); // Check for 247.
+ __ B(eq, &fail);
+
+ __ Bind(&loop_check);
+    __ Cmp(capture_start_address, capture_end_address);
+ __ B(lt, &loop);
+ __ B(&success);
+
+ __ Bind(&fail);
+ BranchOrBacktrack(al, on_no_match);
+
+ __ Bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+ __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+ // The current input offset should be <= 0, and fit in a W register.
+ __ Check(le, kOffsetOutOfRange);
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ int argument_count = 4;
+
+ // The cached registers need to be retained.
+ CPURegList cached_registers(CPURegister::kRegister, kXRegSize, 0, 7);
+ ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+ __ PushCPURegList(cached_registers);
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // x0: Address byte_offset1 - Address captured substring's start.
+ // x1: Address byte_offset2 - Address of current character position.
+ // w2: size_t byte_length - length of capture in bytes(!)
+ // x3: Isolate* isolate
+
+ // Address of start of capture.
+ __ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
+ // Length of capture.
+ __ Mov(w2, capture_length);
+ // Address of current input position.
+ __ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
+ // Isolate.
+ __ Mov(x3, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Check if function returned non-zero for success or zero for failure.
+ CompareAndBranchOrBacktrack(x0, 0, eq, on_no_match);
+ // On success, increment position by length of capture.
+ __ Add(current_input_offset(), current_input_offset(), capture_length);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ }
+
+ __ Bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerA64::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ Register capture_start_address = x12;
+ Register capture_end_address = x13;
+ Register current_position_address = x14;
+ Register capture_length = w15;
+
+ // Find length of back-referenced capture.
+ ASSERT((start_reg % 2) == 0);
+ if (start_reg < kNumCachedRegisters) {
+ __ Mov(x10, GetCachedRegister(start_reg));
+ __ Lsr(x11, GetCachedRegister(start_reg), kWRegSize);
+ } else {
+ __ Ldp(w11, w10, capture_location(start_reg, x10));
+ }
+ __ Sub(capture_length, w11, w10); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Cbz(capture_length, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+
+ // Compute pointers to match string and capture string
+ __ Add(capture_start_address, input_end(), Operand(w10, SXTW));
+ __ Add(capture_end_address,
+ capture_start_address,
+ Operand(capture_length, SXTW));
+ __ Add(current_position_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+
+ Label loop;
+ __ Bind(&loop);
+ if (mode_ == ASCII) {
+ __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+ __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
+ __ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
+ }
+ __ Cmp(w10, w11);
+ BranchOrBacktrack(ne, on_no_match);
+ __ Cmp(capture_start_address, capture_end_address);
+ __ B(lt, &loop);
+
+ // Move current character position to position after match.
+ __ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+ __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+ // The current input offset should be <= 0, and fit in a W register.
+ __ Check(le, kOffsetOutOfRange);
+ }
+ __ Bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerA64::CheckNotCharacter(unsigned c,
+ Label* on_not_equal) {
+ CompareAndBranchOrBacktrack(current_character(), c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerA64::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(w10, current_character(), mask);
+ CompareAndBranchOrBacktrack(w10, c, eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerA64::CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal) {
+ __ And(w10, current_character(), mask);
+ CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerA64::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
+ __ Sub(w10, current_character(), minus);
+ __ And(w10, w10, mask);
+ CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerA64::CheckCharacterInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_in_range) {
+ __ Sub(w10, current_character(), from);
+ // Unsigned lower-or-same condition.
+ CompareAndBranchOrBacktrack(w10, to - from, ls, on_in_range);
+}
+
+
+void RegExpMacroAssemblerA64::CheckCharacterNotInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_not_in_range) {
+ __ Sub(w10, current_character(), from);
+ // Unsigned higher condition.
+ CompareAndBranchOrBacktrack(w10, to - from, hi, on_not_in_range);
+}
+
+
+void RegExpMacroAssemblerA64::CheckBitInTable(
+ Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ Mov(x11, Operand(table));
+ if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) {
+ __ And(w10, current_character(), kTableMask);
+ __ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ Add(w10, current_character(), ByteArray::kHeaderSize - kHeapObjectTag);
+ }
+ __ Ldrb(w11, MemOperand(x11, w10, UXTW));
+ CompareAndBranchOrBacktrack(w11, 0, ne, on_bit_set);
+}
+
+
+bool RegExpMacroAssemblerA64::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check
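+  // For example, matching '0'..'9' is emitted as an unsigned comparison of
+  // (current_character() - '0') against ('9' - '0'), which also rejects
+  // characters below '0' thanks to the unsigned wrap-around.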
+ switch (type) {
+ case 's':
+ // Match space-characters
+ if (mode_ == ASCII) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ // Check for ' ' or 0x00a0.
+ __ Cmp(current_character(), ' ');
+ __ Ccmp(current_character(), 0x00a0, ZFlag, ne);
+ __ B(eq, &success);
+ // Check range 0x09..0x0d.
+ __ Sub(w10, current_character(), '\t');
+ CompareAndBranchOrBacktrack(w10, '\r' - '\t', hi, on_no_match);
+ __ Bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9').
+ __ Sub(w10, current_character(), '0');
+ CompareAndBranchOrBacktrack(w10, '9' - '0', hi, on_no_match);
+ return true;
+ case 'D':
+ // Match ASCII non-digits.
+ __ Sub(w10, current_character(), '0');
+ CompareAndBranchOrBacktrack(w10, '9' - '0', ls, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Here we emit the conditional branch only once at the end to make branch
+ // prediction more efficient, even though we could branch out of here
+ // as soon as a character matches.
+ __ Cmp(current_character(), 0x0a);
+ __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ if (mode_ == UC16) {
+ __ Sub(w10, current_character(), 0x2028);
+ // If the Z flag was set we clear the flags to force a branch.
+ __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+ // ls -> !((C==1) && (Z==0))
+ BranchOrBacktrack(ls, on_no_match);
+ } else {
+ BranchOrBacktrack(eq, on_no_match);
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // We have to check all 4 newline characters before emitting
+ // the conditional branch.
+ __ Cmp(current_character(), 0x0a);
+ __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ if (mode_ == UC16) {
+ __ Sub(w10, current_character(), 0x2028);
+ // If the Z flag was set we clear the flags to force a fall-through.
+ __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+ // hi -> (C==1) && (Z==0)
+ BranchOrBacktrack(hi, on_no_match);
+ } else {
+ BranchOrBacktrack(ne, on_no_match);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ Mov(x10, Operand(map));
+ __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+ CompareAndBranchOrBacktrack(w10, 0, eq, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ Cmp(current_character(), 'z');
+ __ B(hi, &done);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ Mov(x10, Operand(map));
+ __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+ CompareAndBranchOrBacktrack(w10, 0, ne, on_no_match);
+ __ Bind(&done);
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerA64::Fail() {
+ __ Mov(w0, FAILURE);
+ __ B(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
+ Label return_w0;
+  // Finalize code: write the entry point code now that we know how many
+  // registers we need.
+
+ // Entry code:
+ __ Bind(&entry_label_);
+
+ // Arguments on entry:
+ // x0: String* input
+ // x1: int start_offset
+ // x2: byte* input_start
+ // x3: byte* input_end
+ // x4: int* output array
+ // x5: int output array size
+ // x6: Address stack_base
+ // x7: int direct_call
+
+ // The stack pointer should be csp on entry.
+ // csp[8]: address of the current isolate
+ // csp[0]: secondary link/return address used by native call
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Push registers on the stack; only push the argument registers that we need.
+ CPURegList argument_registers(x0, x5, x6, x7);
+
+ CPURegList registers_to_retain = kCalleeSaved;
+ ASSERT(kCalleeSaved.Count() == 11);
+ registers_to_retain.Combine(lr);
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ PushCPURegList(registers_to_retain);
+ __ PushCPURegList(argument_registers);
+
+ // Set frame pointer in place.
+ __ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
+
+ // Initialize callee-saved registers.
+ __ Mov(start_offset(), w1);
+ __ Mov(input_start(), x2);
+ __ Mov(input_end(), x3);
+ __ Mov(output_array(), x4);
+
+ // Set the number of registers we will need to allocate, that is:
+ // - success_counter (X register)
+ // - (num_registers_ - kNumCachedRegisters) (W registers)
+ int num_wreg_to_allocate = num_registers_ - kNumCachedRegisters;
+ // Do not allocate registers on the stack if they can all be cached.
+ if (num_wreg_to_allocate < 0) { num_wreg_to_allocate = 0; }
+  // Make room for the success_counter (one X register, i.e. two W-sized slots).
+ num_wreg_to_allocate += 2;
+
+ // Make sure the stack alignment will be respected.
+ int alignment = masm_->ActivationFrameAlignment();
+ ASSERT_EQ(alignment % 16, 0);
+ int align_mask = (alignment / kWRegSizeInBytes) - 1;
+ num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
+
+ // Check if we have space on the stack.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ Mov(x10, Operand(stack_limit));
+ __ Ldr(x10, MemOperand(x10));
+ __ Subs(x10, csp, x10);
+
+ // Handle it if the stack pointer is already below the stack limit.
+ __ B(ls, &stack_limit_hit);
+
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Cmp(x10, num_wreg_to_allocate * kWRegSizeInBytes);
+ __ B(hs, &stack_ok);
+
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
+
+ __ Bind(&stack_limit_hit);
+ CallCheckStackGuardState(x10);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Cbnz(w0, &return_w0);
+
+ __ Bind(&stack_ok);
+
+ // Allocate space on stack.
+ __ Claim(num_wreg_to_allocate, kWRegSizeInBytes);
+
+ // Initialize success_counter with 0.
+ __ Str(wzr, MemOperand(frame_pointer(), kSuccessCounter));
+
+ // Find negative length (offset of start relative to end).
+ __ Sub(x10, input_start(), input_end());
+ if (masm_->emit_debug_code()) {
+ // Check that the input string length is < 2^30.
+ __ Neg(x11, x10);
+ __ Cmp(x11, (1<<30) - 1);
+ __ Check(ls, kInputStringTooLong);
+ }
+ __ Mov(current_input_offset(), w10);
+
+  // The non-position value is used as a clearing value for the
+  // capture registers. It corresponds to the position of the first character
+  // minus one.
+ __ Sub(non_position_value(), current_input_offset(), char_size());
+ __ Sub(non_position_value(), non_position_value(),
+ Operand(start_offset(), LSL, (mode_ == UC16) ? 1 : 0));
+ // We can store this value twice in an X register for initializing
+ // on-stack registers later.
+ __ Orr(twice_non_position_value(),
+ non_position_value().X(),
+ Operand(non_position_value().X(), LSL, kWRegSize));
+
+ // Initialize code pointer register.
+ __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Cbnz(start_offset(), &load_char_start_regexp);
+ __ Mov(current_character(), '\n');
+ __ B(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ Bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&start_regexp);
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) {
+ ClearRegisters(0, num_saved_registers_ - 1);
+ }
+
+ // Initialize backtrack stack pointer.
+ __ Ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackBase));
+
+ // Execute
+ __ B(&start_label_);
+
+ if (backtrack_label_.is_linked()) {
+ __ Bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ if (success_label_.is_linked()) {
+ Register first_capture_start = w15;
+
+ // Save captures when successful.
+ __ Bind(&success_label_);
+
+ if (num_saved_registers_ > 0) {
+ // V8 expects the output to be an int32_t array.
+ Register capture_start = w12;
+ Register capture_end = w13;
+ Register input_length = w14;
+
+ // Copy captures to output.
+
+ // Get string length.
+ __ Sub(x10, input_end(), input_start());
+ if (masm_->emit_debug_code()) {
+ // Check that the input string length is < 2^30.
+ __ Cmp(x10, (1<<30) - 1);
+ __ Check(ls, kInputStringTooLong);
+ }
+ // input_start has a start_offset offset on entry. We need to include
+ // it when computing the length of the whole string.
+ if (mode_ == UC16) {
+ __ Add(input_length, start_offset(), Operand(w10, LSR, 1));
+ } else {
+ __ Add(input_length, start_offset(), w10);
+ }
+
+ // Copy the results to the output array from the cached registers first.
+ for (int i = 0;
+ (i < num_saved_registers_) && (i < kNumCachedRegisters);
+ i += 2) {
+ __ Mov(capture_start.X(), GetCachedRegister(i));
+ __ Lsr(capture_end.X(), capture_start.X(), kWRegSize);
+ if ((i == 0) && global_with_zero_length_check()) {
+ // Keep capture start for the zero-length check later.
+ __ Mov(first_capture_start, capture_start);
+ }
+ // Offsets need to be relative to the start of the string.
+ if (mode_ == UC16) {
+ __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ }
+
+ // Only carry on if there are more than kNumCachedRegisters capture
+ // registers.
+ int num_registers_left_on_stack =
+ num_saved_registers_ - kNumCachedRegisters;
+ if (num_registers_left_on_stack > 0) {
+ Register base = x10;
+        // There is always an even number of capture registers. A pair of
+        // registers determines one match, with its two offsets.
+ ASSERT_EQ(0, num_registers_left_on_stack % 2);
+ __ Add(base, frame_pointer(), kFirstCaptureOnStack);
+
+        // We can unroll the loop here; we should not unroll for fewer than
+        // two registers.
+ STATIC_ASSERT(kNumRegistersToUnroll > 2);
+ if (num_registers_left_on_stack <= kNumRegistersToUnroll) {
+ for (int i = 0; i < num_registers_left_on_stack / 2; i++) {
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ if ((i == 0) && global_with_zero_length_check()) {
+ // Keep capture start for the zero-length check later.
+ __ Mov(first_capture_start, capture_start);
+ }
+ // Offsets need to be relative to the start of the string.
+ if (mode_ == UC16) {
+ __ Add(capture_start,
+ input_length,
+ Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ }
+ } else {
+ Label loop, start;
+ __ Mov(x11, num_registers_left_on_stack);
+
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ if (global_with_zero_length_check()) {
+ __ Mov(first_capture_start, capture_start);
+ }
+ __ B(&start);
+
+ __ Bind(&loop);
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ __ Bind(&start);
+ if (mode_ == UC16) {
+ __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ __ Sub(x11, x11, 2);
+ __ Cbnz(x11, &loop);
+ }
+ }
+ }
+
+ if (global()) {
+ Register success_counter = w0;
+ Register output_size = x10;
+ // Restart matching if the regular expression is flagged as global.
+
+ // Increment success counter.
+ __ Ldr(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+ __ Add(success_counter, success_counter, 1);
+ __ Str(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Ldr(output_size, MemOperand(frame_pointer(), kOutputSize));
+ __ Sub(output_size, output_size, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ __ Cmp(output_size, num_saved_registers_);
+ __ B(lt, &return_w0);
+
+ // The output pointer is already set to the next field in the output
+ // array.
+ // Update output size on the frame before we restart matching.
+ __ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ __ Cmp(current_input_offset(), first_capture_start);
+ // Not a zero-length match, restart.
+ __ B(ne, &load_char_start_regexp);
+ // Offset from the end is zero if we already reached the end.
+ __ Cbz(current_input_offset(), &return_w0);
+ // Advance current position after a zero-length match.
+ __ Add(current_input_offset(),
+ current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
+ __ B(&load_char_start_regexp);
+ } else {
+ __ Mov(w0, SUCCESS);
+ }
+ }
+
+ if (exit_label_.is_linked()) {
+ // Exit and return w0
+ __ Bind(&exit_label_);
+ if (global()) {
+ __ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
+ }
+ }
+
+ __ Bind(&return_w0);
+
+  // Set the stack pointer back to the first register to retain.
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Mov(csp, fp);
+
+ // Restore registers.
+ __ PopCPURegList(registers_to_retain);
+
+ __ Ret();
+
+ Label exit_with_exception;
+  // Registers x0 to x7 are used to store the first captures. They need to be
+  // retained over calls to C++ code.
+ CPURegList cached_registers(CPURegister::kRegister, kXRegSize, 0, 7);
+ ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+
+ if (check_preempt_label_.is_linked()) {
+ __ Bind(&check_preempt_label_);
+ SaveLinkRegister();
+ // The cached registers need to be retained.
+ __ PushCPURegList(cached_registers);
+ CallCheckStackGuardState(x10);
+ // Returning from the regexp code restores the stack (csp <- fp)
+ // so we don't need to drop the link register from it before exiting.
+ __ Cbnz(w0, &return_w0);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ RestoreLinkRegister();
+ __ Ret();
+ }
+
+ if (stack_overflow_label_.is_linked()) {
+ __ Bind(&stack_overflow_label_);
+ SaveLinkRegister();
+ // The cached registers need to be retained.
+ __ PushCPURegList(cached_registers);
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ __ Mov(x2, Operand(ExternalReference::isolate_address(isolate())));
+ __ Add(x1, frame_pointer(), kStackBase);
+ __ Mov(x0, backtrack_stackpointer());
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, 3);
+    // If the call returned NULL, we have failed to grow the stack, and
+    // must exit with a stack-overflow exception.
+ // Returning from the regexp code restores the stack (csp <- fp)
+ // so we don't need to drop the link register from it before exiting.
+ __ Cbz(w0, &exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ Mov(backtrack_stackpointer(), x0);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ RestoreLinkRegister();
+ __ Ret();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ __ Bind(&exit_with_exception);
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerA64::GoTo(Label* to) {
+ BranchOrBacktrack(al, to);
+}
+
+void RegExpMacroAssemblerA64::IfRegisterGE(int reg,
+ int comparand,
+ Label* if_ge) {
+ Register to_compare = GetRegister(reg, w10);
+ CompareAndBranchOrBacktrack(to_compare, comparand, ge, if_ge);
+}
+
+
+void RegExpMacroAssemblerA64::IfRegisterLT(int reg,
+ int comparand,
+ Label* if_lt) {
+ Register to_compare = GetRegister(reg, w10);
+ CompareAndBranchOrBacktrack(to_compare, comparand, lt, if_lt);
+}
+
+
+void RegExpMacroAssemblerA64::IfRegisterEqPos(int reg,
+ Label* if_eq) {
+ Register to_compare = GetRegister(reg, w10);
+ __ Cmp(to_compare, current_input_offset());
+ BranchOrBacktrack(eq, if_eq);
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerA64::Implementation() {
+ return kA64Implementation;
+}
+
+
+void RegExpMacroAssemblerA64::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ // TODO(pielan): Make sure long strings are caught before this, and not
+ // just asserted in debug mode.
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ // Be sane! (And ensure that an int32_t can be used to index the string)
+ ASSERT(cp_offset < (1<<30));
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerA64::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerA64::PopRegister(int register_index) {
+ Pop(w10);
+ StoreRegister(register_index, w10);
+}
+
+
+void RegExpMacroAssemblerA64::PushBacktrack(Label* label) {
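+  // The backtrack target is pushed as an offset from the code object rather
+  // than as an absolute address, so the entry stays valid even if the code
+  // object moves (for example during GC).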
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ Adr(x10, label);
+ __ Sub(x10, x10, code_pointer());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(x10, kWRegMask);
+ // The code offset has to fit in a W register.
+ __ Check(ls, kOffsetOutOfRange);
+ }
+ }
+ Push(w10);
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerA64::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerA64::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ Register to_push = GetRegister(register_index, w10);
+ Push(to_push);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerA64::ReadCurrentPositionFromRegister(int reg) {
+ Register cached_register;
+ RegisterState register_state = GetRegisterState(reg);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(current_input_offset(), register_location(reg));
+ break;
+ case CACHED_LSW:
+ cached_register = GetCachedRegister(reg);
+ __ Mov(current_input_offset(), cached_register.W());
+ break;
+ case CACHED_MSW:
+ cached_register = GetCachedRegister(reg);
+ __ Lsr(current_input_offset().X(), cached_register, kWRegSize);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void RegExpMacroAssemblerA64::ReadStackPointerFromRegister(int reg) {
+ Register read_from = GetRegister(reg, w10);
+ __ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
+ __ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
+}
+
+
+void RegExpMacroAssemblerA64::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Cmp(current_input_offset(), -by * char_size());
+ __ B(ge, &after_position);
+ __ Mov(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerA64::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ Register set_to = wzr;
+ if (to != 0) {
+ set_to = w10;
+ __ Mov(set_to, to);
+ }
+ StoreRegister(register_index, set_to);
+}
+
+
+bool RegExpMacroAssemblerA64::Succeed() {
+ __ B(&success_label_);
+ return global();
+}
+
+
+void RegExpMacroAssemblerA64::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ Register position = current_input_offset();
+ if (cp_offset != 0) {
+ position = w10;
+ __ Add(position, current_input_offset(), cp_offset * char_size());
+ }
+ StoreRegister(reg, position);
+}
+
+
+void RegExpMacroAssemblerA64::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ int num_registers = reg_to - reg_from + 1;
+
+ // If the first capture register is cached in a hardware register but not
+ // aligned on a 64-bit one, we need to clear the first one specifically.
+ if ((reg_from < kNumCachedRegisters) && ((reg_from % 2) != 0)) {
+ StoreRegister(reg_from, non_position_value());
+ num_registers--;
+ reg_from++;
+ }
+
+ // Clear cached registers in pairs as far as possible.
+ while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
+ ASSERT(GetRegisterState(reg_from) == CACHED_LSW);
+ __ Mov(GetCachedRegister(reg_from), twice_non_position_value());
+ reg_from += 2;
+ num_registers -= 2;
+ }
+
+ if ((num_registers % 2) == 1) {
+ StoreRegister(reg_from, non_position_value());
+ num_registers--;
+ reg_from++;
+ }
+
+ if (num_registers > 0) {
+ // If there are some remaining registers, they are stored on the stack.
+ ASSERT(reg_from >= kNumCachedRegisters);
+
+    // Move down the indexes of the registers on the stack to get the correct
+    // offsets in memory.
+ reg_from -= kNumCachedRegisters;
+ reg_to -= kNumCachedRegisters;
+    // We should not unroll the loop for fewer than two registers.
+ STATIC_ASSERT(kNumRegistersToUnroll > 2);
+ // We position the base pointer to (reg_from + 1).
+ int base_offset = kFirstRegisterOnStack -
+ kWRegSizeInBytes - (kWRegSizeInBytes * reg_from);
+ if (num_registers > kNumRegistersToUnroll) {
+ Register base = x10;
+ __ Add(base, frame_pointer(), base_offset);
+
+ Label loop;
+ __ Mov(x11, num_registers);
+ __ Bind(&loop);
+ __ Str(twice_non_position_value(),
+ MemOperand(base, -kPointerSize, PostIndex));
+ __ Sub(x11, x11, 2);
+ __ Cbnz(x11, &loop);
+ } else {
+ for (int i = reg_from; i <= reg_to; i += 2) {
+ __ Str(twice_non_position_value(),
+ MemOperand(frame_pointer(), base_offset));
+ base_offset -= kWRegSizeInBytes * 2;
+ }
+ }
+ }
+}
+
+
+void RegExpMacroAssemblerA64::WriteStackPointerToRegister(int reg) {
+ __ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
+ __ Sub(x10, backtrack_stackpointer(), x10);
+ if (masm_->emit_debug_code()) {
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The stack offset needs to fit in a W register.
+ __ Check(eq, kOffsetOutOfRange);
+ }
+ StoreRegister(reg, w10);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return *reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+
+int RegExpMacroAssemblerA64::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame,
+ int start_offset,
+ const byte** input_start,
+ const byte** input_end) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+  // If it is not a real stack overflow, the stack guard was used to interrupt
+  // execution for another purpose.
+
+ // If this is a direct call from JavaScript retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles(isolate);
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInput));
+
+ // Current string.
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+
+ if (*code_handle != re_code) { // Return address no longer valid
+ int delta = code_handle->address() - re_code->address();
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ Handle<String> subject_tmp = subject;
+ int slice_offset = 0;
+
+ // Extract the underlying string and the slice offset.
+ if (StringShape(*subject_tmp).IsCons()) {
+ subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+ } else if (StringShape(*subject_tmp).IsSliced()) {
+ SlicedString* slice = SlicedString::cast(*subject_tmp);
+ subject_tmp = Handle<String>(slice->parent());
+ slice_offset = slice->offset();
+ }
+
+ // String might have changed.
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ // If we changed between an ASCII and an UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject_tmp).IsSequential() ||
+ StringShape(*subject_tmp).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = *input_start;
+
+ // Find the current start address of the same character at the current string
+ // position.
+ const byte* new_address = StringCharacterPosition(*subject_tmp,
+ start_offset + slice_offset);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = *input_end;
+ int byte_length = static_cast<int>(end_address - start_address);
+ frame_entry<const String*>(re_frame, kInput) = *subject;
+ *input_start = new_address;
+ *input_end = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInput) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInput) = *subject;
+ }
+
+ return 0;
+}
+
+
+void RegExpMacroAssemblerA64::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
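+  // current_input_offset() is a negative offset from the end of the string,
+  // so the position cp_offset characters ahead is still inside the input only
+  // while current_input_offset() is strictly below -cp_offset * char_size().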
+ CompareAndBranchOrBacktrack(current_input_offset(),
+ -cp_offset * char_size(),
+ ge,
+ on_outside_input);
+}
+
+
+bool RegExpMacroAssemblerA64::CanReadUnaligned() {
+ // TODO(pielan): See whether or not we should disable unaligned accesses.
+ return !slow_safe();
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerA64::CallCheckStackGuardState(Register scratch) {
+  // Allocate space on the stack to store the return address. The
+  // CheckStackGuardState C++ function will overwrite it if the code
+  // moved. Allocate extra space for 2 arguments passed by pointer.
+ // AAPCS64 requires the stack to be 16 byte aligned.
+ int alignment = masm_->ActivationFrameAlignment();
+ ASSERT_EQ(alignment % 16, 0);
+ int align_mask = (alignment / kXRegSizeInBytes) - 1;
+ int xreg_to_claim = (3 + align_mask) & ~align_mask;
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Claim(xreg_to_claim);
+
+ // CheckStackGuardState needs the end and start addresses of the input string.
+ __ Poke(input_end(), 2 * kPointerSize);
+ __ Add(x5, csp, 2 * kPointerSize);
+ __ Poke(input_start(), kPointerSize);
+ __ Add(x4, csp, kPointerSize);
+
+ __ Mov(w3, start_offset());
+ // RegExp code frame pointer.
+ __ Mov(x2, frame_pointer());
+ // Code* of self.
+ __ Mov(x1, Operand(masm_->CodeObject()));
+
+  // We need to pass a pointer to the return address as the first argument.
+  // The DirectCEntry stub will place the return address on the stack before
+  // calling, so the stack pointer will point to it.
+ __ Mov(x0, csp);
+
+ ExternalReference check_stack_guard_state =
+ ExternalReference::re_check_stack_guard_state(isolate());
+ __ Mov(scratch, Operand(check_stack_guard_state));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, scratch);
+
+  // The input string may have been moved in memory, so we need to reload it.
+ __ Peek(input_start(), kPointerSize);
+ __ Peek(input_end(), 2 * kPointerSize);
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Drop(xreg_to_claim);
+
+ // Reload the Code pointer.
+ __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+void RegExpMacroAssemblerA64::BranchOrBacktrack(Condition condition,
+ Label* to) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ B(to);
+ return;
+ }
+ if (to == NULL) {
+ to = &backtrack_label_;
+ }
+ // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+ Condition inverted_condition = InvertCondition(condition);
+ Label no_branch;
+ __ B(inverted_condition, &no_branch);
+ __ B(to);
+ __ Bind(&no_branch);
+}
+
+void RegExpMacroAssemblerA64::CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to) {
+ if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
+ if (to == NULL) {
+ to = &backtrack_label_;
+ }
+ // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+ Label no_branch;
+ if (condition == eq) {
+ __ Cbnz(reg, &no_branch);
+ } else {
+ __ Cbz(reg, &no_branch);
+ }
+ __ B(to);
+ __ Bind(&no_branch);
+ } else {
+ __ Cmp(reg, immediate);
+ BranchOrBacktrack(condition, to);
+ }
+}
+
+
+void RegExpMacroAssemblerA64::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ Mov(x10, Operand(stack_limit));
+ __ Ldr(x10, MemOperand(x10));
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Cmp(csp, x10);
+ CallIf(&check_preempt_label_, ls);
+}
+
+
+void RegExpMacroAssemblerA64::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(isolate());
+ __ Mov(x10, Operand(stack_limit));
+ __ Ldr(x10, MemOperand(x10));
+ __ Cmp(backtrack_stackpointer(), x10);
+ CallIf(&stack_overflow_label_, ls);
+}
+
+
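+// The backtrack stack grows downwards in memory: Push pre-decrements the
+// backtrack stack pointer, and Pop post-increments it.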
+void RegExpMacroAssemblerA64::Push(Register source) {
+ ASSERT(source.Is32Bits());
+ ASSERT(!source.is(backtrack_stackpointer()));
+ __ Str(source,
+ MemOperand(backtrack_stackpointer(),
+ -static_cast<int>(kWRegSizeInBytes),
+ PreIndex));
+}
+
+
+void RegExpMacroAssemblerA64::Pop(Register target) {
+ ASSERT(target.Is32Bits());
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ Ldr(target,
+ MemOperand(backtrack_stackpointer(), kWRegSizeInBytes, PostIndex));
+}
+
+
+Register RegExpMacroAssemblerA64::GetCachedRegister(int register_index) {
+ ASSERT(register_index < kNumCachedRegisters);
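+  // Capture registers are cached in pairs: registers 2i and 2i+1 live in the
+  // low and high 32 bits of x<i> respectively.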
+ return Register::Create(register_index / 2, kXRegSize);
+}
+
+
+Register RegExpMacroAssemblerA64::GetRegister(int register_index,
+ Register maybe_result) {
+ ASSERT(maybe_result.Is32Bits());
+ ASSERT(register_index >= 0);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ Register result;
+ RegisterState register_state = GetRegisterState(register_index);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(maybe_result, register_location(register_index));
+ result = maybe_result;
+ break;
+ case CACHED_LSW:
+ result = GetCachedRegister(register_index).W();
+ break;
+ case CACHED_MSW:
+ __ Lsr(maybe_result.X(), GetCachedRegister(register_index), kWRegSize);
+ result = maybe_result;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ ASSERT(result.Is32Bits());
+ return result;
+}
+
+
+void RegExpMacroAssemblerA64::StoreRegister(int register_index,
+ Register source) {
+ ASSERT(source.Is32Bits());
+ ASSERT(register_index >= 0);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+
+ Register cached_register;
+ RegisterState register_state = GetRegisterState(register_index);
+ switch (register_state) {
+ case STACKED:
+ __ Str(source, register_location(register_index));
+ break;
+ case CACHED_LSW:
+ cached_register = GetCachedRegister(register_index);
+ if (!source.Is(cached_register.W())) {
+ __ Bfi(cached_register, source.X(), 0, kWRegSize);
+ }
+ break;
+ case CACHED_MSW:
+ cached_register = GetCachedRegister(register_index);
+ __ Bfi(cached_register, source.X(), kWRegSize, kWRegSize);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void RegExpMacroAssemblerA64::CallIf(Label* to, Condition condition) {
+ Label skip_call;
+ if (condition != al) __ B(&skip_call, InvertCondition(condition));
+ __ Bl(to);
+ __ Bind(&skip_call);
+}
+
+
+void RegExpMacroAssemblerA64::RestoreLinkRegister() {
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Pop(lr, xzr);
+ __ Add(lr, lr, Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerA64::SaveLinkRegister() {
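+  // Convert lr into an offset from the code object before pushing it, so that
+  // the saved value stays valid if the code object is moved by a GC.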
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Sub(lr, lr, Operand(masm_->CodeObject()));
+ __ Push(xzr, lr);
+}
+
+
+MemOperand RegExpMacroAssemblerA64::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ ASSERT(register_index >= kNumCachedRegisters);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ register_index -= kNumCachedRegisters;
+ int offset = kFirstRegisterOnStack - register_index * kWRegSizeInBytes;
+ return MemOperand(frame_pointer(), offset);
+}
+
+MemOperand RegExpMacroAssemblerA64::capture_location(int register_index,
+ Register scratch) {
+ ASSERT(register_index < (1<<30));
+ ASSERT(register_index < num_saved_registers_);
+ ASSERT(register_index >= kNumCachedRegisters);
+ ASSERT_EQ(register_index % 2, 0);
+ register_index -= kNumCachedRegisters;
+ int offset = kFirstCaptureOnStack - register_index * kWRegSizeInBytes;
+ // capture_location is used with Stp instructions to load/store 2 registers.
+ // The immediate field in the encoding is limited to 7 bits (signed).
+ if (is_int7(offset)) {
+ return MemOperand(frame_pointer(), offset);
+ } else {
+ __ Add(scratch, frame_pointer(), offset);
+ return MemOperand(scratch);
+ }
+}
+
+void RegExpMacroAssemblerA64::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+
+  // The ldr, str, ldrh and strh instructions can perform unaligned accesses,
+  // provided that the CPU and the operating system running on the target
+  // allow it. If unaligned loads and stores are not supported, this function
+  // must only be used to load a single character at a time.
+
+  // ARMv8 supports unaligned accesses, but V8 or the kernel can decide to
+  // disable them.
+ // TODO(pielan): See whether or not we should disable unaligned accesses.
+ if (!CanReadUnaligned()) {
+ ASSERT(characters == 1);
+ }
+
+ if (cp_offset != 0) {
+ if (masm_->emit_debug_code()) {
+ __ Mov(x10, cp_offset * char_size());
+ __ Add(x10, x10, Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The offset needs to fit in a W register.
+ __ Check(eq, kOffsetOutOfRange);
+ } else {
+ __ Add(w10, current_input_offset(), cp_offset * char_size());
+ }
+ offset = w10;
+ }
+
+ if (mode_ == ASCII) {
+ if (characters == 4) {
+ __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else if (characters == 2) {
+ __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else {
+ ASSERT(characters == 1);
+ __ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ if (characters == 2) {
+ __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else {
+ ASSERT(characters == 1);
+ __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+ }
+ }
+}
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/regexp-macro-assembler-a64.h b/deps/v8/src/a64/regexp-macro-assembler-a64.h
new file mode 100644
index 0000000000..0f6b44b9fe
--- /dev/null
+++ b/deps/v8/src/a64/regexp-macro-assembler-a64.h
@@ -0,0 +1,315 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
+#define V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
+
+#include "a64/assembler-a64.h"
+#include "a64/assembler-a64-inl.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerA64(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerA64();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from,
+ uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from,
+ uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame,
+ int start_offset,
+ const byte** input_start,
+ const byte** input_end);
+
+ private:
+  // Above the frame pointer: stored registers and stack-passed parameters.
+ // Callee-saved registers x19-x29, where x29 is the old frame pointer.
+ static const int kCalleeSavedRegisters = 0;
+ // Return address.
+ // It is placed above the 11 callee-saved registers.
+ static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+ // Stack parameter placed by caller.
+ static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
+ static const int kStackBase = kDirectCall - kPointerSize;
+ static const int kOutputSize = kStackBase - kPointerSize;
+ static const int kInput = kOutputSize - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessCounter = kInput - kPointerSize;
+ // First position register address on the stack. Following positions are
+ // below it. A position is a 32 bit value.
+ static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSizeInBytes;
+  // A capture is a 64 bit value holding two positions.
+ static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSizeInBytes;
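+  // Stacked register i (with i >= kNumCachedRegisters) is located at
+  // frame_pointer() + kFirstRegisterOnStack
+  //                 - (i - kNumCachedRegisters) * kWRegSizeInBytes.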
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // When initializing registers to a non-position value we can unroll
+ // the loop. Set the limit of registers to unroll.
+ static const int kNumRegistersToUnroll = 16;
+
+  // We are using x0 to x7 as a register cache. Each hardware register must
+  // contain one capture, that is, two 32 bit registers. We can cache at most
+  // 16 registers.
+ static const int kNumCachedRegisters = 16;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // Location of a 32 bit position register.
+ MemOperand register_location(int register_index);
+
+ // Location of a 64 bit capture, combining two position registers.
+ MemOperand capture_location(int register_index, Register scratch);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ Register current_input_offset() { return w21; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ Register current_character() { return w22; }
+
+ // Register holding address of the end of the input string.
+ Register input_end() { return x25; }
+
+ // Register holding address of the start of the input string.
+ Register input_start() { return x26; }
+
+ // Register holding the offset from the start of the string where we should
+ // start matching.
+ Register start_offset() { return w27; }
+
+ // Pointer to the output array's first element.
+ Register output_array() { return x28; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ Register backtrack_stackpointer() { return x23; }
+
+ // Register holding pointer to the current code object.
+ Register code_pointer() { return x20; }
+
+ // Register holding the value used for clearing capture registers.
+ Register non_position_value() { return w24; }
+  // The top 32 bits of this register are used to store this value
+  // twice. This is used for clearing more than one register at a time.
+ Register twice_non_position_value() { return x24; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
+ int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to);
+
+  // Compares reg against immediate before calling BranchOrBacktrack.
+ // It makes use of the Cbz and Cbnz instructions.
+ void CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to);
+
+ inline void CallIf(Label* to, Condition condition);
+
+ // Save and restore the link register on the stack in a way that
+ // is GC-safe.
+ inline void SaveLinkRegister();
+ inline void RestoreLinkRegister();
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ // This state indicates where the register actually is.
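+  // For example, capture register 4 is the low word of x2, register 5 is the
+  // high word of x2, and registers 16 and above live on the stack frame.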
+ enum RegisterState {
+ STACKED, // Resides in memory.
+ CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
+ CACHED_MSW // Most Significant Word of a 64 bit hardware register.
+ };
+
+ RegisterState GetRegisterState(int register_index) {
+ ASSERT(register_index >= 0);
+ if (register_index >= kNumCachedRegisters) {
+ return STACKED;
+ } else {
+ if ((register_index % 2) == 0) {
+ return CACHED_LSW;
+ } else {
+ return CACHED_MSW;
+ }
+ }
+ }
+
+ // Store helper that takes the state of the register into account.
+ inline void StoreRegister(int register_index, Register source);
+
+ // Returns a hardware W register that holds the value of the capture
+ // register.
+ //
+ // This function will try to use an existing cache register (w0-w7) for the
+ // result. Otherwise, it will load the value into maybe_result.
+ //
+ // If the returned register is anything other than maybe_result, calling code
+ // must not write to it.
+ inline Register GetRegister(int register_index, Register maybe_result);
+
+  // Returns the hardware register (x0-x7) holding the value of the capture
+ // register.
+ // This assumes that the state of the register is not STACKED.
+ inline Register GetCachedRegister(int register_index);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+
+#endif // V8_INTERPRETED_REGEXP
+
+
+}} // namespace v8::internal
+
+#endif // V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
diff --git a/deps/v8/src/a64/simulator-a64.cc b/deps/v8/src/a64/simulator-a64.cc
new file mode 100644
index 0000000000..014b71477d
--- /dev/null
+++ b/deps/v8/src/a64/simulator-a64.cc
@@ -0,0 +1,3414 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <cmath>
+#include <cstdarg>
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "disasm.h"
+#include "assembler.h"
+#include "a64/simulator-a64.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(USE_SIMULATOR)
+
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+
+// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
+void PRINTF_CHECKING TraceSim(const char* format, ...) {
+ if (FLAG_trace_sim) {
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ }
+}
+
+
+const Instruction* Simulator::kEndOfSimAddress = NULL;
+
+
+void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
+ int width = msb - lsb + 1;
+ ASSERT(is_uintn(bits, width) || is_intn(bits, width));
+
+ bits <<= lsb;
+ uint32_t mask = ((1 << width) - 1) << lsb;
+ ASSERT((mask & write_ignore_mask_) == 0);
+
+ value_ = (value_ & ~mask) | (bits & mask);
+}
+
+
+SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
+ case FPCR:
+ return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
+ default:
+ UNREACHABLE();
+ return SimSystemRegister();
+ }
+}
+
+
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ExternalReference::set_redirector(isolate, &RedirectExternalReference);
+}
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == NULL) {
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(new Decoder(), isolate);
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+
+void Simulator::CallVoid(byte* entry, CallArgument* args) {
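+  // Arguments are assigned following AAPCS64: the first eight X arguments go
+  // in x0-x7, the first eight D arguments in d0-d7, and any remaining
+  // arguments are passed on the stack.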
+ int index_x = 0;
+ int index_d = 0;
+
+ std::vector<int64_t> stack_args(0);
+ for (int i = 0; !args[i].IsEnd(); i++) {
+ CallArgument arg = args[i];
+ if (arg.IsX() && (index_x < 8)) {
+ set_xreg(index_x++, arg.bits());
+ } else if (arg.IsD() && (index_d < 8)) {
+ set_dreg_bits(index_d++, arg.bits());
+ } else {
+ ASSERT(arg.IsD() || arg.IsX());
+ stack_args.push_back(arg.bits());
+ }
+ }
+
+ // Process stack arguments, and make sure the stack is suitably aligned.
+ uintptr_t original_stack = sp();
+ uintptr_t entry_stack = original_stack -
+ stack_args.size() * sizeof(stack_args[0]);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ char * stack = reinterpret_cast<char*>(entry_stack);
+ std::vector<int64_t>::const_iterator it;
+ for (it = stack_args.begin(); it != stack_args.end(); it++) {
+ memcpy(stack, &(*it), sizeof(*it));
+ stack += sizeof(*it);
+ }
+
+ ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
+ set_sp(entry_stack);
+
+ // Call the generated code.
+ set_pc(entry);
+ set_lr(kEndOfSimAddress);
+ CheckPCSComplianceAndRun();
+
+ set_sp(original_stack);
+}
+
+
+int64_t Simulator::CallInt64(byte* entry, CallArgument* args) {
+ CallVoid(entry, args);
+ return xreg(0);
+}
+
+
+double Simulator::CallDouble(byte* entry, CallArgument* args) {
+ CallVoid(entry, args);
+ return dreg(0);
+}
+
+
+int64_t Simulator::CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+                          Object* recv,
+ int64_t argc,
+ Object*** argv) {
+ CallArgument args[] = {
+ CallArgument(function_entry),
+ CallArgument(func),
+    CallArgument(recv),
+ CallArgument(argc),
+ CallArgument(argv),
+ CallArgument::End()
+ };
+ return CallInt64(entry, args);
+}
+
+int64_t Simulator::CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate) {
+ CallArgument args[] = {
+ CallArgument(input),
+ CallArgument(start_offset),
+ CallArgument(input_start),
+ CallArgument(input_end),
+ CallArgument(output),
+ CallArgument(output_size),
+ CallArgument(stack_base),
+ CallArgument(direct_call),
+ CallArgument(return_address),
+ CallArgument(isolate),
+ CallArgument::End()
+ };
+ return CallInt64(entry, args);
+}
+
+
+void Simulator::CheckPCSComplianceAndRun() {
+#ifdef DEBUG
+ CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
+ CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
+
+ int64_t saved_registers[kNumberOfCalleeSavedRegisters];
+ uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters];
+
+ CPURegList register_list = kCalleeSaved;
+ CPURegList fpregister_list = kCalleeSavedFP;
+
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+    // x31 is not a callee-saved register, so there is no need to specify
+    // whether we want the stack pointer or the zero register.
+ saved_registers[i] = xreg(register_list.PopLowestIndex().code());
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ saved_fpregisters[i] =
+ dreg_bits(fpregister_list.PopLowestIndex().code());
+ }
+ int64_t original_stack = sp();
+#endif
+ // Start the simulation!
+ Run();
+#ifdef DEBUG
+ CHECK_EQ(original_stack, sp());
+ // Check that callee-saved registers have been preserved.
+ register_list = kCalleeSaved;
+ fpregister_list = kCalleeSavedFP;
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+ CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ ASSERT(saved_fpregisters[i] ==
+ dreg_bits(fpregister_list.PopLowestIndex().code()));
+ }
+
+  // Corrupt the caller-saved registers, except for the return registers.
+
+  // In theory x0 to x7 can be used for return values, but V8 only uses x0
+  // and x1 for now.
+ register_list = kCallerSaved;
+ register_list.Remove(x0);
+ register_list.Remove(x1);
+
+ // In theory d0 to d7 can be used for return values, but V8 only uses d0
+  // for now.
+ fpregister_list = kCallerSavedFP;
+ fpregister_list.Remove(d0);
+
+ CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+#endif
+}
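+
+
+// Illustrative note (not part of the original source): the check above
+// enforces two AAPCS64 properties on every simulated call. First, the
+// callee-saved registers and the stack pointer must have the same values
+// after the call as before it. Second, caller-saved registers (other than
+// the return registers) must not be relied upon after the call; this is made
+// observable by deliberately corrupting them with values whose low byte
+// encodes the register number, so stale reads show up quickly in tests.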
+
+
+#ifdef DEBUG
+// The least significant byte of the corruption value holds the corresponding
+// register's code.
+void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
+ if (list->type() == CPURegister::kRegister) {
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_xreg(code, value | code);
+ }
+ } else {
+ ASSERT(list->type() == CPURegister::kFPRegister);
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_dreg_bits(code, value | code);
+ }
+ }
+}
+
+
+void Simulator::CorruptAllCallerSavedCPURegisters() {
+  // CorruptRegisters() alters the list it is given, so pass copies.
+ CPURegList register_list = kCallerSaved;
+ CPURegList fpregister_list = kCallerSavedFP;
+
+ CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+}
+#endif
+
+
+// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
+// TODO(all): Insert a marker in the extra space allocated on the stack.
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes);
+ intptr_t new_sp = sp() - 2 * kXRegSizeInBytes;
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_sp(new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ intptr_t current_sp = sp();
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes);
+ set_sp(current_sp + 2 * kXRegSizeInBytes);
+ return address;
+}
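+
+
+// Illustrative note (not part of the original source): PushAddress() and
+// PopAddress() move sp by 2 * kXRegSizeInBytes (16 bytes) even though only
+// 8 bytes are stored, because AAPCS64 requires sp to stay 16-byte aligned.
+// For example (addresses made up for illustration), with sp == 0x1000,
+// PushAddress(addr) writes addr to 0x0ff0 and leaves sp == 0x0ff0; the
+// matching PopAddress() reads the value back and restores sp to 0x1000.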
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+  // Leave a safety margin above the stack limit to prevent overrunning the
+  // stack when pushing values.
+  // The margin is currently 256 bytes: the generated code uses the stack
+  // intensively, so a larger margin would be too restrictive. Once stack
+  // usage decreases, it can be raised back to 1024 bytes.
+  // TODO(all): Increase the stack limit protection.
+ return reinterpret_cast<uintptr_t>(stack_limit_) + 256;
+}
+
+
+Simulator::Simulator(Decoder* decoder, Isolate* isolate, FILE* stream)
+ : decoder_(decoder), last_debugger_input_(NULL), log_parameters_(NO_PARAM),
+ isolate_(isolate) {
+  // Set up the decoder.
+ decoder_->AppendVisitor(this);
+
+ ResetState();
+
+  // Allocate and set up the simulator stack.
+ stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
+ stack_ = new byte[stack_size_];
+ stack_limit_ = stack_ + stack_protection_size_;
+ byte* tos = stack_ + stack_size_ - stack_protection_size_;
+  // The stack pointer must be 16-byte aligned.
+ set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
+
+ stream_ = stream;
+ print_disasm_ = new PrintDisassembler(stream_);
+
+ if (FLAG_trace_sim) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ log_parameters_ = LOG_ALL;
+ }
+
+ // The debugger needs to disassemble code without the simulator executing an
+ // instruction, so we create a dedicated decoder.
+ disassembler_decoder_ = new Decoder();
+ disassembler_decoder_->AppendVisitor(print_disasm_);
+
+ if (FLAG_log_instruction_stats) {
+ instrument_ = new Instrument(FLAG_log_instruction_file,
+ FLAG_log_instruction_period);
+ decoder_->AppendVisitor(instrument_);
+ }
+}
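+
+
+// Illustrative note (not part of the original source): the simulator stack
+// allocated above is laid out as follows, from low to high addresses:
+//
+//   stack_                                          low protection area
+//   stack_ + stack_protection_size_                 stack_limit_
+//   ...                                             usable stack space
+//   stack_ + stack_size_ - stack_protection_size_   initial sp (rounded down
+//                                                   to a 16-byte boundary)
+//   stack_ + stack_size_                            high protection area
+//
+// The protection areas are ordinary heap bytes rather than guarded pages, so
+// overruns are caught by explicit checks (StackLimit(), CheckMemoryAccess())
+// rather than by the host MMU.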
+
+
+void Simulator::ResetState() {
+ // Reset the system registers.
+ nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
+ fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
+
+  // Reset the registers. General purpose registers are filled with a
+  // recognisable junk value (0xbadbeef) to help catch uses of uninitialised
+  // registers.
+ pc_ = NULL;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ set_xreg(i, 0xbadbeef);
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
+ set_dreg_bits(i, 0x7ff000007f800001UL);
+ }
+ // Returning to address 0 exits the Simulator.
+ set_lr(kEndOfSimAddress);
+
+ // Reset debug helpers.
+  breakpoints_.clear();
+  break_on_next_ = false;
+}
+
+
+Simulator::~Simulator() {
+ delete[] stack_;
+ if (FLAG_log_instruction_stats) {
+ delete instrument_;
+ }
+ delete disassembler_decoder_;
+ delete print_disasm_;
+ DeleteArray(last_debugger_input_);
+}
+
+
+void Simulator::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ ExecuteInstruction();
+ }
+}
+
+
+void Simulator::RunFrom(Instruction* start) {
+ set_pc(start);
+ Run();
+}
+
+
+void Simulator::CheckStackAlignment() {
+ // TODO(aleram): The sp alignment check to perform depends on the processor
+ // state. Check the specifications for more details.
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a svc (Supervisor Call) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
+class Redirection {
+ public:
+ Redirection(void* external_function, ExternalReference::Type type)
+ : external_function_(external_function),
+ type_(type),
+ next_(NULL) {
+ redirect_call_.SetInstructionBits(
+ HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ // TODO(all): Simulator flush I cache
+ isolate->set_simulator_redirection(this);
+ }
+
+ void* address_of_redirect_call() {
+ return reinterpret_cast<void*>(&redirect_call_);
+ }
+
+ void* external_function() { return external_function_; }
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) {
+ ASSERT_EQ(current->type(), type);
+ return current;
+ }
+ }
+ return new Redirection(external_function, type);
+ }
+
+ static Redirection* FromHltInstruction(Instruction* redirect_call) {
+ char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
+ char* addr_of_redirection =
+ addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(int64_t reg) {
+ Redirection* redirection =
+ FromHltInstruction(reinterpret_cast<Instruction*>(reg));
+ return redirection->external_function();
+ }
+
+ private:
+ void* external_function_;
+ Instruction redirect_call_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+};
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
+ return redirection->address_of_redirect_call();
+}
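+
+
+// Illustrative note (not part of the original source): the redirection
+// mechanism works roughly as follows. A call to a host C++ function from
+// generated code is retargeted at a Redirection's redirect_call_ HLT
+// instruction. When the simulator executes that HLT, it can recover the
+// original target and call it natively, along the lines of:
+//
+//   // Sketch only; the real dispatch lives in the simulator's
+//   // exception-handling code.
+//   Redirection* redirection = Redirection::FromHltInstruction(pc_);
+//   void* host_function = redirection->external_function();
+//   // ... call host_function with arguments taken from x0-x7 / d0-d7 ...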
+
+
+const char* Simulator::xreg_names[] = {
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
+
+const char* Simulator::wreg_names[] = {
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
+
+const char* Simulator::sreg_names[] = {
+"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+
+const char* Simulator::dreg_names[] = {
+"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+const char* Simulator::vreg_names[] = {
+"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+
+const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return wreg_names[code];
+}
+
+
+const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return xreg_names[code];
+}
+
+
+const char* Simulator::SRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return sreg_names[code];
+}
+
+
+const char* Simulator::DRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return dreg_names[code];
+}
+
+
+const char* Simulator::VRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return vreg_names[code];
+}
+
+
+int Simulator::CodeFromName(const char* name) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if ((strcmp(xreg_names[i], name) == 0) ||
+ (strcmp(wreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if ((strcmp(vreg_names[i], name) == 0) ||
+ (strcmp(dreg_names[i], name) == 0) ||
+ (strcmp(sreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
+ return kSPRegInternalCode;
+ }
+ return -1;
+}
+
+
+// Helpers ---------------------------------------------------------------------
+int64_t Simulator::AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ int64_t src1,
+ int64_t src2,
+ int64_t carry_in) {
+ ASSERT((carry_in == 0) || (carry_in == 1));
+ ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+
+ uint64_t u1, u2;
+ int64_t result;
+ int64_t signed_sum = src1 + src2 + carry_in;
+
+ uint32_t N, Z, C, V;
+
+ if (reg_size == kWRegSize) {
+ u1 = static_cast<uint64_t>(src1) & kWRegMask;
+ u2 = static_cast<uint64_t>(src2) & kWRegMask;
+
+ result = signed_sum & kWRegMask;
+ // Compute the C flag by comparing the sum to the max unsigned integer.
+ C = ((kWMaxUInt - u1) < (u2 + carry_in)) ||
+ ((kWMaxUInt - u1 - carry_in) < u2);
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ int64_t s_src1 = src1 << (kXRegSize - kWRegSize);
+ int64_t s_src2 = src2 << (kXRegSize - kWRegSize);
+ int64_t s_result = result << (kXRegSize - kWRegSize);
+ V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);
+
+ } else {
+ u1 = static_cast<uint64_t>(src1);
+ u2 = static_cast<uint64_t>(src2);
+
+ result = signed_sum;
+ // Compute the C flag by comparing the sum to the max unsigned integer.
+ C = ((kXMaxUInt - u1) < (u2 + carry_in)) ||
+ ((kXMaxUInt - u1 - carry_in) < u2);
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
+ }
+
+ N = CalcNFlag(result, reg_size);
+ Z = CalcZFlag(result);
+
+ if (set_flags) {
+ nzcv().SetN(N);
+ nzcv().SetZ(Z);
+ nzcv().SetC(C);
+ nzcv().SetV(V);
+ }
+ return result;
+}
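+
+
+// Illustrative note (not part of the original source): a worked example of
+// the flag computation above, for a 32-bit ADDS of 0x7fffffff and 1 with
+// carry_in == 0:
+//   result = 0x80000000
+//   N = 1  (bit 31 of the result is set)
+//   Z = 0  (the result is not zero)
+//   C = 0  (0xffffffff - 0x7fffffff >= 1, so there is no unsigned overflow)
+//   V = 1  (both operands are non-negative but the result is negative)
+// This matches the architectural outcome of "ADDS w0, w1, #1" with
+// w1 == 0x7fffffff.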
+
+
+int64_t Simulator::ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount) {
+ if (amount == 0) {
+ return value;
+ }
+ int64_t mask = reg_size == kXRegSize ? kXRegMask : kWRegMask;
+ switch (shift_type) {
+ case LSL:
+ return (value << amount) & mask;
+ case LSR:
+ return static_cast<uint64_t>(value) >> amount;
+ case ASR: {
+ // Shift used to restore the sign.
+ unsigned s_shift = kXRegSize - reg_size;
+ // Value with its sign restored.
+ int64_t s_value = (value << s_shift) >> s_shift;
+ return (s_value >> amount) & mask;
+ }
+ case ROR: {
+ if (reg_size == kWRegSize) {
+ value &= kWRegMask;
+ }
+ return (static_cast<uint64_t>(value) >> amount) |
+ ((value & ((1L << amount) - 1L)) << (reg_size - amount));
+ }
+ default:
+ UNIMPLEMENTED();
+ return 0;
+ }
+}
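+
+
+// Illustrative note (not part of the original source): a worked example of
+// the ROR case above, rotating the 32-bit value 0x000000f1 right by 4:
+//   unsigned part:  0x000000f1 >> 4                == 0x0000000f
+//   wrapped part:  (0x000000f1 & 0xf) << (32 - 4)  == 0x10000000
+//   result:         0x0000000f | 0x10000000        == 0x1000000f
+// which is what "ROR w0, w1, #4" produces with w1 == 0x000000f1.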
+
+
+int64_t Simulator::ExtendValue(unsigned reg_size,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift) {
+ switch (extend_type) {
+ case UXTB:
+ value &= kByteMask;
+ break;
+ case UXTH:
+ value &= kHalfWordMask;
+ break;
+ case UXTW:
+ value &= kWordMask;
+ break;
+ case SXTB:
+ value = (value << 56) >> 56;
+ break;
+ case SXTH:
+ value = (value << 48) >> 48;
+ break;
+ case SXTW:
+ value = (value << 32) >> 32;
+ break;
+ case UXTX:
+ case SXTX:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ int64_t mask = (reg_size == kXRegSize) ? kXRegMask : kWRegMask;
+ return (value << left_shift) & mask;
+}
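+
+
+// Illustrative note (not part of the original source): a worked example of
+// ExtendValue() for SXTB with a left shift of 1 and a 64-bit register size:
+//   input:       0x00000000000000ff
+//   SXTB:        (0xff << 56) >> 56  == 0xffffffffffffffff  (-1)
+//   left shift:  -1 << 1             == 0xfffffffffffffffe  (-2)
+// so "ADD x0, x1, w2, SXTB #1" with w2 == 0xff adds -2 to x1.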
+
+
+void Simulator::FPCompare(double val0, double val1) {
+ AssertSupportedFPCR();
+
+ // TODO(jbramley): This assumes that the C++ implementation handles
+ // comparisons in the way that we expect (as per AssertSupportedFPCR()).
+ if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
+ nzcv().SetRawValue(FPUnorderedFlag);
+ } else if (val0 < val1) {
+ nzcv().SetRawValue(FPLessThanFlag);
+ } else if (val0 > val1) {
+ nzcv().SetRawValue(FPGreaterThanFlag);
+ } else if (val0 == val1) {
+ nzcv().SetRawValue(FPEqualFlag);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::SetBreakpoint(Instruction* location) {
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if (breakpoints_.at(i).location == location) {
+ PrintF("Existing breakpoint at %p was %s\n",
+ reinterpret_cast<void*>(location),
+ breakpoints_.at(i).enabled ? "disabled" : "enabled");
+ breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
+ return;
+ }
+ }
+ Breakpoint new_breakpoint = {location, true};
+ breakpoints_.push_back(new_breakpoint);
+ PrintF("Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
+}
+
+
+void Simulator::ListBreakpoints() {
+ PrintF("Breakpoints:\n");
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ PrintF("%p : %s\n",
+ reinterpret_cast<void*>(breakpoints_.at(i).location),
+ breakpoints_.at(i).enabled ? "enabled" : "disabled");
+ }
+}
+
+
+void Simulator::CheckBreakpoints() {
+ bool hit_a_breakpoint = false;
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if ((breakpoints_.at(i).location == pc_) &&
+ breakpoints_.at(i).enabled) {
+ hit_a_breakpoint = true;
+ // Disable this breakpoint.
+ breakpoints_.at(i).enabled = false;
+ }
+ }
+ if (hit_a_breakpoint) {
+ PrintF("Hit and disabled a breakpoint at %p.\n",
+ reinterpret_cast<void*>(pc_));
+ Debug();
+ }
+}
+
+
+void Simulator::CheckBreakNext() {
+  // If the current instruction is a BLR, insert a breakpoint just after it.
+ if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
+ SetBreakpoint(pc_->NextInstruction());
+ break_on_next_ = false;
+ }
+}
+
+
+void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
+ Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
+ for (Instruction* pc = start; pc < end; pc = pc->NextInstruction()) {
+ disassembler_decoder_->Decode(pc);
+ }
+}
+
+
+void Simulator::PrintSystemRegisters(bool print_all) {
+ static bool first_run = true;
+
+ // Define some colour codes to use for the register dump.
+ // TODO(jbramley): Find a more elegant way of defining these.
+ char const * const clr_normal = (FLAG_log_colour) ? ("\033[m") : ("");
+ char const * const clr_flag_name = (FLAG_log_colour) ? ("\033[1;30m") : ("");
+ char const * const clr_flag_value = (FLAG_log_colour) ? ("\033[1;37m") : ("");
+
+ static SimSystemRegister last_nzcv;
+ if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
+ fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ N(), Z(), C(), V(),
+ clr_normal);
+ }
+ last_nzcv = nzcv();
+
+ static SimSystemRegister last_fpcr;
+ if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+    ASSERT(fpcr().RMode() < (sizeof(rmode) / sizeof(rmode[0])));
+ fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
+ clr_normal);
+ }
+ last_fpcr = fpcr();
+
+ first_run = false;
+}
+
+
+void Simulator::PrintRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static int64_t last_regs[kNumberOfRegisters];
+
+ // Define some colour codes to use for the register dump.
+ // TODO(jbramley): Find a more elegant way of defining these.
+ char const * const clr_normal = (FLAG_log_colour) ? ("\033[m") : ("");
+ char const * const clr_reg_name = (FLAG_log_colour) ? ("\033[1;34m") : ("");
+ char const * const clr_reg_value = (FLAG_log_colour) ? ("\033[1;36m") : ("");
+
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (print_all_regs || first_run ||
+ (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
+ fprintf(stream_,
+ "# %s%4s:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name,
+ XRegNameForCode(i, Reg31IsStackPointer),
+ clr_reg_value,
+ xreg(i, Reg31IsStackPointer),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = xreg(i, Reg31IsStackPointer);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintFPRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static uint64_t last_regs[kNumberOfFPRegisters];
+
+ // Define some colour codes to use for the register dump.
+ // TODO(jbramley): Find a more elegant way of defining these.
+ char const * const clr_normal = (FLAG_log_colour) ? ("\033[m") : ("");
+ char const * const clr_reg_name = (FLAG_log_colour) ? ("\033[1;33m") : ("");
+ char const * const clr_reg_value = (FLAG_log_colour) ? ("\033[1;35m") : ("");
+
+ // Print as many rows of registers as necessary, keeping each individual
+ // register in the same column each time (to make it easy to visually scan
+ // for changes).
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
+ fprintf(stream_,
+ "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+ clr_reg_name,
+ VRegNameForCode(i),
+ clr_reg_value,
+ dreg_bits(i),
+ clr_normal,
+ clr_reg_name,
+ DRegNameForCode(i),
+ clr_reg_value,
+ dreg(i),
+ clr_reg_name,
+ SRegNameForCode(i),
+ clr_reg_value,
+ sreg(i),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = dreg_bits(i);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintProcessorState() {
+ PrintSystemRegisters();
+ PrintRegisters();
+ PrintFPRegisters();
+}
+
+
+void Simulator::PrintWrite(uint8_t* address,
+ uint64_t value,
+ unsigned num_bytes) {
+ // Define some color codes to use for memory logging.
+ const char* const clr_normal = (FLAG_log_colour) ? ("\033[m")
+ : ("");
+ const char* const clr_memory_value = (FLAG_log_colour) ? ("\033[1;32m")
+ : ("");
+ const char* const clr_memory_address = (FLAG_log_colour) ? ("\033[32m")
+ : ("");
+
+  // The output format is "# value -> address". The format string is kept in
+  // a separate variable because compilers tend to struggle with the
+  // parameterized width specifier (%0*) when it appears inline.
+ const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
+ fprintf(stream_,
+ format,
+ clr_memory_value,
+          num_bytes * 2,  // The width in hexadecimal characters.
+ value,
+ clr_normal,
+ clr_memory_address,
+          reinterpret_cast<uint64_t>(address),
+ clr_normal);
+}
+
+
+// Visitors ------------------------------------------------------------------
+
+void Simulator::VisitUnimplemented(Instruction* instr) {
+ fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitUnallocated(Instruction* instr) {
+ fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR:
+ set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
+ break;
+ case ADRP: // Not implemented in the assembler.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranch(Instruction* instr) {
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case BL:
+ set_lr(instr->NextInstruction());
+ // Fall through.
+ case B:
+ set_pc(instr->ImmPCOffsetTarget());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitConditionalBranch(Instruction* instr) {
+ ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+ if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ Instruction* target = reg<Instruction*>(instr->Rn());
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BLR: {
+ set_lr(instr->NextInstruction());
+ if (instr->Rn() == 31) {
+ // BLR XZR is used as a guard for the constant pool. We should never hit
+ // this, but if we do trap to allow debugging.
+ Debug();
+ }
+ // Fall through.
+ }
+ case BR:
+ case RET: set_pc(target); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitTestBranch(Instruction* instr) {
+ unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40();
+ bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: break;
+ case TBNZ: take_branch = !take_branch; break;
+ default: UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitCompareBranch(Instruction* instr) {
+ unsigned rt = instr->Rt();
+ bool take_branch = false;
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w: take_branch = (wreg(rt) == 0); break;
+ case CBZ_x: take_branch = (xreg(rt) == 0); break;
+ case CBNZ_w: take_branch = (wreg(rt) != 0); break;
+ case CBNZ_x: take_branch = (xreg(rt) != 0); break;
+ default: UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ bool set_flags = instr->FlagsUpdate();
+ int64_t new_val = 0;
+ Instr operation = instr->Mask(AddSubOpMask);
+
+ switch (operation) {
+ case ADD:
+ case ADDS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ op2);
+ break;
+ }
+ case SUB:
+ case SUBS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ ~op2,
+ 1);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ set_reg(reg_size, instr->Rd(), new_val, instr->RdMode());
+}
+
+
+void Simulator::VisitAddSubShifted(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = ShiftOperand(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Shift>(instr->ShiftDP()),
+ instr->ImmDPShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubImmediate(Instruction* instr) {
+ int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubExtended(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = ExtendValue(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Extend>(instr->ExtendMode()),
+ instr->ImmExtendShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubWithCarry(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = reg(reg_size, instr->Rm());
+ int64_t new_val;
+
+  if ((instr->Mask(AddSubOpMask) == SUB) ||
+      (instr->Mask(AddSubOpMask) == SUBS)) {
+ op2 = ~op2;
+ }
+
+ new_val = AddWithCarry(reg_size,
+ instr->FlagsUpdate(),
+ reg(reg_size, instr->Rn()),
+ op2,
+ C());
+
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitLogicalShifted(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ Shift shift_type = static_cast<Shift>(instr->ShiftDP());
+ unsigned shift_amount = instr->ImmDPShift();
+ int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
+ shift_amount);
+ if (instr->Mask(NOT) == NOT) {
+ op2 = ~op2;
+ }
+ LogicalHelper(instr, op2);
+}
+
+
+void Simulator::VisitLogicalImmediate(Instruction* instr) {
+ LogicalHelper(instr, instr->ImmLogical());
+}
+
+
+void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op1 = reg(reg_size, instr->Rn());
+ int64_t result = 0;
+ bool update_flags = false;
+
+ // Switch on the logical operation, stripping out the NOT bit, as it has a
+ // different meaning for logical immediate instructions.
+ switch (instr->Mask(LogicalOpMask & ~NOT)) {
+ case ANDS: update_flags = true; // Fall through.
+ case AND: result = op1 & op2; break;
+ case ORR: result = op1 | op2; break;
+ case EOR: result = op1 ^ op2; break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ if (update_flags) {
+ nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetZ(CalcZFlag(result));
+ nzcv().SetC(0);
+ nzcv().SetV(0);
+ }
+
+ set_reg(reg_size, instr->Rd(), result, instr->RdMode());
+}
+
+
+void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
+}
+
+
+void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
+ ConditionalCompareHelper(instr, instr->ImmCondCmp());
+}
+
+
+void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op1 = reg(reg_size, instr->Rn());
+
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ // If the condition passes, set the status flags to the result of comparing
+ // the operands.
+ if (instr->Mask(ConditionalCompareMask) == CCMP) {
+ AddWithCarry(reg_size, true, op1, ~op2, 1);
+ } else {
+ ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
+ AddWithCarry(reg_size, true, op1, op2, 0);
+ }
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ }
+}
+
+
+void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ int offset = instr->ImmLSUnsigned() << instr->SizeLS();
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), Offset);
+}
+
+
+void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
+}
+
+
+void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+ unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
+
+ int64_t offset = ExtendValue(kXRegSize, xreg(instr->Rm()), ext,
+ shift_amount);
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::LoadStoreHelper(Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode) {
+ unsigned srcdst = instr->Rt();
+ unsigned addr_reg = instr->Rn();
+ uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
+ int num_bytes = 1 << instr->SizeLS();
+ uint8_t* stack = NULL;
+
+  // Handle the writeback for stores before the store. On a CPU the writeback
+  // and the store are atomic, but when running on the simulator it is possible
+  // to be interrupted between them. The simulator is not thread safe, and V8
+  // does not require it to be to run JavaScript, so the profiler may sample
+  // the "simulated" CPU in the middle of a load/store with writeback. The code
+  // below ensures that push operations are safe even when interrupted: the
+  // stack pointer is decremented before the element is written to the stack.
+ if (instr->IsStore()) {
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+
+ // For store the address post writeback is used to check access below the
+ // stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+ }
+
+ LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
+ case LDRSB_w: {
+ set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead8(address), SXTB));
+ break;
+ }
+ case LDRSB_x: {
+ set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead8(address), SXTB));
+ break;
+ }
+ case LDRSH_w: {
+ set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead16(address), SXTH));
+ break;
+ }
+ case LDRSH_x: {
+ set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead16(address), SXTH));
+ break;
+ }
+ case LDRSW_x: {
+ set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead32(address), SXTW));
+ break;
+ }
+ case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
+ case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
+ case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
+ case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
+ default: UNIMPLEMENTED();
+ }
+
+ // Handle the writeback for loads after the load to ensure safe pop
+ // operation even when interrupted in the middle of it. The stack pointer
+ // is only updated after the load so pop(fp) will never break the invariant
+ // sp <= fp expected while walking the stack in the sampler.
+ if (instr->IsLoad()) {
+ // For loads the address pre writeback is used to check access below the
+ // stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+ }
+
+ // Accesses below the stack pointer (but above the platform stack limit) are
+ // not allowed in the ABI.
+ CheckMemoryAccess(address, stack);
+}
+
+
+void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
+ LoadStorePairHelper(instr, PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
+ LoadStorePairHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::LoadStorePairHelper(Instruction* instr,
+ AddrMode addrmode) {
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ unsigned addr_reg = instr->Rn();
+ int offset = instr->ImmLSPair() << instr->SizeLSPair();
+ uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
+ uint8_t* stack = NULL;
+
+  // Handle the writeback for stores before the store. On a CPU the writeback
+  // and the store are atomic, but when running on the simulator it is possible
+  // to be interrupted between them. The simulator is not thread safe, and V8
+  // does not require it to be to run JavaScript, so the profiler may sample
+  // the "simulated" CPU in the middle of a load/store with writeback. The code
+  // below ensures that push operations are safe even when interrupted: the
+  // stack pointer is decremented before the element is written to the stack.
+ if (instr->IsStore()) {
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+
+ // For store the address post writeback is used to check access below the
+ // stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+ }
+
+ LoadStorePairOp op =
+ static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
+
+ // 'rt' and 'rt2' can only be aliased for stores.
+ ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+
+ switch (op) {
+ case LDP_w: {
+ set_wreg(rt, MemoryRead32(address));
+ set_wreg(rt2, MemoryRead32(address + kWRegSizeInBytes));
+ break;
+ }
+ case LDP_s: {
+ set_sreg(rt, MemoryReadFP32(address));
+ set_sreg(rt2, MemoryReadFP32(address + kSRegSizeInBytes));
+ break;
+ }
+ case LDP_x: {
+ set_xreg(rt, MemoryRead64(address));
+ set_xreg(rt2, MemoryRead64(address + kXRegSizeInBytes));
+ break;
+ }
+ case LDP_d: {
+ set_dreg(rt, MemoryReadFP64(address));
+ set_dreg(rt2, MemoryReadFP64(address + kDRegSizeInBytes));
+ break;
+ }
+ case LDPSW_x: {
+ set_xreg(rt, ExtendValue(kXRegSize, MemoryRead32(address), SXTW));
+ set_xreg(rt2, ExtendValue(kXRegSize,
+ MemoryRead32(address + kWRegSizeInBytes), SXTW));
+ break;
+ }
+ case STP_w: {
+ MemoryWrite32(address, wreg(rt));
+ MemoryWrite32(address + kWRegSizeInBytes, wreg(rt2));
+ break;
+ }
+ case STP_s: {
+ MemoryWriteFP32(address, sreg(rt));
+ MemoryWriteFP32(address + kSRegSizeInBytes, sreg(rt2));
+ break;
+ }
+ case STP_x: {
+ MemoryWrite64(address, xreg(rt));
+ MemoryWrite64(address + kXRegSizeInBytes, xreg(rt2));
+ break;
+ }
+ case STP_d: {
+ MemoryWriteFP64(address, dreg(rt));
+ MemoryWriteFP64(address + kDRegSizeInBytes, dreg(rt2));
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ // Handle the writeback for loads after the load to ensure safe pop
+ // operation even when interrupted in the middle of it. The stack pointer
+ // is only updated after the load so pop(fp) will never break the invariant
+ // sp <= fp expected while walking the stack in the sampler.
+ if (instr->IsLoad()) {
+ // For loads the address pre writeback is used to check access below the
+ // stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+ }
+
+ // Accesses below the stack pointer (but above the platform stack limit) are
+ // not allowed in the ABI.
+ CheckMemoryAccess(address, stack);
+}
+
+
+void Simulator::VisitLoadLiteral(Instruction* instr) {
+ uint8_t* address = instr->LiteralAddress();
+ unsigned rt = instr->Rt();
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break;
+ case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break;
+ case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break;
+ case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
+ int64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
+ // When the base register is SP the stack pointer is required to be
+ // quadword aligned prior to the address calculation and write-backs.
+ // Misalignment will cause a stack alignment fault.
+ FATAL("ALIGNMENT EXCEPTION");
+ }
+
+ if ((addrmode == Offset) || (addrmode == PreIndex)) {
+ address += offset;
+ }
+
+ return reinterpret_cast<uint8_t*>(address);
+}
+
+
+void Simulator::LoadStoreWriteBack(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
+ ASSERT(offset != 0);
+ uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ set_reg(addr_reg, address + offset, Reg31IsStackPointer);
+ }
+}
+
+
+void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
+ if ((address >= stack_limit_) && (address < stack)) {
+ fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
+ fprintf(stream_, " sp is here: 0x%16p\n", stack);
+ fprintf(stream_, " access was here: 0x%16p\n", address);
+ fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_);
+ fprintf(stream_, "\n");
+ FATAL("ACCESS BELOW STACK POINTER");
+ }
+}
+
+
+uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
+ ASSERT(address != NULL);
+ ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+ uint64_t read = 0;
+ memcpy(&read, address, num_bytes);
+ return read;
+}
+
+
+uint8_t Simulator::MemoryRead8(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint8_t));
+}
+
+
+uint16_t Simulator::MemoryRead16(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint16_t));
+}
+
+
+uint32_t Simulator::MemoryRead32(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint32_t));
+}
+
+
+float Simulator::MemoryReadFP32(uint8_t* address) {
+ return rawbits_to_float(MemoryRead32(address));
+}
+
+
+uint64_t Simulator::MemoryRead64(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint64_t));
+}
+
+
+double Simulator::MemoryReadFP64(uint8_t* address) {
+ return rawbits_to_double(MemoryRead64(address));
+}
+
+
+void Simulator::MemoryWrite(uint8_t* address,
+ uint64_t value,
+ unsigned num_bytes) {
+ ASSERT(address != NULL);
+ ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+
+ LogWrite(address, value, num_bytes);
+ memcpy(address, &value, num_bytes);
+}
+
+
+void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
+ MemoryWrite(address, value, sizeof(uint32_t));
+}
+
+
+void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
+ MemoryWrite32(address, float_to_rawbits(value));
+}
+
+
+void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
+ MemoryWrite(address, value, sizeof(uint64_t));
+}
+
+
+void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
+ MemoryWrite64(address, double_to_rawbits(value));
+}
+
+
+void Simulator::VisitMoveWideImmediate(Instruction* instr) {
+ MoveWideImmediateOp mov_op =
+ static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
+ int64_t new_xn_val = 0;
+
+ bool is_64_bits = instr->SixtyFourBits() == 1;
+ // Shift is limited for W operations.
+ ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+
+ // Get the shifted immediate.
+ int64_t shift = instr->ShiftMoveWide() * 16;
+ int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
+
+ // Compute the new value.
+ switch (mov_op) {
+ case MOVN_w:
+ case MOVN_x: {
+ new_xn_val = ~shifted_imm16;
+ if (!is_64_bits) new_xn_val &= kWRegMask;
+ break;
+ }
+ case MOVK_w:
+ case MOVK_x: {
+ unsigned reg_code = instr->Rd();
+ int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
+ : wreg(reg_code);
+ new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
+ break;
+ }
+ case MOVZ_w:
+ case MOVZ_x: {
+ new_xn_val = shifted_imm16;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ // Update the destination register.
+ set_xreg(instr->Rd(), new_xn_val);
+}
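+
+
+// Illustrative note (not part of the original source): the move-wide
+// instructions handled above are typically used in sequence to materialise a
+// 64-bit constant, for example 0x123456789abcdef0:
+//   MOVZ x0, #0xdef0             // x0 = 0x000000000000def0
+//   MOVK x0, #0x9abc, lsl #16    // x0 = 0x000000009abcdef0
+//   MOVK x0, #0x5678, lsl #32    // x0 = 0x000056789abcdef0
+//   MOVK x0, #0x1234, lsl #48    // x0 = 0x123456789abcdef0
+// MOVZ clears the register and inserts one 16-bit chunk; each MOVK then
+// overwrites only its own 16-bit slot, exactly as the MOVK_w/MOVK_x case
+// computes new_xn_val from prev_xn_val.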
+
+
+void Simulator::VisitConditionalSelect(Instruction* instr) {
+ uint64_t new_val = xreg(instr->Rn());
+
+ if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
+ new_val = xreg(instr->Rm());
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: break;
+ case CSINC_w:
+ case CSINC_x: new_val++; break;
+ case CSINV_w:
+ case CSINV_x: new_val = ~new_val; break;
+ case CSNEG_w:
+ case CSNEG_x: new_val = -new_val; break;
+ default: UNIMPLEMENTED();
+ }
+ }
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitDataProcessing1Source(Instruction* instr) {
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSize)); break;
+ case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSize)); break;
+ case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
+ case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
+ case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
+ case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
+ case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
+ case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSize)); break;
+ case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSize)); break;
+ case CLS_w: {
+ set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSize));
+ break;
+ }
+ case CLS_x: {
+ set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSize));
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
+ ASSERT((num_bits == kWRegSize) || (num_bits == kXRegSize));
+ uint64_t result = 0;
+ for (unsigned i = 0; i < num_bits; i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+
+uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
+ // Split the 64-bit value into an 8-bit array, where b[0] is the least
+ // significant byte, and b[7] is the most significant.
+ uint8_t bytes[8];
+ uint64_t mask = 0xff00000000000000UL;
+ for (int i = 7; i >= 0; i--) {
+ bytes[i] = (value & mask) >> (i * 8);
+ mask >>= 8;
+ }
+
+ // Permutation tables for REV instructions.
+ // permute_table[Reverse16] is used by REV16_x, REV16_w
+ // permute_table[Reverse32] is used by REV32_x, REV_w
+ // permute_table[Reverse64] is used by REV_x
+ ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
+ static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
+ {4, 5, 6, 7, 0, 1, 2, 3},
+ {0, 1, 2, 3, 4, 5, 6, 7} };
+ uint64_t result = 0;
+ for (int i = 0; i < 8; i++) {
+ result <<= 8;
+ result |= bytes[permute_table[mode][i]];
+ }
+ return result;
+}
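+
+
+// Illustrative note (not part of the original source): with the permutation
+// tables above, ReverseBytes(0x0123456789abcdef, Reverse16) swaps the bytes
+// within each halfword, giving 0x23016745ab89efcd, while Reverse64 reverses
+// all eight bytes, giving 0xefcdab8967452301 -- matching the architectural
+// REV16 and REV results for a 64-bit register.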
+
+
+void Simulator::VisitDataProcessing2Source(Instruction* instr) {
+ // TODO(mcapewel) move these to a higher level file, as they are global
+ // assumptions.
+ ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
+ ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
+
+ Shift shift_op = NO_SHIFT;
+ int64_t result = 0;
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ case SDIV_w: {
+ int32_t rn = wreg(instr->Rn());
+ int32_t rm = wreg(instr->Rm());
+ if ((rn == kWMinInt) && (rm == -1)) {
+ result = kWMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case SDIV_x: {
+ int64_t rn = xreg(instr->Rn());
+ int64_t rm = xreg(instr->Rm());
+ if ((rn == kXMinInt) && (rm == -1)) {
+ result = kXMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_w: {
+ uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
+ uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_x: {
+ uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
+ uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case LSLV_w:
+ case LSLV_x: shift_op = LSL; break;
+ case LSRV_w:
+ case LSRV_x: shift_op = LSR; break;
+ case ASRV_w:
+ case ASRV_x: shift_op = ASR; break;
+ case RORV_w:
+ case RORV_x: shift_op = ROR; break;
+ default: UNIMPLEMENTED();
+ }
+
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ if (shift_op != NO_SHIFT) {
+ // Shift distance encoded in the least-significant five/six bits of the
+ // register.
+ int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
+ unsigned shift = wreg(instr->Rm()) & mask;
+ result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
+ shift);
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+// The algorithm used is described in section 8.2 of
+// Hacker's Delight, by Henry S. Warren, Jr.
+// It assumes that a right shift on a signed integer is an arithmetic shift.
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xffffffffL;
+ u1 = u >> 32;
+ v0 = v & 0xffffffffL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xffffffffL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
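+
+
+// Illustrative note (not part of the original source): MultiplyHighSigned()
+// returns the high 64 bits of the 128-bit signed product by splitting each
+// operand into 32-bit halves (u = u1:u0, v = v1:v0) and accumulating the
+// partial products that reach bits 64..127, i.e. u1*v1 plus the carries out
+// of the middle terms u1*v0 and u0*v1. As a quick sanity check,
+// MultiplyHighSigned(-1, -1) returns 0 (the full product is 1) and
+// MultiplyHighSigned(1L << 62, 4) returns 1 (the full product is 2^64).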
+
+
+void Simulator::VisitDataProcessing3Source(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+
+ int64_t result = 0;
+ // Extract and sign- or zero-extend 32-bit arguments for widening operations.
+ uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
+ uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
+ int64_t rn_s32 = reg<int32_t>(instr->Rn());
+ int64_t rm_s32 = reg<int32_t>(instr->Rm());
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x:
+ result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case MSUB_w:
+ case MSUB_x:
+ result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
+ case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
+ case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
+ case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
+ case SMULH_x:
+ ASSERT(instr->Ra() == kZeroRegCode);
+ result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
+ break;
+ default: UNIMPLEMENTED();
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitBitfield(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
+ int64_t R = instr->ImmR();
+ int64_t S = instr->ImmS();
+ int64_t diff = S - R;
+ int64_t mask;
+ if (diff >= 0) {
+ mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1
+ : reg_mask;
+ } else {
+ mask = ((1L << (S + 1)) - 1);
+ mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+ diff += reg_size;
+ }
+
+ // inzero indicates if the extracted bitfield is inserted into the
+ // destination register value or in zero.
+ // If extend is true, extend the sign of the extracted bitfield.
+ bool inzero = false;
+ bool extend = false;
+ switch (instr->Mask(BitfieldMask)) {
+ case BFM_x:
+ case BFM_w:
+ break;
+ case SBFM_x:
+ case SBFM_w:
+ inzero = true;
+ extend = true;
+ break;
+ case UBFM_x:
+ case UBFM_w:
+ inzero = true;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
+ int64_t src = reg(reg_size, instr->Rn());
+ // Rotate source bitfield into place.
+ int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
+ // Determine the sign extension.
+ int64_t topbits = ((1L << (reg_size - diff - 1)) - 1) << (diff + 1);
+ int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;
+
+ // Merge sign extension, dest/zero and bitfield.
+ result = signbits | (result & mask) | (dst & ~mask);
+
+ set_reg(reg_size, instr->Rd(), result);
+}
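+
+
+// Illustrative note (not part of the original source): a worked example of
+// the bitfield logic above. "UBFX w0, w1, #8, #4" is encoded as UBFM_w with
+// R == 8 and S == 11, so diff == 3 and mask == 0xf. The source is rotated
+// right by 8 bits, masked to the low four bits and, because UBFM clears the
+// untouched bits (inzero), merged with zero rather than with the old
+// destination. With w1 == 0x00012345 the result is 0x00000003, i.e. bits
+// [11:8] of w1.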
+
+
+void Simulator::VisitExtract(Instruction* instr) {
+ unsigned lsb = instr->ImmS();
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ set_reg(reg_size,
+ instr->Rd(),
+ (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) |
+ (reg(reg_size, instr->Rn()) << (reg_size - lsb)));
+}
+
+
+void Simulator::VisitFPImmediate(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dest = instr->Rd();
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
+ case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPIntegerConvert(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ FPRounding round = RMode();
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
+ case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
+ case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
+ case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
+ case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
+ case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
+ case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
+ case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
+ case FCVTMS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
+ case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
+ case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
+ case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
+ case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
+ case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
+ case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
+ case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
+ case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
+ case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
+ case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
+ case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
+ case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
+ case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
+ case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
+ case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
+ case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
+ case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
+ case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
+ case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
+
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
+ case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
+ case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
+ case UCVTF_dw: {
+ set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
+ break;
+ }
+ case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
+ case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
+ case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
+ case UCVTF_sw: {
+ set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
+ break;
+ }
+
+ default: UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+ int fbits = 64 - instr->FPScale();
+
+ FPRounding round = RMode();
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx_fixed:
+ set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
+ break;
+ case SCVTF_dw_fixed:
+ set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
+ break;
+ case UCVTF_dx_fixed:
+ set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
+ break;
+ case UCVTF_dw_fixed: {
+ set_dreg(dst,
+ UFixedToDouble(reg<uint32_t>(src), fbits, round));
+ break;
+ }
+ case SCVTF_sx_fixed:
+ set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
+ break;
+ case SCVTF_sw_fixed:
+ set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
+ break;
+ case UCVTF_sx_fixed:
+ set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
+ break;
+ case UCVTF_sw_fixed: {
+ set_sreg(dst,
+ UFixedToFloat(reg<uint32_t>(src), fbits, round));
+ break;
+ }
+ default: UNREACHABLE();
+ }
+}
+
+
+int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxInt) {
+ return kWMaxInt;
+ } else if (value < kWMinInt) {
+ return kWMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int32_t>(value);
+}
+
+
+int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxInt) {
+ return kXMaxInt;
+ } else if (value < kXMinInt) {
+ return kXMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int64_t>(value);
+}
+
+
+uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxUInt) {
+ return kWMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
+}
+
+
+uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxUInt) {
+ return kXMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
+}
+
+
+void Simulator::VisitFPCompare(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned reg_size = instr->FPType() == FP32 ? kSRegSize : kDRegSize;
+ double fn_val = fpreg(reg_size, instr->Rn());
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s:
+ case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break;
+ case FCMP_s_zero:
+ case FCMP_d_zero: FPCompare(fn_val, 0.0); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalCompare(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: {
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ // If the condition passes, set the status flags to the result of
+ // comparing the operands.
+ unsigned reg_size = instr->FPType() == FP32 ? kSRegSize : kDRegSize;
+ FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ }
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalSelect(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ Instr selected;
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ selected = instr->Rn();
+ } else {
+ selected = instr->Rm();
+ }
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
+ case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ case FMOV_s: set_sreg(fd, sreg(fn)); break;
+ case FMOV_d: set_dreg(fd, dreg(fn)); break;
+ case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break;
+ case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
+ case FNEG_s: set_sreg(fd, -sreg(fn)); break;
+ case FNEG_d: set_dreg(fd, -dreg(fn)); break;
+ case FSQRT_s: set_sreg(fd, std::sqrt(sreg(fn))); break;
+ case FSQRT_d: set_dreg(fd, std::sqrt(dreg(fn))); break;
+ case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
+ case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
+ case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
+ case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
+ case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
+ case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break;
+ case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break;
+ case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+// sign: 0 = positive, 1 = negative
+// exponent: Unbiased IEEE-754 exponent.
+// mantissa: The mantissa of the input. The top bit (which is not encoded for
+// normal IEEE-754 values) must not be omitted. This bit has the
+// value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
+ FPRounding round_mode) {
+ ASSERT((sign == 0) || (sign == 1));
+
+ // Only the FPTieEven rounding mode is implemented.
+ ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ // Rounding can promote subnormals to normals, and normals to infinities. For
+ // example, a double with exponent 127 (the largest finite single-precision
+ // exponent) would appear to be encodable as a float, but rounding based on
+ // the low-order mantissa bits could make it overflow. With ties-to-even
+ // rounding, this value would become
+ // an infinity.
+
+ // ---- Rounding Method ----
+ //
+ // The exponent is irrelevant in the rounding operation, so we treat the
+ // lowest-order bit that will fit into the result ('onebit') as having
+ // the value '1'. Similarly, the highest-order bit that won't fit into
+ // the result ('halfbit') has the value '0.5'. The 'point' sits between
+ // 'onebit' and 'halfbit':
+ //
+ //                   These bits fit into the result.
+ //                |----------------------|
+ //   mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ //                                       ||
+ //                                      / |
+ //                                     /  halfbit
+ //                               onebit
+ //
+ // For subnormal outputs, the range of representable bits is smaller and
+ // the position of onebit and halfbit depends on the exponent of the
+ // input, but the method is otherwise similar.
+ //
+ //   frac                 adjusted             result
+ //   (onebit: the bit immediately left of the point; halfbit: the bit
+ //    immediately right of it, in both 'frac' and 'adjusted')
+ //
+ //   0b00.0 (exact)    -> 0b00.0 (exact)    -> 0b00
+ //   0b00.0...         -> 0b00.0...         -> 0b00
+ //   0b00.1 (exact)    -> 0b00.0111..111    -> 0b00
+ //   0b00.1...         -> 0b00.1...         -> 0b01
+ //   0b01.0 (exact)    -> 0b01.0 (exact)    -> 0b01
+ //   0b01.0...         -> 0b01.0...         -> 0b01
+ //   0b01.1 (exact)    -> 0b01.1 (exact)    -> 0b10
+ //   0b01.1...         -> 0b01.1...         -> 0b10
+ //   0b10.0 (exact)    -> 0b10.0 (exact)    -> 0b10
+ //   0b10.0...         -> 0b10.0...         -> 0b10
+ //   0b10.1 (exact)    -> 0b10.0111..111    -> 0b10
+ //   0b10.1...         -> 0b10.1...         -> 0b11
+ //   0b11.0 (exact)    -> 0b11.0 (exact)    -> 0b11
+ //   ...
+ //
+ //  where:
+ //   adjusted = frac - (halfbit(mantissa) & ~onebit(frac));
+ //   mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+ static const int mantissa_offset = 0;
+ static const int exponent_offset = mantissa_offset + mbits;
+ static const int sign_offset = exponent_offset + ebits;
+ STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1));
+
+ // Bail out early for zero inputs.
+ if (mantissa == 0) {
+ return sign << sign_offset;
+ }
+
+ // If all bits in the exponent are set, the value is infinite or NaN.
+ // This is true for all binary IEEE-754 formats.
+ static const int infinite_exponent = (1 << ebits) - 1;
+ static const int max_normal_exponent = infinite_exponent - 1;
+
+ // Apply the exponent bias to encode it for the result. Doing this early makes
+ // it easy to detect values that will be infinite or subnormal.
+ exponent += max_normal_exponent >> 1;
+
+ if (exponent > max_normal_exponent) {
+ // Overflow: The input is too large for the result type to represent. The
+ // FPTieEven rounding mode handles overflows using infinities.
+ exponent = infinite_exponent;
+ mantissa = 0;
+ return (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ (mantissa << mantissa_offset);
+ }
+
+ // Calculate the shift required to move the top mantissa bit to the proper
+ // place in the destination type.
+ const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
+ int shift = highest_significant_bit - mbits;
+
+ if (exponent <= 0) {
+ // The output will be subnormal (before rounding).
+
+ // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+ // is necessary because the exponent of a subnormal value (encoded as 0) is
+ // the same as the exponent of the smallest normal value (encoded as 1).
+ shift += -exponent + 1;
+
+ // Handle inputs that would produce a zero output.
+ //
+ // Shifts higher than highest_significant_bit+1 will always produce a zero
+ // result. A shift of exactly highest_significant_bit+1 might produce a
+ // non-zero result after rounding.
+ if (shift > (highest_significant_bit + 1)) {
+ // The result will always be +/-0.0.
+ return sign << sign_offset;
+ }
+
+ // Properly encode the exponent for a subnormal output.
+ exponent = 0;
+ } else {
+ // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+ // normal values.
+ mantissa &= ~(1UL << highest_significant_bit);
+ }
+
+ if (shift > 0) {
+ // We have to shift the mantissa to the right. Some precision is lost, so we
+ // need to apply rounding.
+ uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+ uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
+ uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
+ T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
+
+ T result = (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset);
+
+ // A very large mantissa can overflow during rounding. If this happens, the
+ // exponent should be incremented and the mantissa set to 1.0 (encoded as
+ // 0). Applying halfbit_adjusted after assembling the float has the nice
+ // side-effect that this case is handled for free.
+ //
+ // This also handles cases where a very large finite value overflows to
+ // infinity, or where a very large subnormal value overflows to become
+ // normal.
+ return result + halfbit_adjusted;
+ } else {
+ // We have to shift the mantissa to the left (or not at all). The input
+ // mantissa is exactly representable in the output mantissa, so apply no
+ // rounding correction.
+ return (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa << -shift) << mantissa_offset);
+ }
+}
+
+
+// See FPRound for a description of this function.
+static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int64_t bits =
+ FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_double(bits);
+}
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int32_t bits =
+ FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_float(bits);
+}
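FPRoundToFloat and FPRoundToDouble only reinterpret the assembled (sign, exponent, mantissa) bits as a value. A minimal standalone check, independent of the patch, that this IEEE-754 bit layout matches the compiler's own single-precision representation:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // 1.5f: sign 0, unbiased exponent 0 (biased 127), and a 23-bit fraction of
  // 0b100...0; the implicit leading 1 is not encoded.
  uint32_t sign = 0;
  uint32_t biased_exponent = 127;
  uint32_t fraction = 1u << 22;
  uint32_t bits = (sign << 31) | (biased_exponent << 23) | fraction;

  float f;
  std::memcpy(&f, &bits, sizeof(f));
  assert(f == 1.5f);
  return 0;
}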
+
+
+double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToDouble(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToDouble(-src, fbits, round);
+ }
+}
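The sign-flip above relies on the magnitude of INT64_MIN still being representable once the value is handled as an unsigned 64-bit quantity. A small sketch of that corner case (not from the patch; the cast is done before negation here to stay within defined behaviour):

#include <cassert>
#include <cstdint>

int main() {
  // Negating INT64_MIN wraps in 64-bit two's complement, but interpreted as
  // unsigned the result is exactly 2^63, so the magnitude is preserved.
  int64_t src = INT64_MIN;
  uint64_t magnitude = -static_cast<uint64_t>(src);
  assert(magnitude == (1ULL << 63));
  return 0;
}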
+
+
+double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int64_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToDouble(0, exponent, src, round);
+}
+
+
+float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToFloat(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToFloat(-src, fbits, round);
+ }
+}
+
+
+float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0f;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int32_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToFloat(0, exponent, src, round);
+}
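For orientation (a sketch, not part of the patch): a fixed-point source with 'fbits' fractional bits represents src * 2^-fbits, which is exactly what the exponent computed above encodes before rounding.

#include <cassert>
#include <cstdint>

int main() {
  // 0b1101 (13) with one fractional bit represents 13 / 2 = 6.5.
  uint64_t src = 0xD;
  int fbits = 1;
  double value = static_cast<double>(src) / static_cast<double>(1ULL << fbits);
  assert(value == 6.5);
  return 0;
}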
+
+
+double Simulator::FPRoundInt(double value, FPRounding round_mode) {
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity) || std::isnan(value)) {
+ return value;
+ }
+
+ double int_result = floor(value);
+ double error = value - int_result;
+ switch (round_mode) {
+ case FPTieAway: {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is non-negative, round up.
+ if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPTieEven: {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ if ((error > 0.5) ||
+ ((error == 0.5) && (fmod(int_result, 2) != 0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPZero: {
+ // Round towards zero: floor(value) for positive values,
+ // ceil(value) for negative values.
+ if (value < 0) {
+ int_result = ceil(value);
+ }
+ break;
+ }
+ case FPNegativeInfinity: {
+ // We always use floor(value).
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+ return int_result;
+}
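A standalone sketch of the FPTieEven branch above, using a hypothetical RoundTiesToEven helper with the same floor-and-error structure, to make the tie-breaking rule concrete:

#include <cassert>
#include <cmath>

// Round to the nearest integer, breaking exact ties towards the even
// neighbour, as in the FPTieEven case of FPRoundInt.
static double RoundTiesToEven(double value) {
  double result = std::floor(value);
  double error = value - result;
  if ((error > 0.5) || ((error == 0.5) && (std::fmod(result, 2) != 0))) {
    result++;
  }
  return result;
}

int main() {
  assert(RoundTiesToEven(2.5) == 2.0);   // Tie: the even neighbour wins.
  assert(RoundTiesToEven(3.5) == 4.0);   // Tie: the even neighbour wins.
  assert(RoundTiesToEven(2.51) == 3.0);  // Not a tie: ordinary rounding.
  return 0;
}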
+
+
+double Simulator::FPToDouble(float value) {
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ // Convert NaNs as the processor would, assuming that FPCR.DN (default
+ // NaN) is not set:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
+ uint32_t raw = float_to_rawbits(value);
+
+ uint64_t sign = raw >> 31;
+ uint64_t exponent = (1 << 11) - 1;
+ uint64_t payload = unsigned_bitextract_64(21, 0, raw);
+ payload <<= (52 - 23); // The unused low-order bits should be 0.
+ payload |= (1L << 51); // Force a quiet NaN.
+
+ return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ case FP_INFINITE: {
+ // All other inputs are preserved in a standard cast, because every value
+ // representable using an IEEE-754 float is also representable using an
+ // IEEE-754 double.
+ return static_cast<double>(value);
+ }
+ }
+
+ UNREACHABLE();
+ return static_cast<double>(value);
+}
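The FP_NAN branch above only shuffles bits, so it can be checked without involving the host FPU at all. A self-contained sketch of the same widening (not part of the patch), done purely with integer operations:

#include <cassert>
#include <cstdint>

int main() {
  // A single-precision NaN given directly as raw bits: sign 1, exponent all
  // ones, quiet bit set, low-order payload 0x1234.
  uint32_t raw = 0xFFC01234u;

  uint64_t sign = raw >> 31;
  uint64_t exponent = (1ULL << 11) - 1;       // Double exponent: all ones.
  uint64_t payload = raw & ((1u << 22) - 1);  // Payload bits below the quiet bit.
  payload <<= (52 - 23);                      // Left-align in the double fraction.
  payload |= (1ULL << 51);                    // Force a quiet NaN.

  uint64_t d_bits = (sign << 63) | (exponent << 52) | payload;
  assert(d_bits == 0xFFF8024680000000ULL);
  return 0;
}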
+
+
+float Simulator::FPToFloat(double value, FPRounding round_mode) {
+ // Only the FPTieEven rounding mode is implemented.
+ ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ // Convert NaNs as the processor would, assuming that FPCR.DN (default
+ // NaN) is not set:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint64_t raw = double_to_rawbits(value);
+
+ uint32_t sign = raw >> 63;
+ uint32_t exponent = (1 << 8) - 1;
+ uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
+ payload |= (1 << 22); // Force a quiet NaN.
+
+ return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_INFINITE: {
+ // In a C++ cast, any value representable in the target type will be
+ // unchanged. This is always the case for +/-0.0 and infinities.
+ return static_cast<float>(value);
+ }
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-float as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+ uint64_t raw = double_to_rawbits(value);
+ // Extract the IEEE-754 double components.
+ uint32_t sign = raw >> 63;
+ // Extract the exponent and remove the IEEE-754 encoding bias.
+ int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
+ // Extract the mantissa and add the implicit '1' bit.
+ uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
+ if (std::fpclassify(value) == FP_NORMAL) {
+ mantissa |= (1UL << 52);
+ }
+ return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ UNREACHABLE();
+ return value;
+}
+
+
+void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FADD_s: set_sreg(fd, sreg(fn) + sreg(fm)); break;
+ case FADD_d: set_dreg(fd, dreg(fn) + dreg(fm)); break;
+ case FSUB_s: set_sreg(fd, sreg(fn) - sreg(fm)); break;
+ case FSUB_d: set_dreg(fd, dreg(fn) - dreg(fm)); break;
+ case FMUL_s: set_sreg(fd, sreg(fn) * sreg(fm)); break;
+ case FMUL_d: set_dreg(fd, dreg(fn) * dreg(fm)); break;
+ case FDIV_s: set_sreg(fd, sreg(fn) / sreg(fm)); break;
+ case FDIV_d: set_dreg(fd, dreg(fn) / dreg(fm)); break;
+ case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
+ case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
+ case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
+ case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
+ case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); break;
+ case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); break;
+ case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); break;
+ case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ unsigned fa = instr->Ra();
+
+ // The C99 (and C++11) fma function performs a fused multiply-accumulate.
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ // fd = fa +/- (fn * fm)
+ case FMADD_s: set_sreg(fd, fmaf(sreg(fn), sreg(fm), sreg(fa))); break;
+ case FMSUB_s: set_sreg(fd, fmaf(-sreg(fn), sreg(fm), sreg(fa))); break;
+ case FMADD_d: set_dreg(fd, fma(dreg(fn), dreg(fm), dreg(fa))); break;
+ case FMSUB_d: set_dreg(fd, fma(-dreg(fn), dreg(fm), dreg(fa))); break;
+ // Variants of the above where the result is negated.
+ case FNMADD_s: set_sreg(fd, -fmaf(sreg(fn), sreg(fm), sreg(fa))); break;
+ case FNMSUB_s: set_sreg(fd, -fmaf(-sreg(fn), sreg(fm), sreg(fa))); break;
+ case FNMADD_d: set_dreg(fd, -fma(dreg(fn), dreg(fm), dreg(fa))); break;
+ case FNMSUB_d: set_dreg(fd, -fma(-dreg(fn), dreg(fm), dreg(fa))); break;
+ default: UNIMPLEMENTED();
+ }
+}
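Using fma/fmaf matters because the product is not rounded before the accumulation. A small standalone illustration of that difference (assuming std::fma is genuinely fused, as C99/C++11 require, and ordinary double evaluation):

#include <cassert>
#include <cmath>

int main() {
  // The exact square of x = 1 + 2^-27 has a 2^-54 term that a separate
  // multiply rounds away; a fused multiply-add can recover it.
  double x = 1.0 + std::ldexp(1.0, -27);
  double rounded_square = x * x;                   // 1 + 2^-26; the 2^-54 term is lost.
  double error = std::fma(x, x, -rounded_square);  // Exact product minus rounded product.
  assert(error == std::ldexp(1.0, -54));
  return 0;
}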
+
+
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+ if (IsSignallingNaN(a)) {
+ return a;
+ } else if (IsSignallingNaN(b)) {
+ return b;
+ } else if (std::isnan(a)) {
+ ASSERT(IsQuietNaN(a));
+ return a;
+ } else if (std::isnan(b)) {
+ ASSERT(IsQuietNaN(b));
+ return b;
+ }
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return +0.0.
+ return 0.0;
+ } else {
+ return (a > b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMaxNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64NegativeInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64NegativeInfinity;
+ }
+ return FPMax(a, b);
+}
+
+template <typename T>
+T Simulator::FPMin(T a, T b) {
+ if (IsSignallingNaN(a)) {
+ return a;
+ } else if (IsSignallingNaN(b)) {
+ return b;
+ } else if (std::isnan(a)) {
+ ASSERT(IsQuietNaN(a));
+ return a;
+ } else if (std::isnan(b)) {
+ ASSERT(IsQuietNaN(b));
+ return b;
+ }
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return -0.0.
+ return -0.0;
+ } else {
+ return (a < b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMinNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64PositiveInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64PositiveInfinity;
+ }
+ return FPMin(a, b);
+}
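The *NM variants treat a single quiet NaN operand as missing, mirroring IEEE-754 maxNum/minNum. C99's fmax/fmin behave the same way for quiet NaNs, which gives a quick host-side cross-check (a sketch, not simulator code):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  double qnan = std::numeric_limits<double>::quiet_NaN();

  // One quiet NaN operand: the numeric operand wins, as with FMAXNM/FMINNM.
  assert(std::fmax(qnan, 1.0) == 1.0);
  assert(std::fmin(1.0, qnan) == 1.0);

  // Both operands NaN: the result is NaN.
  assert(std::isnan(std::fmax(qnan, qnan)));
  return 0;
}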
+
+
+void Simulator::VisitSystem(Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
+ case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
+ default: UNIMPLEMENTED();
+ }
+ break;
+ }
+ case MSR: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
+ case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
+ default: UNIMPLEMENTED();
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: break;
+ default: UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ __sync_synchronize();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
+bool Simulator::GetValue(const char* desc, int64_t* value) {
+ int regnum = CodeFromName(desc);
+ if (regnum >= 0) {
+ unsigned code = regnum;
+ if (code == kZeroRegCode) {
+ // Catch the zero register and return 0.
+ *value = 0;
+ return true;
+ } else if (code == kSPRegInternalCode) {
+ // Translate the stack pointer code to 31, for Reg31IsStackPointer.
+ code = 31;
+ }
+ if (desc[0] == 'w') {
+ *value = wreg(code, Reg31IsStackPointer);
+ } else {
+ *value = xreg(code, Reg31IsStackPointer);
+ }
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ } else {
+ return SScanF(desc, "%" SCNu64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+}
+
+
+bool Simulator::PrintValue(const char* desc) {
+ // Define some colour codes to use for the register dump.
+ // TODO(jbramley): Find a more elegant way of defining these.
+ char const * const clr_normal = FLAG_log_colour ? "\033[m" : "";
+ char const * const clr_reg_name = FLAG_log_colour ? "\033[1;34m" : "";
+ char const * const clr_reg_value = FLAG_log_colour ? "\033[1;36m" : "";
+ char const * const clr_fpreg_name = FLAG_log_colour ? "\033[1;33m" : "";
+ char const * const clr_fpreg_value = FLAG_log_colour ? "\033[1;35m" : "";
+
+ if (strcmp(desc, "csp") == 0) {
+ ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ PrintF("%s csp:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
+ return true;
+ } else if (strcmp(desc, "wcsp") == 0) {
+ ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ PrintF("%s wcsp:%s 0x%08" PRIx32 "%s\n",
+ clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
+ return true;
+ }
+
+ int i = CodeFromName(desc);
+ STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters);
+ if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false;
+
+ if (desc[0] == 'v') {
+ PrintF("%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+ clr_fpreg_name, VRegNameForCode(i),
+ clr_fpreg_value, double_to_rawbits(dreg(i)),
+ clr_normal,
+ clr_fpreg_name, DRegNameForCode(i),
+ clr_fpreg_value, dreg(i),
+ clr_fpreg_name, SRegNameForCode(i),
+ clr_fpreg_value, sreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 'd') {
+ PrintF("%s %s:%s %g%s\n",
+ clr_fpreg_name, DRegNameForCode(i),
+ clr_fpreg_value, dreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 's') {
+ PrintF("%s %s:%s %g%s\n",
+ clr_fpreg_name, SRegNameForCode(i),
+ clr_fpreg_value, sreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 'w') {
+ PrintF("%s %s:%s 0x%08" PRIx32 "%s\n",
+ clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
+ return true;
+ } else {
+ // X registers and their aliases have too many different first characters
+ // to list here, so treat anything not matched above as an X register.
+ PrintF("%s %s:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
+ return true;
+ }
+}
+
+
+void Simulator::Debug() {
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ bool done = false;
+ bool cleared_log_disasm_bit = false;
+
+ while (!done) {
+ // Disassemble the next instruction to execute before doing anything else.
+ PrintInstructionsAt(pc_, 1);
+ // Read the command line.
+ char* line = ReadLine("sim> ");
+ if (line == NULL) {
+ break;
+ } else {
+ // Repeat last command by default.
+ char* last_input = last_debugger_input();
+ if (strcmp(line, "\n") == 0 && (last_input != NULL)) {
+ DeleteArray(line);
+ line = last_input;
+ } else {
+ // Remember the latest command entered.
+ set_last_debugger_input(line);
+ }
+
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+
+ // stepi / si ------------------------------------------------------------
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ // We are about to execute instructions, after which by default we
+ // should increment the pc_. pc_modified_ may still be set from reaching
+ // this debug instruction, because the instruction has not completed yet,
+ // so clear it manually.
+ pc_modified_ = false;
+
+ if (argc == 1) {
+ ExecuteInstruction();
+ } else {
+ int64_t number_of_instructions_to_execute = 1;
+ GetValue(arg1, &number_of_instructions_to_execute);
+
+ set_log_parameters(log_parameters() | LOG_DISASM);
+ while (number_of_instructions_to_execute-- > 0) {
+ ExecuteInstruction();
+ }
+ set_log_parameters(log_parameters() & ~LOG_DISASM);
+ PrintF("\n");
+ }
+
+ // If it was necessary, the pc has already been updated or incremented
+ // when executing the instruction. So we do not want it to be updated
+ // again. It will be cleared when exiting.
+ pc_modified_ = true;
+
+ // next / n --------------------------------------------------------------
+ } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
+ // Tell the simulator to break after the next executed BL.
+ break_on_next_ = true;
+ // Continue.
+ done = true;
+
+ // continue / cont / c ---------------------------------------------------
+ } else if ((strcmp(cmd, "continue") == 0) ||
+ (strcmp(cmd, "cont") == 0) ||
+ (strcmp(cmd, "c") == 0)) {
+ // Leave the debugger shell.
+ done = true;
+
+ // disassemble / disasm / di ---------------------------------------------
+ } else if (strcmp(cmd, "disassemble") == 0 ||
+ strcmp(cmd, "disasm") == 0 ||
+ strcmp(cmd, "di") == 0) {
+ int64_t n_of_instrs_to_disasm = 10; // default value.
+ int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
+ if (argc >= 2) { // disasm <n of instrs>
+ GetValue(arg1, &n_of_instrs_to_disasm);
+ }
+ if (argc >= 3) { // disasm <n of instrs> <address>
+ GetValue(arg2, &address);
+ }
+
+ // Disassemble.
+ PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
+ n_of_instrs_to_disasm);
+ PrintF("\n");
+
+ // print / p -------------------------------------------------------------
+ } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
+ if (argc == 2) {
+ if (strcmp(arg1, "all") == 0) {
+ // TODO(all): better support for printing in the debugger.
+ PrintRegisters(true);
+ PrintFPRegisters(true);
+ } else {
+ if (!PrintValue(arg1)) {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ PrintF(
+ "print <register>\n"
+ " Print the content of a register. (alias 'p')\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n");
+ }
+
+ // printobject / po ------------------------------------------------------
+ } else if ((strcmp(cmd, "printobject") == 0) ||
+ (strcmp(cmd, "po") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ Object* obj = reinterpret_cast<Object*>(value);
+ PrintF("%s: \n", arg1);
+#ifdef DEBUG
+ obj->PrintLn();
+#else
+ obj->ShortPrint();
+ PrintF("\n");
+#endif
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("printobject <value>\n"
+ "printobject <register>\n"
+ " Print details about the value. (alias 'po')\n");
+ }
+
+ // stack / mem ----------------------------------------------------------
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = NULL;
+ int64_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(jssp());
+
+ } else { // "mem"
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words = 0;
+ if (argc == next_arg) {
+ words = 10;
+ } else if (argc == next_arg + 1) {
+ if (!GetValue(argv[next_arg], &words)) {
+ PrintF("%s unrecognized\n", argv[next_arg]);
+ PrintF("Printing 10 double words by default");
+ words = 10;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
+ reinterpret_cast<uint64_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ int64_t value = *cur;
+ Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ PrintF(" (");
+ if ((value & kSmiTagMask) == 0) {
+ STATIC_ASSERT(kSmiValueSize == 32);
+ int32_t untagged = (value >> kSmiShift) & 0xffffffff;
+ PrintF("smi %" PRId32, untagged);
+ } else {
+ obj->ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ // trace / t -------------------------------------------------------------
+ } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
+ if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
+ (LOG_DISASM | LOG_REGS)) {
+ PrintF("Enabling disassembly and registers tracing\n");
+ set_log_parameters(log_parameters() | LOG_DISASM | LOG_REGS);
+ } else {
+ PrintF("Disabling disassembly and registers tracing\n");
+ set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
+ }
+
+ // break / b -------------------------------------------------------------
+ } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ SetBreakpoint(reinterpret_cast<Instruction*>(value));
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ ListBreakpoints();
+ PrintF("Use `break <address>` to set or disable a breakpoint\n");
+ }
+
+ // gdb -------------------------------------------------------------------
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("Relinquishing control to gdb.\n");
+ OS::DebugBreak();
+ PrintF("Regaining control from gdb.\n");
+
+ // sysregs ---------------------------------------------------------------
+ } else if (strcmp(cmd, "sysregs") == 0) {
+ PrintSystemRegisters();
+
+ // help / h --------------------------------------------------------------
+ } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
+ PrintF(
+ "stepi / si\n"
+ " stepi <n>\n"
+ " Step <n> instructions.\n"
+ "next / n\n"
+ " Continue execution until a BL instruction is reached.\n"
+ " At this point a breakpoint is set just after this BL.\n"
+ " Then execution is resumed. It will probably later hit the\n"
+ " breakpoint just set.\n"
+ "continue / cont / c\n"
+ " Continue execution from here.\n"
+ "disassemble / disasm / di\n"
+ " disassemble <n> <address>\n"
+ " Disassemble <n> instructions from current <address>.\n"
+ " By default <n> is 20 and <address> is the current pc.\n"
+ "print / p\n"
+ " print <register>\n"
+ " Print the content of a register.\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n"
+ "printobject / po\n"
+ " printobject <value>\n"
+ " printobject <register>\n"
+ " Print details about the value.\n"
+ "stack\n"
+ " stack [<words>]\n"
+ " Dump stack content, default dump 10 words\n"
+ "mem\n"
+ " mem <address> [<words>]\n"
+ " Dump memory content, default dump 10 words\n"
+ "trace / t\n"
+ " Toggle disassembly and register tracing\n"
+ "break / b\n"
+ " break : list all breakpoints\n"
+ " break <address> : set / enable / disable a breakpoint.\n"
+ "gdb\n"
+ " Enter gdb.\n"
+ "sysregs\n"
+ " Print all system registers (including NZCV).\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ PrintF("Use 'help' for more information.\n");
+ }
+ }
+ if (cleared_log_disasm_bit == true) {
+ set_log_parameters(log_parameters_ | LOG_DISASM);
+ }
+ }
+}
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls, the code in
+// runtime.cc uses the ObjectPair structure.
+// The simulator assumes all runtime calls return two 64-bit values. If they
+// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
+struct ObjectPair {
+ int64_t res0;
+ int64_t res1;
+};
+
+
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3,
+ int64_t arg4,
+ int64_t arg5,
+ int64_t arg6,
+ int64_t arg7);
+
+typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPCall)(double arg1);
+typedef double (*SimulatorRuntimeFPIntCall)(double arg1, int32_t arg2);
+
+// This signature supports a direct call to an API function's native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
+ void* arg2);
+
+void Simulator::VisitException(Instruction* instr) {
+ // Define some colour codes to use for log messages.
+ // TODO(jbramley): Find a more elegant way of defining these.
+ char const* const clr_normal = (FLAG_log_colour) ? ("\033[m")
+ : ("");
+ char const* const clr_debug_number = (FLAG_log_colour) ? ("\033[1;33m")
+ : ("");
+ char const* const clr_debug_message = (FLAG_log_colour) ? ("\033[0;33m")
+ : ("");
+ char const* const clr_printf = (FLAG_log_colour) ? ("\033[0;32m")
+ : ("");
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: {
+ if (instr->ImmException() == kImmExceptionIsDebug) {
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t code;
+ uint32_t parameters;
+ char const * message;
+
+ ASSERT(sizeof(*pc_) == 1);
+ memcpy(&code, pc_ + kDebugCodeOffset, sizeof(code));
+ memcpy(&parameters, pc_ + kDebugParamsOffset, sizeof(parameters));
+ message = reinterpret_cast<char const *>(pc_ + kDebugMessageOffset);
+
+ // Always print something when we hit a debug point that breaks.
+ // We are going to break, so printing something is not an issue in
+ // terms of speed.
+ if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
+ if (message != NULL) {
+ PrintF("%sDebugger hit %d: %s%s%s\n",
+ clr_debug_number,
+ code,
+ clr_debug_message,
+ message,
+ clr_normal);
+ } else {
+ PrintF("%sDebugger hit %d.%s\n",
+ clr_debug_number,
+ code,
+ clr_normal);
+ }
+ }
+
+ // Other options.
+ switch (parameters & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ set_log_parameters(log_parameters() | parameters);
+ if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
+ if (parameters & LOG_REGS) { PrintRegisters(); }
+ if (parameters & LOG_FP_REGS) { PrintFPRegisters(); }
+ break;
+ case TRACE_DISABLE:
+ set_log_parameters(log_parameters() & ~parameters);
+ break;
+ case TRACE_OVERRIDE:
+ set_log_parameters(parameters);
+ break;
+ default:
+ // We don't support a one-shot LOG_DISASM.
+ ASSERT((parameters & LOG_DISASM) == 0);
+ // Don't print information that is already being traced.
+ parameters &= ~log_parameters();
+ // Print the requested information.
+ if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
+ if (parameters & LOG_REGS) PrintRegisters(true);
+ if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+ }
+
+ // The stop parameters are inlined in the code. Skip them:
+ // - Skip to the end of the message string.
+ pc_ += kDebugMessageOffset + strlen(message) + 1;
+ // - Advance to the next aligned location.
+ pc_ = AlignUp(pc_, kInstructionSize);
+ // - Verify that the unreachable marker is present.
+ ASSERT(pc_->Mask(ExceptionMask) == HLT);
+ ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable);
+ // - Skip past the unreachable marker.
+ set_pc(pc_->NextInstruction());
+
+ // Check if the debugger should break.
+ if (parameters & BREAK) Debug();
+
+ } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
+ // TODO(all): Extract the call redirection code into a separate
+ // function.
+
+ Redirection* redirection = Redirection::FromHltInstruction(instr);
+
+ // The called C code might itself call simulated code, so any
+ // caller-saved registers (including lr) could still be clobbered by a
+ // redirected call.
+ Instruction* return_address = lr();
+
+ // TODO(jbramley): Make external_function() a template so that we don't
+ // have to explicitly cast the result for each redirection type.
+ int64_t external =
+ reinterpret_cast<int64_t>(redirection->external_function());
+
+ TraceSim("Call to host function at %p\n",
+ reinterpret_cast<void*>(redirection->external_function()));
+
+ // SP must be 16-byte aligned at the call interface.
+ bool stack_alignment_exception = ((sp() & 0xf) != 0);
+ if (stack_alignment_exception) {
+ TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
+ FATAL("ALIGNMENT EXCEPTION");
+ }
+
+ switch (redirection->type()) {
+ default:
+ TraceSim("Type: Unknown.\n");
+ UNREACHABLE();
+ break;
+
+ case ExternalReference::BUILTIN_CALL: {
+ // MaybeObject* f(v8::internal::Arguments).
+ TraceSim("Type: BUILTIN_CALL\n");
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+
+ // We don't know how many arguments are being passed, but we can
+ // pass 8 without touching the stack. They will be ignored by the
+ // host function if they aren't used.
+ TraceSim("Arguments: "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64,
+ xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64"}\n",
+ result.res0, result.res1);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result.res0);
+ set_xreg(1, result.res1);
+ break;
+ }
+
+ case ExternalReference::DIRECT_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&)
+ TraceSim("Type: DIRECT_API_CALL\n");
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
+ target(xreg(0));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ // int f(double, double)
+ TraceSim("Type: BUILTIN_COMPARE_CALL\n");
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ int64_t result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %" PRId64 "\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_CALL: {
+ // double f(double)
+ TraceSim("Type: BUILTIN_FP_CALL\n");
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ TraceSim("Argument: %f\n", dreg(0));
+ double result = target(dreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ // double f(double, double)
+ TraceSim("Type: BUILTIN_FP_FP_CALL\n");
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ double result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ // double f(double, int)
+ TraceSim("Type: BUILTIN_FP_INT_CALL\n");
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
+ double result = target(dreg(0), wreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::DIRECT_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info)
+ TraceSim("Type: DIRECT_GETTER_CALL\n");
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n",
+ xreg(0), xreg(1));
+ target(xreg(0), xreg(1));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
+ TraceSim("Type: PROFILING_API_CALL\n");
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ void* arg1 = Redirection::ReverseRedirection(xreg(1));
+ TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
+ target(xreg(0), arg1);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info,
+ // AccessorGetterCallback callback)
+ TraceSim("Type: PROFILING_GETTER_CALL\n");
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
+ external);
+ void* arg2 = Redirection::ReverseRedirection(xreg(2));
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n",
+ xreg(0), xreg(1), arg2);
+ target(xreg(0), xreg(1), arg2);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+ }
+
+ set_lr(return_address);
+ set_pc(return_address);
+ } else if (instr->ImmException() == kImmExceptionIsPrintf) {
+ // Read the argument encoded inline in the instruction stream.
+ uint32_t type;
+ ASSERT(sizeof(*pc_) == 1);
+ memcpy(&type, pc_ + kPrintfTypeOffset, sizeof(type));
+
+ const char* format = reg<const char*>(0);
+
+ // Pass all of the relevant PCS registers onto printf. It doesn't
+ // matter if we pass too many as the extra ones won't be read.
+ int result;
+ fputs(clr_printf, stream_);
+ if (type == CPURegister::kRegister) {
+ result = fprintf(stream_, format,
+ xreg(1), xreg(2), xreg(3), xreg(4),
+ xreg(5), xreg(6), xreg(7));
+ } else if (type == CPURegister::kFPRegister) {
+ result = fprintf(stream_, format,
+ dreg(0), dreg(1), dreg(2), dreg(3),
+ dreg(4), dreg(5), dreg(6), dreg(7));
+ } else {
+ ASSERT(type == CPURegister::kNoRegister);
+ result = fprintf(stream_, "%s", format);
+ }
+ fputs(clr_normal, stream_);
+ set_xreg(0, result);
+
+ // TODO(jbramley): Consider clobbering all caller-saved registers here.
+
+ // The printf parameters are inlined in the code, so skip them.
+ set_pc(pc_->InstructionAtOffset(kPrintfLength));
+
+ // Set LR as if we'd just called a native printf function.
+ set_lr(pc());
+
+ } else if (instr->ImmException() == kImmExceptionIsUnreachable) {
+ fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
+ reinterpret_cast<void*>(pc_));
+ abort();
+
+ } else {
+ OS::DebugBreak();
+ }
+ break;
+ }
+
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+#endif // USE_SIMULATOR
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/simulator-a64.h b/deps/v8/src/a64/simulator-a64.h
new file mode 100644
index 0000000000..535f287096
--- /dev/null
+++ b/deps/v8/src/a64/simulator-a64.h
@@ -0,0 +1,868 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_SIMULATOR_A64_H_
+#define V8_A64_SIMULATOR_A64_H_
+
+#include <stdarg.h>
+#include <vector>
+
+#include "v8.h"
+
+#include "globals.h"
+#include "utils.h"
+#include "allocation.h"
+#include "assembler.h"
+#include "a64/assembler-a64.h"
+#include "a64/decoder-a64.h"
+#include "a64/disasm-a64.h"
+#include "a64/instrument-a64.h"
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+#if !defined(USE_SIMULATOR)
+
+// Running without a simulator on a native A64 platform.
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ (entry(p0, p1, p2, p3, p4))
+
+typedef int (*a64_regexp_matcher)(String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type a64_regexp_matcher.
+// The ninth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<a64_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
+// Running without a simulator there is nothing to do.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static void UnregisterCTryCatch() { }
+};
+
+#else // !defined(USE_SIMULATOR)
+
+enum ReverseByteMode {
+ Reverse16 = 0,
+ Reverse32 = 1,
+ Reverse64 = 2
+};
+
+
+// The proper way to initialize a simulated system register (such as NZCV) is as
+// follows:
+// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
+class SimSystemRegister {
+ public:
+ // The default constructor represents a register which has no writable bits.
+ // It is not possible to set its value to anything other than 0.
+ SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
+
+ uint32_t RawValue() const {
+ return value_;
+ }
+
+ void SetRawValue(uint32_t new_value) {
+ value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, value_);
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ return signed_bitextract_32(msb, lsb, value_);
+ }
+
+ void SetBits(int msb, int lsb, uint32_t bits);
+
+ // Default system register values.
+ static SimSystemRegister DefaultValueFor(SystemRegister id);
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ uint32_t Name() const { return Func(HighBit, LowBit); } \
+ void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+ static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
+
+ SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
+
+#undef DEFINE_WRITE_IGNORE_MASK
+#undef DEFINE_GETTER
+
+ protected:
+ // Most system registers only implement a few of the bits in the word. Other
+ // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
+ // describes the bits which are not modifiable.
+ SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
+ : value_(value), write_ignore_mask_(write_ignore_mask) { }
+
+ uint32_t value_;
+ uint32_t write_ignore_mask_;
+};
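A quick sketch, outside the patch, of how the write-ignore mask in SetRawValue keeps read-as-zero/write-ignored bits stable while letting the writable bits through (the example mask is made up):

#include <cassert>
#include <cstdint>

int main() {
  // Pretend only the low nibble of the register is writable.
  uint32_t write_ignore_mask = 0xFFFFFFF0u;
  uint32_t value = 0;

  // Same masking expression as SimSystemRegister::SetRawValue.
  uint32_t new_value = 0xDEADBEEFu;
  value = (value & write_ignore_mask) | (new_value & ~write_ignore_mask);
  assert(value == 0x0000000Fu);
  return 0;
}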
+
+
+// Represent a register (r0-r31, v0-v31).
+template<int kSizeInBytes>
+class SimRegisterBase {
+ public:
+ template<typename T>
+ void Set(T new_value, unsigned size = sizeof(T)) {
+ ASSERT(size <= kSizeInBytes);
+ ASSERT(size <= sizeof(new_value));
+ // All AArch64 registers are zero-extending: writing a W register clears the
+ // top bits of the corresponding X register.
+ memset(value_, 0, kSizeInBytes);
+ memcpy(value_, &new_value, size);
+ }
+
+ // Copy 'size' bytes of the register to the result, and zero-extend to fill
+ // the result.
+ template<typename T>
+ T Get(unsigned size = sizeof(T)) const {
+ ASSERT(size <= kSizeInBytes);
+ T result;
+ memset(&result, 0, sizeof(result));
+ memcpy(&result, value_, size);
+ return result;
+ }
+
+ protected:
+ uint8_t value_[kSizeInBytes];
+};
+typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31
+typedef SimRegisterBase<kDRegSizeInBytes> SimFPRegister; // v0-v31
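A minimal sketch, independent of the patch, of the zero-extension rule that SimRegisterBase::Set implements: the backing storage is cleared before the (possibly narrower) value is copied in, so a W-sized write clears the top half of the X register. The readback assumes a little-endian host.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint8_t storage[8];

  // X-sized write: fill the register with all ones.
  uint64_t x = 0xFFFFFFFFFFFFFFFFULL;
  std::memset(storage, 0, sizeof(storage));
  std::memcpy(storage, &x, sizeof(x));

  // W-sized write: clear first, then copy only four bytes, as Set() does.
  uint32_t w = 0x12345678u;
  std::memset(storage, 0, sizeof(storage));
  std::memcpy(storage, &w, sizeof(w));

  uint64_t readback;
  std::memcpy(&readback, storage, sizeof(readback));
  assert(readback == 0x0000000012345678ULL);  // Top 32 bits are zero.
  return 0;
}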
+
+
+class Simulator : public DecoderVisitor {
+ public:
+ explicit Simulator(Decoder* decoder,
+ Isolate* isolate = NULL,
+ FILE* stream = stderr);
+ ~Simulator();
+
+ // System functions.
+
+ static void Initialize(Isolate* isolate);
+
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ class CallArgument;
+
+ // Call an arbitrary function taking an arbitrary number of arguments. The
+ // varargs list must be a set of arguments with type CallArgument, and
+ // terminated by CallArgument::End().
+ void CallVoid(byte* entry, CallArgument* args);
+
+ // Like CallVoid, but expect a return value.
+ int64_t CallInt64(byte* entry, CallArgument* args);
+ double CallDouble(byte* entry, CallArgument* args);
+
+ // V8 calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 10 parameters. These are convenience functions,
+ // which set up the simulator state and grab the result on return.
+ int64_t CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+ Object* recv,
+ int64_t argc,
+ Object*** argv);
+ int64_t CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+ // A wrapper class that stores an argument for one of the above Call
+ // functions.
+ //
+ // Only arguments up to 64 bits in size are supported.
+ class CallArgument {
+ public:
+ template<typename T>
+ explicit CallArgument(T argument) {
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = X_ARG;
+ }
+
+ explicit CallArgument(double argument) {
+ ASSERT(sizeof(argument) == sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ explicit CallArgument(float argument) {
+ // TODO(all): CallArgument(float) is untested, remove this check once
+ // tested.
+ UNIMPLEMENTED();
+ // Make the D register a NaN to try to trap errors if the callee expects a
+ // double. If it expects a float, the callee should ignore the top word.
+ ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
+ memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
+ // Write the float payload to the S register.
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ // This indicates the end of the arguments list, so that CallArgument
+ // objects can be passed into varargs functions.
+ static CallArgument End() { return CallArgument(); }
+
+ int64_t bits() const { return bits_; }
+ bool IsEnd() const { return type_ == NO_ARG; }
+ bool IsX() const { return type_ == X_ARG; }
+ bool IsD() const { return type_ == D_ARG; }
+
+ private:
+ enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
+
+ // All arguments are aligned to at least 64 bits and we don't support
+ // passing bigger arguments, so the payload size can be fixed at 64 bits.
+ int64_t bits_;
+ CallArgumentType type_;
+
+ CallArgument() { type_ = NO_ARG; }
+ };
+
+
+ // Start the debugging command line.
+ void Debug();
+
+ bool GetValue(const char* desc, int64_t* value);
+
+ bool PrintValue(const char* desc);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ void ResetState();
+
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
+ ExternalReference::Type type);
+
+ // Run the simulator.
+ static const Instruction* kEndOfSimAddress;
+ void DecodeInstruction();
+ void Run();
+ void RunFrom(Instruction* start);
+
+ // Simulation helpers.
+ template <typename T>
+ void set_pc(T new_pc) {
+ ASSERT(sizeof(T) == sizeof(pc_));
+ memcpy(&pc_, &new_pc, sizeof(T));
+ pc_modified_ = true;
+ }
+ Instruction* pc() { return pc_; }
+
+ void increment_pc() {
+ if (!pc_modified_) {
+ pc_ = pc_->NextInstruction();
+ }
+
+ pc_modified_ = false;
+ }
+
+ void ExecuteInstruction() {
+ ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
+ CheckBreakNext();
+ decoder_->Decode(pc_);
+ LogProcessorState();
+ increment_pc();
+ CheckBreakpoints();
+ }
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ // Register accessors.
+
+ // Return 'size' bits of the value of an integer register, as the specified
+ // type. The value is zero-extended to fill the result.
+ //
+ // The only supported values of 'size' are kXRegSize and kWRegSize.
+ template<typename T>
+ T reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kXRegSize) || (size == kWRegSize));
+ ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ T result;
+ memset(&result, 0, sizeof(result));
+ return result;
+ }
+ return registers_[code].Get<T>(size_in_bytes);
+ }
+
+ // Like reg(), but infer the access size from the template type.
+ template<typename T>
+ T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<T>(sizeof(T) * 8, code, r31mode);
+ }
+
+ // Common specialized accessors for the reg() template.
+ int32_t wreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int32_t>(code, r31mode);
+ }
+
+ int64_t xreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(code, r31mode);
+ }
+
+ int64_t reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(size, code, r31mode);
+ }
+
+ // Write 'size' bits of 'value' into an integer register. The value is
+ // zero-extended. This behaviour matches AArch64 register writes.
+ //
+ // The only supported values of 'size' are kXRegSize and kWRegSize.
+ template<typename T>
+ void set_reg(unsigned size, unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kXRegSize) || (size == kWRegSize));
+ ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+ return registers_[code].Set(value, size_in_bytes);
+ }
+
+ // Like set_reg(), but infer the access size from the template type.
+ template<typename T>
+ void set_reg(unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(sizeof(value) * 8, code, value, r31mode);
+ }
+
+ // Common specialized accessors for the set_reg() template.
+ void set_wreg(unsigned code, int32_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(kWRegSize, code, value, r31mode);
+ }
+
+ void set_xreg(unsigned code, int64_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(kXRegSize, code, value, r31mode);
+ }
+
+ // Commonly-used special cases.
+ template<typename T>
+ void set_lr(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(kLinkRegCode, value);
+ }
+
+ template<typename T>
+ void set_sp(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(31, value, Reg31IsStackPointer);
+ }
+
+ int64_t sp() { return xreg(31, Reg31IsStackPointer); }
+ int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
+ int64_t fp() {
+ return xreg(kFramePointerRegCode, Reg31IsStackPointer);
+ }
+ Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
+
+ Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
+
+ // Return 'size' bits of the value of a floating-point register, as the
+ // specified type. The value is zero-extended to fill the result.
+ //
+ // The only supported values of 'size' are kDRegSize and kSRegSize.
+ template<typename T>
+ T fpreg(unsigned size, unsigned code) const {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kDRegSize) || (size == kSRegSize));
+ ASSERT(code < kNumberOfFPRegisters);
+ return fpregisters_[code].Get<T>(size_in_bytes);
+ }
+
+ // Like fpreg(), but infer the access size from the template type.
+ template<typename T>
+ T fpreg(unsigned code) const {
+ return fpreg<T>(sizeof(T) * 8, code);
+ }
+
+ // Common specialized accessors for the fpreg() template.
+ float sreg(unsigned code) const {
+ return fpreg<float>(code);
+ }
+
+ uint32_t sreg_bits(unsigned code) const {
+ return fpreg<uint32_t>(code);
+ }
+
+ double dreg(unsigned code) const {
+ return fpreg<double>(code);
+ }
+
+ uint64_t dreg_bits(unsigned code) const {
+ return fpreg<uint64_t>(code);
+ }
+
+ double fpreg(unsigned size, unsigned code) const {
+ switch (size) {
+ case kSRegSize: return sreg(code);
+ case kDRegSize: return dreg(code);
+ default:
+ UNREACHABLE();
+ return 0.0;
+ }
+ }
+
+ // Write 'value' into a floating-point register. The value is zero-extended.
+ // This behaviour matches AArch64 register writes.
+ template<typename T>
+ void set_fpreg(unsigned code, T value) {
+ ASSERT((sizeof(value) == kDRegSizeInBytes) ||
+ (sizeof(value) == kSRegSizeInBytes));
+ ASSERT(code < kNumberOfFPRegisters);
+ fpregisters_[code].Set(value, sizeof(value));
+ }
+
+ // Common specialized accessors for the set_fpreg() template.
+ void set_sreg(unsigned code, float value) {
+ set_fpreg(code, value);
+ }
+
+ void set_sreg_bits(unsigned code, uint32_t value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg(unsigned code, double value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg_bits(unsigned code, uint64_t value) {
+ set_fpreg(code, value);
+ }
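+ // Illustrative use of the FP register accessors above. This example is not
+ // part of the original patch; 'sim' is assumed to be a Simulator*.
+ //   sim->set_dreg(0, 1.5);              // Write d0.
+ //   double d = sim->dreg(0);            // Read d0 as a double (1.5).
+ //   uint64_t bits = sim->dreg_bits(0);  // Read the raw bit pattern of d0.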
+
+ bool N() { return nzcv_.N() != 0; }
+ bool Z() { return nzcv_.Z() != 0; }
+ bool C() { return nzcv_.C() != 0; }
+ bool V() { return nzcv_.V() != 0; }
+ SimSystemRegister& nzcv() { return nzcv_; }
+
+ // TODO(jbramley): Find a way to make the fpcr_ members return the proper
+ // types, so this accessor is not necessary.
+ FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
+ SimSystemRegister& fpcr() { return fpcr_; }
+
+ // Debug helpers
+
+ // Simulator breakpoints.
+ struct Breakpoint {
+ Instruction* location;
+ bool enabled;
+ };
+ std::vector<Breakpoint> breakpoints_;
+ void SetBreakpoint(Instruction* breakpoint);
+ void ListBreakpoints();
+ void CheckBreakpoints();
+
+ // Helpers for the 'next' command.
+ // When this is set, the Simulator will insert a breakpoint after the next BL
+ // instruction it meets.
+ bool break_on_next_;
+ // Check if the Simulator should insert a break after the current instruction
+ // for the 'next' command.
+ void CheckBreakNext();
+
+ // Disassemble instruction at the given address.
+ void PrintInstructionsAt(Instruction* pc, uint64_t count);
+
+ void PrintSystemRegisters(bool print_all = false);
+ void PrintRegisters(bool print_all_regs = false);
+ void PrintFPRegisters(bool print_all_regs = false);
+ void PrintProcessorState();
+ void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void LogSystemRegisters() {
+ if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
+ }
+ void LogRegisters() {
+ if (log_parameters_ & LOG_REGS) PrintRegisters();
+ }
+ void LogFPRegisters() {
+ if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
+ }
+ void LogProcessorState() {
+ LogSystemRegisters();
+ LogRegisters();
+ LogFPRegisters();
+ }
+ void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
+ if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
+ }
+
+ int log_parameters() { return log_parameters_; }
+ void set_log_parameters(int new_parameters) {
+ if (new_parameters & LOG_DISASM) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ } else {
+ decoder_->RemoveVisitor(print_disasm_);
+ }
+ log_parameters_ = new_parameters;
+ }
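+ // Illustrative use of set_log_parameters(); not part of the original patch,
+ // and 'sim' is assumed to be a Simulator*:
+ //   sim->set_log_parameters(sim->log_parameters() | LOG_DISASM | LOG_REGS);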
+
+ static inline const char* WRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* XRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* SRegNameForCode(unsigned code);
+ static inline const char* DRegNameForCode(unsigned code);
+ static inline const char* VRegNameForCode(unsigned code);
+ static inline int CodeFromName(const char* name);
+
+ protected:
+ // Simulation helpers ------------------------------------
+ bool ConditionPassed(Condition cond) {
+ switch (cond) {
+ case eq:
+ return Z();
+ case ne:
+ return !Z();
+ case hs:
+ return C();
+ case lo:
+ return !C();
+ case mi:
+ return N();
+ case pl:
+ return !N();
+ case vs:
+ return V();
+ case vc:
+ return !V();
+ case hi:
+ return C() && !Z();
+ case ls:
+ return !(C() && !Z());
+ case ge:
+ return N() == V();
+ case lt:
+ return N() != V();
+ case gt:
+ return !Z() && (N() == V());
+ case le:
+ return !(!Z() && (N() == V()));
+ case nv: // Fall through.
+ case al:
+ return true;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
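+ // For example, with Z set and C clear, ConditionPassed(eq) and
+ // ConditionPassed(ls) are true, while ConditionPassed(hi) is false.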
+
+ bool ConditionFailed(Condition cond) {
+ return !ConditionPassed(cond);
+ }
+
+ void AddSubHelper(Instruction* instr, int64_t op2);
+ int64_t AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ int64_t src1,
+ int64_t src2,
+ int64_t carry_in = 0);
+ void LogicalHelper(Instruction* instr, int64_t op2);
+ void ConditionalCompareHelper(Instruction* instr, int64_t op2);
+ void LoadStoreHelper(Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
+ uint8_t* LoadStoreAddress(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStoreWriteBack(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
+
+ uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
+ uint8_t MemoryRead8(uint8_t* address);
+ uint16_t MemoryRead16(uint8_t* address);
+ uint32_t MemoryRead32(uint8_t* address);
+ float MemoryReadFP32(uint8_t* address);
+ uint64_t MemoryRead64(uint8_t* address);
+ double MemoryReadFP64(uint8_t* address);
+
+ void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void MemoryWrite32(uint8_t* address, uint32_t value);
+ void MemoryWriteFP32(uint8_t* address, float value);
+ void MemoryWrite64(uint8_t* address, uint64_t value);
+ void MemoryWriteFP64(uint8_t* address, double value);
+
+ int64_t ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t Rotate(unsigned reg_width,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t ExtendValue(unsigned reg_width,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift = 0);
+
+ uint64_t ReverseBits(uint64_t value, unsigned num_bits);
+ uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
+
+ void FPCompare(double val0, double val1);
+ double FPRoundInt(double value, FPRounding round_mode);
+ double FPToDouble(float value);
+ float FPToFloat(double value, FPRounding round_mode);
+ double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
+ double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
+ float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
+ float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
+ int32_t FPToInt32(double value, FPRounding rmode);
+ int64_t FPToInt64(double value, FPRounding rmode);
+ uint32_t FPToUInt32(double value, FPRounding rmode);
+ uint64_t FPToUInt64(double value, FPRounding rmode);
+
+ template <typename T>
+ T FPMax(T a, T b);
+
+ template <typename T>
+ T FPMin(T a, T b);
+
+ template <typename T>
+ T FPMaxNM(T a, T b);
+
+ template <typename T>
+ T FPMinNM(T a, T b);
+
+ void CheckStackAlignment();
+
+ inline void CheckPCSComplianceAndRun();
+
+#ifdef DEBUG
+ // Corruption values should have their least significant byte cleared so
+ // that the code of the register being corrupted can be inserted there.
+ static const uint64_t kCallerSavedRegisterCorruptionValue =
+ 0xca11edc0de000000UL;
+ // This value is a NaN in both 32-bit and 64-bit FP.
+ static const uint64_t kCallerSavedFPRegisterCorruptionValue =
+ 0x7ff000007f801000UL;
+ // This value is a mix of 32-bit and 64-bit NaNs and a "verbose" immediate.
+ static const uint64_t kDefaultCPURegisterCorruptionValue =
+ 0x7ffbad007f8bad00UL;
+
+ void CorruptRegisters(CPURegList* list,
+ uint64_t value = kDefaultCPURegisterCorruptionValue);
+ void CorruptAllCallerSavedCPURegisters();
+#endif
+
+ // Processor state ---------------------------------------
+
+ // Output stream.
+ FILE* stream_;
+ PrintDisassembler* print_disasm_;
+
+ // Instrumentation.
+ Instrument* instrument_;
+
+ // General purpose registers. Register 31 is the stack pointer.
+ SimRegister registers_[kNumberOfRegisters];
+
+ // Floating point registers
+ SimFPRegister fpregisters_[kNumberOfFPRegisters];
+
+ // Processor state
+ // bits[31, 28]: Condition flags N, Z, C, and V.
+ // (Negative, Zero, Carry, Overflow)
+ SimSystemRegister nzcv_;
+
+ // Floating-Point Control Register
+ SimSystemRegister fpcr_;
+
+ // Only a subset of FPCR features are supported by the simulator. This helper
+ // checks that the FPCR settings are supported.
+ //
+ // This is checked when floating-point instructions are executed, not when
+ // FPCR is set. This allows generated code to modify FPCR for external
+ // functions, or to save and restore it when entering and leaving generated
+ // code.
+ void AssertSupportedFPCR() {
+ ASSERT(fpcr().DN() == 0); // No default-NaN support.
+ ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
+ ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
+
+ // The simulator does not support half-precision operations so fpcr().AHP()
+ // is irrelevant, and is not checked here.
+ }
+
+ static int CalcNFlag(uint64_t result, unsigned reg_size) {
+ return (result >> (reg_size - 1)) & 1;
+ }
+
+ static int CalcZFlag(uint64_t result) {
+ return result == 0;
+ }
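+ // For example, CalcNFlag(UINT64_C(1) << 63, kXRegSize) is 1 because the
+ // sign bit of the 64-bit result is set, and CalcZFlag(0) is 1.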
+
+ static const uint32_t kConditionFlagsMask = 0xf0000000;
+
+ // Stack
+ byte* stack_;
+ static const intptr_t stack_protection_size_ = KB;
+ intptr_t stack_size_;
+ byte* stack_limit_;
+ // TODO(aleram): protect the stack.
+
+ Decoder* decoder_;
+ Decoder* disassembler_decoder_;
+
+ // Indicates if the pc has been modified by the instruction and should not be
+ // automatically incremented.
+ bool pc_modified_;
+ Instruction* pc_;
+
+ static const char* xreg_names[];
+ static const char* wreg_names[];
+ static const char* sreg_names[];
+ static const char* dreg_names[];
+ static const char* vreg_names[];
+
+ // Debugger input.
+ void set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+ }
+ char* last_debugger_input() { return last_debugger_input_; }
+ char* last_debugger_input_;
+
+ private:
+ int log_parameters_;
+ Isolate* isolate_;
+};
+
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
+ FUNCTION_ADDR(entry), \
+ p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ Simulator::current(Isolate::Current())->CallRegExp( \
+ entry, \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.
+// See also 'class SimulatorStack' in arm/simulator-arm.h.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
+ }
+
+ static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(Isolate::Current());
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static void UnregisterCTryCatch() {
+ Simulator::current(Isolate::Current())->PopAddress();
+ }
+};
+
+#endif // !defined(USE_SIMULATOR)
+
+} } // namespace v8::internal
+
+#endif // V8_A64_SIMULATOR_A64_H_
diff --git a/deps/v8/src/a64/stub-cache-a64.cc b/deps/v8/src/a64/stub-cache-a64.cc
new file mode 100644
index 0000000000..57c03e8b96
--- /dev/null
+++ b/deps/v8/src/a64/stub-cache-a64.cc
@@ -0,0 +1,1548 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "ic-inl.h"
+#include "codegen.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(!AreAliased(receiver, scratch0, scratch1));
+ ASSERT(name->IsUniqueName());
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
+ __ B(ne, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
+
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
+ __ Bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+// Probe primary or secondary table.
+// If the entry is found in the cache, the generated code jumps to the first
+// instruction of the stub in the cache.
+// If there is a miss, the code falls through.
+//
+// 'receiver', 'name' and 'offset' registers are preserved on miss.
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register receiver,
+ Register name,
+ Register offset,
+ Register scratch,
+ Register scratch2,
+ Register scratch3) {
+ // Some code below relies on the fact that the Entry struct contains
+ // 3 pointers (name, code, map).
+ STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
+
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+ uintptr_t value_off_addr =
+ reinterpret_cast<uintptr_t>(value_offset.address());
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+ Label miss;
+
+ ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
+
+ // Multiply by 3 because there are 3 fields per entry.
+ __ Add(scratch3, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ Mov(scratch, Operand(key_offset));
+ __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
+
+ // Check that the key in the entry matches the name.
+ __ Ldr(scratch2, MemOperand(scratch));
+ __ Cmp(name, scratch2);
+ __ B(ne, &miss);
+
+ // Check the map matches.
+ __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
+ __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Cmp(scratch2, scratch3);
+ __ B(ne, &miss);
+
+ // Get the code entry from the cache.
+ __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
+ __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
+ __ Cmp(scratch2.W(), flags);
+ __ B(ne, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ B(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ B(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(scratch);
+
+ // Miss: fall through.
+ __ Bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra,
+ Register extra2,
+ Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+ // Make sure the extra, extra2 and extra3 registers are valid.
+ ASSERT(!extra.is(no_reg));
+ ASSERT(!extra2.is(no_reg));
+ ASSERT(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+ extra2, extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Compute the hash for primary table.
+ __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Add(scratch, scratch, extra);
+ __ Eor(scratch, scratch, flags);
+ // We shift out the last two bits because they are not part of the hash.
+ __ Ubfx(scratch, scratch, kHeapObjectTagSize,
+ CountTrailingZeros(kPrimaryTableSize, 64));
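+ // In other words, the primary hash is ((hash_field + map) ^ flags), from
+ // which log2(kPrimaryTableSize) bits are extracted, starting just above the
+ // heap object tag bits.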
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary table.
+ __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+ __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
+ __ And(scratch, scratch, kSecondaryTableSize - 1);
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+ extra2, extra3);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ Ldr(prototype, GlobalObjectMemOperand());
+ // Load the native context from the global or builtins object.
+ __ Ldr(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ __ Ldr(prototype, ContextMemOperand(prototype, index));
+ // Load the initial map. The global functions all have initial maps.
+ __ Ldr(prototype,
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ __ Ldr(scratch, GlobalObjectMemOperand());
+ __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ Ldr(scratch, ContextMemOperand(scratch, index));
+ __ Cmp(scratch, Operand(function));
+ __ B(ne, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ USE(representation);
+ if (inobject) {
+ int offset = index * kPointerSize;
+ __ Ldr(dst, FieldMemOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ Ldr(dst, FieldMemOperand(dst, offset));
+ }
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ ASSERT(!AreAliased(receiver, scratch));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss_label);
+
+ // Check that the object is a JS array.
+ __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
+ miss_label);
+
+ // Load length directly from the JS array.
+ __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Ret();
+}
+
+
+// Generate code to check if an object is a string. If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+static void GenerateStringCheck(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Label* smi,
+ Label* non_string_object) {
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, smi);
+
+ // Get the object's instance type field.
+ __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ // Check if the "not string" bit is set.
+ __ Tbnz(scratch1, MaskToBit(kNotStringTag), non_string_object);
+}
+
+
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object,
+// execution continues at the miss label. The register containing the
+// receiver is not clobbered if the receiver is not a string.
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ // Input registers can't alias because we don't want to clobber the
+ // receiver register if the object is not a string.
+ ASSERT(!AreAliased(receiver, scratch1, scratch2));
+
+ Label check_wrapper;
+
+ // Check if the object is a string leaving the instance type in the
+ // scratch1 register.
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+
+ // Load length directly from the string.
+ __ Ldr(x0, FieldMemOperand(receiver, String::kLengthOffset));
+ __ Ret();
+
+ // Check if the object is a JSValue wrapper.
+ __ Bind(&check_wrapper);
+ __ Cmp(scratch1, Operand(JS_VALUE_TYPE));
+ __ B(ne, miss);
+
+ // Unwrap the value and check if the wrapped value is a string.
+ __ Ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, miss, miss);
+ __ Ldr(x0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ // TryGetFunctionPrototype can't put the result directly in x0 because the
+ // three input registers can't alias and we call this function from
+ // LoadIC::GenerateFunctionPrototype, where the receiver is x0. So we
+ // explicitly move the result into x0.
+ __ Mov(x0, scratch1);
+ __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ Mov(scratch, Operand(cell));
+ __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
+}
+
+
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// Generate StoreTransition code. The value is passed in the x0 register.
+// When leaving generated code after success, the receiver_reg and storage_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name registers
+// have their original values.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
+ Label exit;
+
+ ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
+ scratch1, scratch2, scratch3));
+
+ // We don't need scratch3.
+ scratch3 = NoReg;
+
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ LoadObject(scratch1, constant);
+ __ Cmp(value_reg, scratch1);
+ __ B(ne, miss_label);
+ } else if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2);
+
+ // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register?
+ // It's only used in Fcmp, but it's not really safe to use it like this.
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntagToDouble(fp_scratch, value_reg);
+ __ B(&do_store);
+
+ __ Bind(&heap_number);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ Bind(&do_store);
+ __ Str(fp_scratch, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if ((details.type() == FIELD) &&
+ (object->map()->unused_property_fields() == 0)) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ Mov(scratch1, Operand(transition));
+ __ Push(receiver_reg, scratch1, value_reg);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ Mov(scratch1, Operand(transition));
+ __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ // TODO(jbramley): This construct appears in several places in this
+ // function. Try to clean it up, perhaps using a result_reg.
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ Str(storage_reg, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ Str(storage_reg, FieldMemOperand(scratch1, offset));
+ } else {
+ __ Str(value_reg, FieldMemOperand(scratch1, offset));
+ }
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ __ Bind(&exit);
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+}
+
+
+// Generate StoreField code. The value is passed in the x0 register.
+// When leaving generated code after success, the receiver_reg and name_reg may
+// be clobbered. Upon branch to miss_label, the receiver and name registers have
+// their original values.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // x0 : value
+ Label exit;
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = (index * kPointerSize) + object->map()->instance_size();
+ __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset));
+ } else {
+ int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Ldr(scratch1, FieldMemOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register?
+ // It's only used in Fcmp, but it's not really safe to use it like this.
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntagToDouble(fp_scratch, value_reg);
+ __ B(&do_store);
+
+ __ Bind(&heap_number);
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ Bind(&do_store);
+ __ Str(fp_scratch, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+ return;
+ }
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ Mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Str(value_reg, FieldMemOperand(scratch1, offset));
+
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ Mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ __ Bind(&exit);
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+}
+
+
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ Bind(label);
+ __ Mov(this->name(), Operand(name));
+ }
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
+
+ __ Push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ Mov(scratch, Operand(interceptor));
+ __ Push(scratch, receiver, holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!AreAliased(receiver, scratch));
+ __ Push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ // TODO(jbramley): Push these in as few Push() calls as possible.
+ Register arg = values[argc-1-i];
+ ASSERT(!AreAliased(receiver, scratch, arg));
+ __ Push(arg);
+ }
+
+ ASSERT(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Mov(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ LoadObject(call_data, api_call_info);
+ __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ LoadObject(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference ref = ExternalReference(&fun,
+ ExternalReference::DIRECT_API_CALL,
+ masm->isolate());
+ __ Mov(api_function_address, Operand(ref));
+
+ // Jump to stub.
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Handle<Name> name,
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ Mov(scratch1, Operand(receiver_map));
+
+ // object_reg and holder_reg registers can alias.
+ ASSERT(!AreAliased(object_reg, scratch1, scratch2));
+ ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant());
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ ASSERT(current.is_null() ||
+ (current->property_dictionary()->FindEntry(*name) ==
+ NameDictionary::kNotFound));
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
+
+ __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ bool need_map = (depth != 1 || check == CHECK_ALL_MAPS) ||
+ heap()->InNewSpace(*prototype);
+ Register map_reg = NoReg;
+ if (need_map) {
+ map_reg = scratch1;
+ __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (heap()->InNewSpace(*prototype)) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ Mov(reg, Operand(prototype));
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ // Check the holder map.
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ __ Bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> callback) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+
+ // TODO(jbramley): HandlerFrontendHeader returns its result in scratch1(), so
+ // we can't use it below, but that isn't very obvious. Is there a better way
+ // of handling this?
+
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!AreAliased(reg, scratch2(), scratch3(), scratch4()));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch4();
+ __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ Bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3();
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
+ __ Cmp(scratch2(), Operand(callback));
+ __ B(ne, &miss);
+ }
+
+ HandlerFrontendFooter(name, &miss);
+ return reg;
+}
+
+
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
+ __ Mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
+}
+
+
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ LoadObject(x0, value);
+ __ Ret();
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
+ ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+ // Build the ExecutableAccessorInfo::args_ list on the stack and push the
+ // property name below the exit frame to make the GC aware of them and to
+ // store pointers to them.
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+
+ __ Push(receiver());
+
+ if (heap()->InNewSpace(callback->data())) {
+ __ Mov(scratch3(), Operand(callback));
+ __ Ldr(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
+ }
+ // TODO(jbramley): Find another scratch register and combine the pushes
+ // together. Can we use scratch1() here?
+ __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
+ __ Push(scratch3(), scratch4());
+ __ Mov(scratch3(), Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch4(), scratch3(), reg, name());
+
+ Register args_addr = scratch2();
+ __ Add(args_addr, __ StackPointer(), kPointerSize);
+
+ // Stack at this point:
+ // sp[40] callback data
+ // sp[32] undefined
+ // sp[24] undefined
+ // sp[16] isolate
+ // args_addr -> sp[8] reg
+ // sp[0] name
+
+ // Abi for CallApiGetter.
+ Register getter_address_reg = x2;
+
+ // Set up the call.
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ Mov(getter_address_reg, Operand(ref));
+
+ CallApiGetterStub stub;
+ __ TailCallStub(&stub);
+}
+
+
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<Object> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ ASSERT(!AreAliased(receiver(), this->name(),
+ scratch1(), scratch2(), scratch3()));
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only those; other cases may be added later.
+ bool compile_followup_inline = false;
+ if (lookup->IsFound() && lookup->IsCacheable()) {
+ if (lookup->IsField()) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from
+ // the holder and it is needed should the interceptor return without any
+ // result. The CALLBACKS case needs the receiver to be passed into C++
+ // code, and the FIELD case might cause a miss during the prototype check.
+ bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+ bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
+ (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
+ }
+ // Invoke an interceptor. Note: map checks from the receiver to the
+ // interceptor's holder have been compiled before (see a caller of
+ // this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ JumpIfRoot(x0,
+ Heap::kNoInterceptorResultSentinelRootIndex,
+ &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ Bind(&interceptor_failed);
+ if (must_preserve_receiver_reg) {
+ __ Pop(this->name(), holder_reg, receiver());
+ } else {
+ __ Pop(this->name(), holder_reg);
+ }
+ // Leave the internal frame.
+ }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ PushInterceptorArguments(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+ isolate());
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
+ }
+}
+
+
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
+ Label success;
+ // Check that the object is a boolean.
+ // TODO(all): Optimize this like LCodeGen::DoDeferredTaggedToI.
+ __ JumpIfRoot(object, Heap::kTrueValueRootIndex, &success);
+ __ JumpIfNotRoot(object, Heap::kFalseValueRootIndex, miss);
+ __ Bind(&success);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+
+ // TODO(jbramley): Make Push take more than four arguments and combine these
+ // two calls.
+ __ Push(receiver(), holder_reg);
+ __ Mov(scratch1(), Operand(callback));
+ __ Mov(scratch2(), Operand(name));
+ __ Push(scratch1(), scratch2(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<HeapType> type,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Register value = x0;
+ Register receiver = x1;
+ Label miss;
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ Push(value);
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver, value);
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ Pop(value);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name) {
+ Label miss;
+
+ ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
+
+ __ Push(receiver(), this->name(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
+
+ // Return undefined if maps of the full prototype chain are still the
+ // same and no global property with this name contains a value.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+// TODO(all): The so-called scratch registers are significant in some cases.
+// For example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used
+// for KeyedStoreStubCompiler::transition_map(). We should verify which
+// registers are actually scratch registers, and which are important. For now,
+// we use the same assignments as ARM to remain on the safe side.
+
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { x0, x2, x3, x1, x4, x5 };
+ return registers;
+}
+
+
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { x1, x0, x2, x3, x4, x5 };
+ return registers;
+}
+
+
+Register* StoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { x1, x2, x0, x3, x4, x5 };
+ return registers;
+}
+
+
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { x2, x1, x0, x3, x4, x5 };
+ return registers;
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<HeapType> type,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete) {
+ Label miss;
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
+
+ // Get the value from the cell.
+ __ Mov(x3, Operand(cell));
+ __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (!is_dont_delete) {
+ __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+ __ Mov(x0, x4);
+ __ Ret();
+
+ HandlerFrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+ TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ Register map_reg = scratch1();
+ __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ Label try_next;
+ __ Cmp(map_reg, Operand(map));
+ __ B(ne, &try_next);
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ Bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
+ __ Bind(&try_next);
+ }
+ }
+ ASSERT(number_of_handled_maps != 0);
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
+}
+
+
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+
+ ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
+
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; i++) {
+ __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
+
+ Label skip;
+ __ B(&skip, ne);
+ if (!transitioned_maps->at(i).is_null()) {
+ // This argument is used by the handler stub. For example, see
+ // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
+ __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+ }
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ Bind(&skip);
+ }
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow, miss;
+
+ Register result = x0;
+ Register key = x0;
+ Register receiver = x1;
+
+ __ JumpIfNotSmi(key, &miss);
+ __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
+ __ Ret();
+
+ __ Bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ // Miss case, call the runtime.
+ __ Bind(&miss);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
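
The stub-compiler functions above all emit the same dispatch shape: load the receiver's map, compare it against each handled map, jump to the matching handler, and fall through to the miss builtin otherwise. As a rough, purely illustrative C++ model of what CompilePolymorphicIC generates (the Map and Code types here are hypothetical placeholders, not V8's):

struct Map {};
struct Code { void (*entry)(); };

// One map compare per handled type; unmatched receivers go to the miss stub.
void PolymorphicDispatch(const Map* receiver_map,
                         const Map* const* maps,
                         const Code* const* handlers,
                         int count,
                         void (*miss_builtin)()) {
  for (int i = 0; i < count; ++i) {
    if (receiver_map == maps[i]) {
      handlers[i]->entry();   // tail-call the monomorphic handler stub
      return;
    }
  }
  miss_builtin();             // no map matched: fall through to the IC miss
}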
diff --git a/deps/v8/src/a64/utils-a64.cc b/deps/v8/src/a64/utils-a64.cc
new file mode 100644
index 0000000000..7e710d770e
--- /dev/null
+++ b/deps/v8/src/a64/utils-a64.cc
@@ -0,0 +1,112 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#if V8_TARGET_ARCH_A64
+
+#include "a64/utils-a64.h"
+
+
+namespace v8 {
+namespace internal {
+
+#define __ assm->
+
+
+int CountLeadingZeros(uint64_t value, int width) {
+ // TODO(jbramley): Optimize this for A64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+ uint64_t bit_test = 1ULL << (width - 1);
+ while ((count < width) && ((bit_test & value) == 0)) {
+ count++;
+ bit_test >>= 1;
+ }
+ return count;
+}
+
+
+int CountLeadingSignBits(int64_t value, int width) {
+ // TODO(jbramley): Optimize this for A64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ if (value >= 0) {
+ return CountLeadingZeros(value, width) - 1;
+ } else {
+ return CountLeadingZeros(~value, width) - 1;
+ }
+}
+
+
+int CountTrailingZeros(uint64_t value, int width) {
+ // TODO(jbramley): Optimize this for A64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+ while ((count < width) && (((value >> count) & 1) == 0)) {
+ count++;
+ }
+ return count;
+}
+
+
+int CountSetBits(uint64_t value, int width) {
+ // TODO(jbramley): Would it be useful to allow other widths? The
+ // implementation already supports them.
+ ASSERT((width == 32) || (width == 64));
+
+ // Mask out unused bits to ensure that they are not counted.
+ value &= (0xffffffffffffffffUL >> (64-width));
+
+ // Add up the set bits.
+ // The algorithm works by adding pairs of bit fields together iteratively,
+ // where the size of each bit field doubles each time.
+ // An example for an 8-bit value:
+ // Bits: h g f e d c b a
+ // \ | \ | \ | \ |
+ // value = h+g f+e d+c b+a
+ // \ | \ |
+ // value = h+g+f+e d+c+b+a
+ // \ |
+ // value = h+g+f+e+d+c+b+a
+ value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
+ value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
+ value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
+ value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
+ value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
+ value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
+
+ return value;
+}
+
+
+int MaskToBit(uint64_t mask) {
+ ASSERT(CountSetBits(mask, 64) == 1);
+ return CountTrailingZeros(mask, 64);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
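
For reference, the parallel bit-count used by CountSetBits can be exercised on its own; this standalone sketch (not part of the patch) repeats the same six reduction steps and checks them against the 8-bit walkthrough in the comment above.

#include <cstdint>
#include <cstdio>

int PopCount64(uint64_t v) {
  // Sum adjacent fields of doubling width: 1, 2, 4, 8, 16, then 32 bits.
  v = ((v >> 1)  & 0x5555555555555555ULL) + (v & 0x5555555555555555ULL);
  v = ((v >> 2)  & 0x3333333333333333ULL) + (v & 0x3333333333333333ULL);
  v = ((v >> 4)  & 0x0f0f0f0f0f0f0f0fULL) + (v & 0x0f0f0f0f0f0f0f0fULL);
  v = ((v >> 8)  & 0x00ff00ff00ff00ffULL) + (v & 0x00ff00ff00ff00ffULL);
  v = ((v >> 16) & 0x0000ffff0000ffffULL) + (v & 0x0000ffff0000ffffULL);
  v = ((v >> 32) & 0x00000000ffffffffULL) + (v & 0x00000000ffffffffULL);
  return static_cast<int>(v);
}

int main() {
  std::printf("%d\n", PopCount64(0xb5));  // 0b10110101 -> prints 5
  return 0;
}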
diff --git a/deps/v8/src/a64/utils-a64.h b/deps/v8/src/a64/utils-a64.h
new file mode 100644
index 0000000000..16c51a9c8b
--- /dev/null
+++ b/deps/v8/src/a64/utils-a64.h
@@ -0,0 +1,109 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_UTILS_A64_H_
+#define V8_A64_UTILS_A64_H_
+
+#include <cmath>
+#include "v8.h"
+#include "a64/constants-a64.h"
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+// Floating point representation.
+static inline uint32_t float_to_rawbits(float value) {
+ uint32_t bits = 0;
+ memcpy(&bits, &value, 4);
+ return bits;
+}
+
+
+static inline uint64_t double_to_rawbits(double value) {
+ uint64_t bits = 0;
+ memcpy(&bits, &value, 8);
+ return bits;
+}
+
+
+static inline float rawbits_to_float(uint32_t bits) {
+ float value = 0.0;
+ memcpy(&value, &bits, 4);
+ return value;
+}
+
+
+static inline double rawbits_to_double(uint64_t bits) {
+ double value = 0.0;
+ memcpy(&value, &bits, 8);
+ return value;
+}
+
+
+// Bits counting.
+int CountLeadingZeros(uint64_t value, int width);
+int CountLeadingSignBits(int64_t value, int width);
+int CountTrailingZeros(uint64_t value, int width);
+int CountSetBits(uint64_t value, int width);
+int MaskToBit(uint64_t mask);
+
+
+// NaN tests.
+inline bool IsSignallingNaN(double num) {
+ const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
+ uint64_t raw = double_to_rawbits(num);
+ if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+ const uint64_t kFP32QuietNaNMask = 0x00400000UL;
+ uint32_t raw = float_to_rawbits(num);
+ if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+ return std::isnan(num) && !IsSignallingNaN(num);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_A64_UTILS_A64_H_
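
The rawbits helpers go through memcpy so that reinterpreting a float as an integer stays defined behaviour, and IsSignallingNaN keys off the quiet bit, the top mantissa bit of the encoding. A small standalone check of the same idea, independent of V8 and assuming an IEEE-754 target:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t FloatToRawbits(float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // defined, unlike pointer casts
  return bits;
}

int main() {
  const uint32_t kFP32QuietNaNMask = 0x00400000;  // top mantissa bit
  float quiet_nan = std::nanf("");                // quiet NaN by construction
  int quiet_bit_set =
      (FloatToRawbits(quiet_nan) & kFP32QuietNaNMask) != 0;
  std::printf("quiet bit set: %d\n", quiet_bit_set);  // prints 1
  return 0;
}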
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 50232661c1..47b0a85633 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "accessors.h"
+#include "compiler.h"
#include "contexts.h"
#include "deoptimizer.h"
#include "execution.h"
@@ -90,10 +91,22 @@ static V8_INLINE bool CheckForName(Handle<String> name,
}
-bool Accessors::IsJSObjectFieldAccessor(
- Handle<Map> map, Handle<String> name,
- int* object_offset) {
- Isolate* isolate = map->GetIsolate();
+// Returns true for properties that are accessors to object fields.
+// If true, *object_offset contains offset of object field.
+template <class T>
+bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
+ Handle<String> name,
+ int* object_offset) {
+ Isolate* isolate = name->GetIsolate();
+
+ if (type->Is(T::String())) {
+ return CheckForName(name, isolate->heap()->length_string(),
+ String::kLengthOffset, object_offset);
+ }
+
+ if (!type->IsClass()) return false;
+ Handle<Map> map = type->AsClass();
+
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
return
@@ -121,18 +134,24 @@ bool Accessors::IsJSObjectFieldAccessor(
JSDataView::kByteOffsetOffset, object_offset) ||
CheckForName(name, isolate->heap()->buffer_string(),
JSDataView::kBufferOffset, object_offset);
- default: {
- if (map->instance_type() < FIRST_NONSTRING_TYPE) {
- return
- CheckForName(name, isolate->heap()->length_string(),
- String::kLengthOffset, object_offset);
- }
+ default:
return false;
- }
}
}
+template
+bool Accessors::IsJSObjectFieldAccessor<Type>(Type* type,
+ Handle<String> name,
+ int* object_offset);
+
+
+template
+bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type,
+ Handle<String> name,
+ int* object_offset);
+
+
//
// Accessors::ArrayLength
//
@@ -148,45 +167,49 @@ MaybeObject* Accessors::ArrayGetLength(Isolate* isolate,
// The helper function will 'flatten' Number objects.
-Object* Accessors::FlattenNumber(Isolate* isolate, Object* value) {
+Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
+ Handle<Object> value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
- JSValue* wrapper = JSValue::cast(value);
+ Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
has_initial_map());
- Map* number_map = isolate->context()->native_context()->
- number_function()->initial_map();
- if (wrapper->map() == number_map) return wrapper->value();
+ if (wrapper->map() ==
+ isolate->context()->native_context()->number_function()->initial_map()) {
+ return handle(wrapper->value(), isolate);
+ }
+
return value;
}
MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
- JSObject* object,
- Object* value,
+ JSObject* object_raw,
+ Object* value_raw,
void*) {
+ HandleScope scope(isolate);
+ Handle<JSObject> object(object_raw, isolate);
+ Handle<Object> value(value_raw, isolate);
+
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
- return object->SetLocalPropertyIgnoreAttributesTrampoline(
- isolate->heap()->length_string(), value, NONE);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
+ isolate->factory()->length_string(), value, NONE);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
value = FlattenNumber(isolate, value);
- // Need to call methods that may trigger GC.
- HandleScope scope(isolate);
-
- // Protect raw pointers.
- Handle<JSArray> array_handle(JSArray::cast(object), isolate);
- Handle<Object> value_handle(value, isolate);
+ Handle<JSArray> array_handle = Handle<JSArray>::cast(object);
bool has_exception;
Handle<Object> uint32_v =
- Execution::ToUint32(isolate, value_handle, &has_exception);
+ Execution::ToUint32(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
Handle<Object> number_v =
- Execution::ToNumber(isolate, value_handle, &has_exception);
+ Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
@@ -578,26 +601,28 @@ MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate,
MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
- JSObject* object,
+ JSObject* object_raw,
Object* value_raw,
void*) {
- Heap* heap = isolate->heap();
- JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
- if (function_raw == NULL) return heap->undefined_value();
- if (!function_raw->should_have_prototype()) {
- // Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributesTrampoline(
- heap->prototype_string(), value_raw, NONE);
- }
+ JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object_raw);
+ if (function_raw == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
Handle<JSFunction> function(function_raw, isolate);
+ Handle<JSObject> object(object_raw, isolate);
Handle<Object> value(value_raw, isolate);
+ if (!function->should_have_prototype()) {
+ // Since we hit this accessor, object will have no prototype property.
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
+ isolate->factory()->prototype_string(), value, NONE);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
+ }
Handle<Object> old_value;
bool is_observed =
FLAG_harmony_observation &&
- *function == object &&
+ *function == *object &&
function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
@@ -611,7 +636,7 @@ MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
if (is_observed && !old_value->SameValue(*value)) {
JSObject::EnqueueChangeRecord(
- function, "updated", isolate->factory()->prototype_string(), old_value);
+ function, "update", isolate->factory()->prototype_string(), old_value);
}
return *function;
@@ -642,9 +667,9 @@ MaybeObject* Accessors::FunctionGetLength(Isolate* isolate,
// If the function isn't compiled yet, the length is not computed correctly
// yet. Compile it now and return the right length.
HandleScope scope(isolate);
- Handle<JSFunction> handle(function);
- if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
- return Smi::FromInt(handle->shared()->length());
+ Handle<JSFunction> function_handle(function);
+ if (Compiler::EnsureCompiled(function_handle, KEEP_EXCEPTION)) {
+ return Smi::FromInt(function_handle->shared()->length());
}
return Failure::Exception();
}
@@ -699,21 +724,22 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
int inlined_frame_index) {
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();
- Vector<SlotRef> args_slots =
- SlotRef::ComputeSlotMappingForArguments(
- frame,
- inlined_frame_index,
- inlined_function->shared()->formal_parameter_count());
- int args_count = args_slots.length();
+ SlotRefValueBuilder slot_refs(
+ frame,
+ inlined_frame_index,
+ inlined_function->shared()->formal_parameter_count());
+
+ int args_count = slot_refs.args_length();
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
+ slot_refs.Prepare(isolate);
for (int i = 0; i < args_count; ++i) {
- Handle<Object> value = args_slots[i].GetValue(isolate);
+ Handle<Object> value = slot_refs.GetNext(isolate, 0);
array->set(i, *value);
}
+ slot_refs.Finish(isolate);
arguments->set_elements(*array);
- args_slots.Dispose();
// Return the freshly allocated arguments object.
return *arguments;
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index b2dee27932..d157aeaadf 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -88,9 +88,10 @@ class Accessors : public AllStatic {
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
- static bool IsJSObjectFieldAccessor(
- Handle<Map> map, Handle<String> name,
- int* object_offset);
+ template <class T>
+ static bool IsJSObjectFieldAccessor(typename T::TypeHandle type,
+ Handle<String> name,
+ int* object_offset);
private:
@@ -149,7 +150,7 @@ class Accessors : public AllStatic {
void*);
// Helper functions.
- static Object* FlattenNumber(Isolate* isolate, Object* value);
+ static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
static MaybeObject* IllegalSetter(Isolate* isolate,
JSObject*,
Object*,
diff --git a/deps/v8/src/allocation-site-scopes.cc b/deps/v8/src/allocation-site-scopes.cc
index 8097045b27..bbfb39b122 100644
--- a/deps/v8/src/allocation-site-scopes.cc
+++ b/deps/v8/src/allocation-site-scopes.cc
@@ -83,26 +83,20 @@ void AllocationSiteCreationContext::ExitScope(
}
-Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
- if (top().is_null()) {
- InitializeTraversal(top_site_);
- } else {
- // Advance current site
- Object* nested_site = current()->nested_site();
- // Something is wrong if we advance to the end of the list here.
- ASSERT(nested_site->IsAllocationSite());
- update_current_site(AllocationSite::cast(nested_site));
+bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
+ if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
+ if (FLAG_allocation_site_pretenuring ||
+ AllocationSite::GetMode(object->GetElementsKind()) ==
+ TRACK_ALLOCATION_SITE) {
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating Memento for %s %p\n",
+ object->IsJSArray() ? "JSArray" : "JSObject",
+ static_cast<void*>(*object));
+ }
+ return true;
+ }
}
- return Handle<AllocationSite>(*current(), isolate());
-}
-
-
-void AllocationSiteUsageContext::ExitScope(
- Handle<AllocationSite> scope_site,
- Handle<JSObject> object) {
- // This assert ensures that we are pointing at the right sub-object in a
- // recursive walk of a nested literal.
- ASSERT(object.is_null() || *object == scope_site->transition_info());
+ return false;
}
} } // namespace v8::internal
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
index 1c3afdf369..a195b27d85 100644
--- a/deps/v8/src/allocation-site-scopes.h
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -41,31 +41,22 @@ namespace internal {
// boilerplate with AllocationSite and AllocationMemento support.
class AllocationSiteContext {
public:
- AllocationSiteContext(Isolate* isolate, bool activated) {
+ explicit AllocationSiteContext(Isolate* isolate) {
isolate_ = isolate;
- activated_ = activated;
};
- virtual ~AllocationSiteContext() {}
Handle<AllocationSite> top() { return top_; }
Handle<AllocationSite> current() { return current_; }
- // If activated, then recursively create mementos
- bool activated() const { return activated_; }
+ bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
- // Returns the AllocationSite that matches this scope.
- virtual Handle<AllocationSite> EnterNewScope() = 0;
-
- // scope_site should be the handle returned by the matching EnterNewScope()
- virtual void ExitScope(Handle<AllocationSite> scope_site,
- Handle<JSObject> object) = 0;
+ Isolate* isolate() { return isolate_; }
protected:
void update_current_site(AllocationSite* site) {
*(current_.location()) = site;
}
- Isolate* isolate() { return isolate_; }
void InitializeTraversal(Handle<AllocationSite> site) {
top_ = site;
current_ = Handle<AllocationSite>(*top_, isolate());
@@ -75,7 +66,6 @@ class AllocationSiteContext {
Isolate* isolate_;
Handle<AllocationSite> top_;
Handle<AllocationSite> current_;
- bool activated_;
};
@@ -84,11 +74,10 @@ class AllocationSiteContext {
class AllocationSiteCreationContext : public AllocationSiteContext {
public:
explicit AllocationSiteCreationContext(Isolate* isolate)
- : AllocationSiteContext(isolate, true) { }
+ : AllocationSiteContext(isolate) { }
- virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
- virtual void ExitScope(Handle<AllocationSite> site,
- Handle<JSObject> object) V8_OVERRIDE;
+ Handle<AllocationSite> EnterNewScope();
+ void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object);
};
@@ -98,15 +87,35 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
public:
AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
bool activated)
- : AllocationSiteContext(isolate, activated),
- top_site_(site) { }
+ : AllocationSiteContext(isolate),
+ top_site_(site),
+ activated_(activated) { }
+
+ inline Handle<AllocationSite> EnterNewScope() {
+ if (top().is_null()) {
+ InitializeTraversal(top_site_);
+ } else {
+ // Advance current site
+ Object* nested_site = current()->nested_site();
+ // Something is wrong if we advance to the end of the list here.
+ ASSERT(nested_site->IsAllocationSite());
+ update_current_site(AllocationSite::cast(nested_site));
+ }
+ return Handle<AllocationSite>(*current(), isolate());
+ }
+
+ inline void ExitScope(Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ // This assert ensures that we are pointing at the right sub-object in a
+ // recursive walk of a nested literal.
+ ASSERT(object.is_null() || *object == scope_site->transition_info());
+ }
- virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
- virtual void ExitScope(Handle<AllocationSite> site,
- Handle<JSObject> object) V8_OVERRIDE;
+ bool ShouldCreateMemento(Handle<JSObject> object);
private:
Handle<AllocationSite> top_site_;
+ bool activated_;
};
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc
index 586ce3c45a..83e1bb4b39 100644
--- a/deps/v8/src/allocation-tracker.cc
+++ b/deps/v8/src/allocation-tracker.cc
@@ -46,6 +46,7 @@ AllocationTraceNode::AllocationTraceNode(
AllocationTraceNode::~AllocationTraceNode() {
+ for (int i = 0; i < children_.length(); i++) delete children_[i];
}
@@ -155,6 +156,11 @@ AllocationTracker::AllocationTracker(
AllocationTracker::~AllocationTracker() {
unresolved_locations_.Iterate(DeleteUnresolvedLocation);
+ for (HashMap::Entry* p = id_to_function_info_.Start();
+ p != NULL;
+ p = id_to_function_info_.Next(p)) {
+ delete reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
+ }
}
@@ -169,7 +175,7 @@ void AllocationTracker::PrepareForSerialization() {
}
-void AllocationTracker::NewObjectEvent(Address addr, int size) {
+void AllocationTracker::AllocationEvent(Address addr, int size) {
DisallowHeapAllocation no_allocation;
Heap* heap = ids_->heap();
@@ -185,7 +191,8 @@ void AllocationTracker::NewObjectEvent(Address addr, int size) {
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
SharedFunctionInfo* shared = frame->function()->shared();
- SnapshotObjectId id = ids_->FindEntry(shared->address());
+ SnapshotObjectId id = ids_->FindOrAddEntry(
+ shared->address(), shared->Size(), false);
allocation_trace_buffer_[length++] = id;
AddFunctionInfo(shared, id);
it.Advance();
@@ -245,34 +252,33 @@ AllocationTracker::UnresolvedLocation::UnresolvedLocation(
info_(info) {
script_ = Handle<Script>::cast(
script->GetIsolate()->global_handles()->Create(script));
- GlobalHandles::MakeWeak(
- reinterpret_cast<Object**>(script_.location()),
- this, &HandleWeakScript);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
+ this,
+ &HandleWeakScript);
}
AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
if (!script_.is_null()) {
- script_->GetIsolate()->global_handles()->Destroy(
- reinterpret_cast<Object**>(script_.location()));
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(script_.location()));
}
}
void AllocationTracker::UnresolvedLocation::Resolve() {
if (script_.is_null()) return;
+ HandleScope scope(script_->GetIsolate());
info_->line = GetScriptLineNumber(script_, start_position_);
info_->column = GetScriptColumnNumber(script_, start_position_);
}
void AllocationTracker::UnresolvedLocation::HandleWeakScript(
- v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data) {
- UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
- location->script_ = Handle<Script>::null();
- obj->Dispose();
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ UnresolvedLocation* loc =
+ reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
+ loc->script_ = Handle<Script>::null();
}
diff --git a/deps/v8/src/allocation-tracker.h b/deps/v8/src/allocation-tracker.h
index 617cf902e8..1a5dc9e123 100644
--- a/deps/v8/src/allocation-tracker.h
+++ b/deps/v8/src/allocation-tracker.h
@@ -96,7 +96,7 @@ class AllocationTracker {
~AllocationTracker();
void PrepareForSerialization();
- void NewObjectEvent(Address addr, int size);
+ void AllocationEvent(Address addr, int size);
AllocationTraceTree* trace_tree() { return &trace_tree_; }
HashMap* id_to_function_info() { return &id_to_function_info_; }
@@ -112,9 +112,9 @@ class AllocationTracker {
void Resolve();
private:
- static void HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data);
+ static void HandleWeakScript(
+ const v8::WeakCallbackData<v8::Value, void>& data);
+
Handle<Script> script_;
int start_position_;
FunctionInfo* info_;
@@ -135,4 +135,3 @@ class AllocationTracker {
} } // namespace v8::internal
#endif // V8_ALLOCATION_TRACKER_H_
-
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 94aaad3fd4..ff16dab3cc 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -32,6 +32,10 @@
#include "platform.h"
#include "utils.h"
+#if V8_LIBC_BIONIC
+#include <malloc.h> // NOLINT
+#endif
+
namespace v8 {
namespace internal {
@@ -101,23 +105,32 @@ char* StrNDup(const char* str, int n) {
}
-void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
- next_ = other->next_;
- other->next_->previous_ = this;
- previous_ = other;
- other->next_ = this;
-}
-
-
-void PreallocatedStorage::Unlink() {
- next_->previous_ = previous_;
- previous_->next_ = next_;
+void* AlignedAlloc(size_t size, size_t alignment) {
+ ASSERT(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*)); // NOLINT
+ void* ptr;
+#if V8_OS_WIN
+ ptr = _aligned_malloc(size, alignment);
+#elif V8_LIBC_BIONIC
+ // posix_memalign is not exposed in some Android versions, so we fall back to
+ // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
+ ptr = memalign(alignment, size);
+#else
+ if (posix_memalign(&ptr, alignment, size)) ptr = NULL;
+#endif
+ if (ptr == NULL) FatalProcessOutOfMemory("AlignedAlloc");
+ return ptr;
}
-PreallocatedStorage::PreallocatedStorage(size_t size)
- : size_(size) {
- previous_ = next_ = this;
+void AlignedFree(void *ptr) {
+#if V8_OS_WIN
+ _aligned_free(ptr);
+#elif V8_LIBC_BIONIC
+ // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
+ free(ptr);
+#else
+ free(ptr);
+#endif
}
} } // namespace v8::internal
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 45bde4c4cb..380fa05ff3 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -109,33 +109,8 @@ class FreeStoreAllocationPolicy {
};
-// Allocation policy for allocating in preallocated space.
-// Used as an allocation policy for ScopeInfo when generating
-// stack traces.
-class PreallocatedStorage {
- public:
- explicit PreallocatedStorage(size_t size);
- size_t size() { return size_; }
-
- private:
- size_t size_;
- PreallocatedStorage* previous_;
- PreallocatedStorage* next_;
-
- void LinkTo(PreallocatedStorage* other);
- void Unlink();
-
- friend class Isolate;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
-};
-
-
-struct PreallocatedStorageAllocationPolicy {
- INLINE(void* New(size_t size));
- INLINE(static void Delete(void* ptr));
-};
-
+void* AlignedAlloc(size_t size, size_t alignment);
+void AlignedFree(void *ptr);
} } // namespace v8::internal
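
AlignedAlloc and AlignedFree replace the old PreallocatedStorage machinery with a thin wrapper over the platform's aligned allocator. The same dispatch, written as a standalone sketch rather than the V8 code itself, assuming a POSIX or Windows host:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#if defined(_WIN32)
#include <malloc.h>
#endif

void* AlignedAllocSketch(size_t size, size_t alignment) {
  void* ptr = NULL;
#if defined(_WIN32)
  ptr = _aligned_malloc(size, alignment);
#else
  // alignment must be a power of two and a multiple of sizeof(void*).
  if (posix_memalign(&ptr, alignment, size) != 0) ptr = NULL;
#endif
  return ptr;
}

void AlignedFreeSketch(void* ptr) {
#if defined(_WIN32)
  _aligned_free(ptr);
#else
  free(ptr);
#endif
}

int main() {
  void* p = AlignedAllocSketch(4096, 64);
  std::printf("64-byte aligned: %d\n",
              p != NULL && reinterpret_cast<uintptr_t>(p) % 64 == 0);
  AlignedFreeSketch(p);
  return 0;
}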
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 8a73877eed..2c7db3be16 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -118,40 +118,9 @@ namespace v8 {
EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, ;)
-#define API_ENTRY_CHECK(isolate, msg) \
- do { \
- if (v8::Locker::IsActive()) { \
- ApiCheck(isolate->thread_manager()->IsLockedByCurrentThread(), \
- msg, \
- "Entering the V8 API without proper locking in place"); \
- } \
- } while (false)
-
-
// --- E x c e p t i o n B e h a v i o r ---
-static void DefaultFatalErrorHandler(const char* location,
- const char* message) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->IsInitialized()) {
- i::VMState<i::OTHER> state(isolate);
- API_Fatal(location, message);
- } else {
- API_Fatal(location, message);
- }
-}
-
-
-static FatalErrorCallback GetFatalErrorHandler() {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->exception_behavior() == NULL) {
- isolate->set_exception_behavior(DefaultFatalErrorHandler);
- }
- return isolate->exception_behavior();
-}
-
-
void i::FatalProcessOutOfMemory(const char* location) {
i::V8::FatalProcessOutOfMemory(location, false);
}
@@ -221,21 +190,23 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
// HeapIterator here without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
}
- isolate->SignalFatalError();
- FatalErrorCallback callback = GetFatalErrorHandler();
- const char* message = "Allocation failed - process out of memory";
- callback(location, message);
- // If the callback returns, we stop execution.
+ Utils::ApiCheck(false, location, "Allocation failed - process out of memory");
+ // If the fatal error handler returns, we stop execution.
FATAL("API fatal error handler returned after process out of memory");
}
-bool Utils::ReportApiFailure(const char* location, const char* message) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, message);
+void Utils::ReportApiFailure(const char* location, const char* message) {
i::Isolate* isolate = i::Isolate::Current();
+ FatalErrorCallback callback = isolate->exception_behavior();
+ if (callback == NULL) {
+ i::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n",
+ location, message);
+ i::OS::Abort();
+ } else {
+ callback(location, message);
+ }
isolate->SignalFatalError();
- return false;
}
@@ -245,20 +216,6 @@ bool V8::IsDead() {
}
-static inline bool ApiCheck(bool condition,
- const char* location,
- const char* message) {
- return condition ? true : Utils::ReportApiFailure(location, message);
-}
-
-
-static bool ReportEmptyHandle(const char* location) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, "Reading from empty handle");
- return true;
-}
-
-
static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
if (!isolate->IsInitialized()) return false;
if (isolate->has_scheduled_exception()) {
@@ -269,16 +226,6 @@ static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
}
-static inline bool EmptyCheck(const char* location, v8::Handle<v8::Data> obj) {
- return obj.IsEmpty() ? ReportEmptyHandle(location) : false;
-}
-
-
-static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
- return (obj == 0) ? ReportEmptyHandle(location) : false;
-}
-
-
// --- S t a t i c s ---
@@ -295,11 +242,10 @@ static bool InitializeHelper(i::Isolate* isolate) {
static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
const char* location) {
- if (isolate != NULL) {
- if (isolate->IsInitialized()) return true;
- }
- ASSERT(isolate == i::Isolate::Current());
- return ApiCheck(InitializeHelper(isolate), location, "Error initializing V8");
+ return (isolate != NULL && isolate->IsInitialized()) ||
+ Utils::ApiCheck(InitializeHelper(isolate),
+ location,
+ "Error initializing V8");
}
@@ -472,11 +418,6 @@ void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
}
-v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
- return v8::Isolate::GetCurrent()->ThrowException(value);
-}
-
-
RegisteredExtension* RegisteredExtension::first_extension_ = NULL;
@@ -523,59 +464,61 @@ Extension::Extension(const char* name,
}
-v8::Handle<Primitive> Undefined() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Undefined()")) {
- return v8::Handle<v8::Primitive>();
- }
- return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
-}
-
-
-v8::Handle<Primitive> Null() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Null()")) {
- return v8::Handle<v8::Primitive>();
- }
- return ToApiHandle<Primitive>(isolate->factory()->null_value());
-}
-
+ResourceConstraints::ResourceConstraints()
+ : max_young_space_size_(0),
+ max_old_space_size_(0),
+ max_executable_size_(0),
+ stack_limit_(NULL),
+ max_available_threads_(0) { }
+
+void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
+ uint32_t number_of_processors) {
+ const int lump_of_memory = (i::kPointerSize / 4) * i::MB;
+#if V8_OS_ANDROID
+ // Android has higher physical memory requirements before raising the maximum
+ // heap size limits since it has no swap space.
+ const uint64_t low_limit = 512ul * i::MB;
+ const uint64_t medium_limit = 1ul * i::GB;
+ const uint64_t high_limit = 2ul * i::GB;
+#else
+ const uint64_t low_limit = 512ul * i::MB;
+ const uint64_t medium_limit = 768ul * i::MB;
+ const uint64_t high_limit = 1ul * i::GB;
+#endif
-v8::Handle<Boolean> True() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::True()")) {
- return v8::Handle<Boolean>();
+ // The young_space_size should be a power of 2 and old_generation_size should
+ // be a multiple of Page::kPageSize.
+ if (physical_memory <= low_limit) {
+ set_max_young_space_size(2 * lump_of_memory);
+ set_max_old_space_size(128 * lump_of_memory);
+ set_max_executable_size(96 * lump_of_memory);
+ } else if (physical_memory <= medium_limit) {
+ set_max_young_space_size(8 * lump_of_memory);
+ set_max_old_space_size(256 * lump_of_memory);
+ set_max_executable_size(192 * lump_of_memory);
+ } else if (physical_memory <= high_limit) {
+ set_max_young_space_size(16 * lump_of_memory);
+ set_max_old_space_size(512 * lump_of_memory);
+ set_max_executable_size(256 * lump_of_memory);
+ } else {
+ set_max_young_space_size(16 * lump_of_memory);
+ set_max_old_space_size(700 * lump_of_memory);
+ set_max_executable_size(256 * lump_of_memory);
}
- return ToApiHandle<Boolean>(isolate->factory()->true_value());
-}
-
-v8::Handle<Boolean> False() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::False()")) {
- return v8::Handle<Boolean>();
- }
- return ToApiHandle<Boolean>(isolate->factory()->false_value());
+ set_max_available_threads(i::Max(i::Min(number_of_processors, 4u), 1u));
}
-ResourceConstraints::ResourceConstraints()
- : max_young_space_size_(0),
- max_old_space_size_(0),
- max_executable_size_(0),
- stack_limit_(NULL) { }
-
-
-bool SetResourceConstraints(ResourceConstraints* constraints) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
-
+bool SetResourceConstraints(Isolate* v8_isolate,
+ ResourceConstraints* constraints) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
int young_space_size = constraints->max_young_space_size();
int old_gen_size = constraints->max_old_space_size();
int max_executable_size = constraints->max_executable_size();
if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
// After initialization it's too late to change Heap constraints.
- // TODO(rmcilroy): fix this assert.
- // ASSERT(!isolate->IsInitialized());
+ ASSERT(!isolate->IsInitialized());
bool result = isolate->heap()->ConfigureHeap(young_space_size / 2,
old_gen_size,
max_executable_size);
@@ -585,6 +528,8 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
+
+ isolate->set_max_available_threads(constraints->max_available_threads());
return true;
}
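
Resource constraints are now configured per isolate: ConfigureDefaults picks young, old, and executable space sizes from the host's physical memory and core count, and SetResourceConstraints takes the target Isolate explicitly and must run before its heap is initialized. An illustrative embedder call sequence (the values are arbitrary examples, not recommendations):

v8::Isolate* isolate = v8::Isolate::New();

v8::ResourceConstraints constraints;
constraints.ConfigureDefaults(2048ull * 1024 * 1024,  // physical memory, bytes
                              4);                     // number of processors
v8::SetResourceConstraints(isolate, &constraints);    // before first use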
@@ -610,12 +555,8 @@ i::Object** V8::CopyPersistent(i::Object** obj) {
void V8::MakeWeak(i::Object** object,
void* parameters,
- WeakCallback weak_callback,
- RevivableCallback weak_reference_callback) {
- i::GlobalHandles::MakeWeak(object,
- parameters,
- weak_callback,
- weak_reference_callback);
+ WeakCallback weak_callback) {
+ i::GlobalHandles::MakeWeak(object, parameters, weak_callback);
}
@@ -652,35 +593,29 @@ HandleScope::HandleScope(Isolate* isolate) {
void HandleScope::Initialize(Isolate* isolate) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- API_ENTRY_CHECK(internal_isolate, "HandleScope::HandleScope");
- v8::ImplementationUtilities::HandleScopeData* current =
- internal_isolate->handle_scope_data();
+ // We do not want to check the correct usage of the Locker class all over the
+ // place, so we do it only here: Without a HandleScope, an embedder can do
+ // almost nothing, so it is enough to check in this central place.
+ Utils::ApiCheck(!v8::Locker::IsActive() ||
+ internal_isolate->thread_manager()->IsLockedByCurrentThread(),
+ "HandleScope::HandleScope",
+ "Entering the V8 API without proper locking in place");
+ i::HandleScopeData* current = internal_isolate->handle_scope_data();
isolate_ = internal_isolate;
prev_next_ = current->next;
prev_limit_ = current->limit;
- is_closed_ = false;
current->level++;
}
HandleScope::~HandleScope() {
- if (!is_closed_) {
- Leave();
- }
+ i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
}
-void HandleScope::Leave() {
- return i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
-}
-
-
-int HandleScope::NumberOfHandles() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "HandleScope::NumberOfHandles")) {
- return 0;
- }
- return i::HandleScope::NumberOfHandles(isolate);
+int HandleScope::NumberOfHandles(Isolate* isolate) {
+ return i::HandleScope::NumberOfHandles(
+ reinterpret_cast<i::Isolate*>(isolate));
}
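
The locking check moved from the removed API_ENTRY_CHECK macro into HandleScope::Initialize, so a multi-threaded embedder now trips the check as soon as it opens a scope without holding the lock. The expected ordering on the embedder side, sketched with an already-created isolate:

v8::Locker locker(isolate);                 // take the lock first
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);      // now safe to create local handles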
@@ -704,11 +639,12 @@ EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
- ApiCheck(*escape_slot_ == isolate_->heap()->the_hole_value(),
- "EscapeableHandleScope::Escape",
- "Escape value set twice");
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(GetIsolate())->heap();
+ Utils::ApiCheck(*escape_slot_ == heap->the_hole_value(),
+ "EscapeableHandleScope::Escape",
+ "Escape value set twice");
if (escape_value == NULL) {
- *escape_slot_ = isolate_->heap()->undefined_value();
+ *escape_slot_ = heap->undefined_value();
return NULL;
}
*escape_slot_ = *escape_value;
@@ -720,38 +656,37 @@ void Context::Enter() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
ENTER_V8(isolate);
- isolate->handle_scope_implementer()->EnterContext(env);
- isolate->handle_scope_implementer()->SaveContext(isolate->context());
+ i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ impl->EnterContext(env);
+ impl->SaveContext(isolate->context());
isolate->set_context(*env);
}
void Context::Exit() {
- // TODO(dcarney): fix this once chrome is fixed.
- i::Isolate* isolate = i::Isolate::Current();
- i::Handle<i::Context> context = i::Handle<i::Context>::null();
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* isolate = env->GetIsolate();
ENTER_V8(isolate);
- if (!ApiCheck(isolate->handle_scope_implementer()->LeaveContext(context),
- "v8::Context::Exit()",
- "Cannot exit non-entered context")) {
+ i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ if (!Utils::ApiCheck(impl->LastEnteredContextWas(env),
+ "v8::Context::Exit()",
+ "Cannot exit non-entered context")) {
return;
}
- // Content of 'last_context' could be NULL.
- i::Context* last_context =
- isolate->handle_scope_implementer()->RestoreContext();
- isolate->set_context(last_context);
+ impl->LeaveContext();
+ isolate->set_context(impl->RestoreContext());
}
static void* DecodeSmiToAligned(i::Object* value, const char* location) {
- ApiCheck(value->IsSmi(), location, "Not a Smi");
+ Utils::ApiCheck(value->IsSmi(), location, "Not a Smi");
return reinterpret_cast<void*>(value);
}
static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
i::Smi* smi = reinterpret_cast<i::Smi*>(value);
- ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
+ Utils::ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
return smi;
}
@@ -762,13 +697,14 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
bool ok =
- ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
- ApiCheck(index >= 0, location, "Negative index");
+ Utils::ApiCheck(env->IsNativeContext(),
+ location,
+ "Not a native context") &&
+ Utils::ApiCheck(index >= 0, location, "Negative index");
if (!ok) return i::Handle<i::FixedArray>();
i::Handle<i::FixedArray> data(env->embedder_data());
if (index < data->length()) return data;
- if (!can_grow) {
- Utils::ReportApiFailure(location, "Index too large");
+ if (!Utils::ApiCheck(can_grow, location, "Index too large")) {
return i::Handle<i::FixedArray>();
}
int new_size = i::Max(index, data->length() << 1) + 1;
@@ -814,32 +750,6 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
}
-i::Object** v8::HandleScope::RawClose(i::Object** value) {
- if (!ApiCheck(!is_closed_,
- "v8::HandleScope::Close()",
- "Local scope has already been closed")) {
- return 0;
- }
- LOG_API(isolate_, "CloseHandleScope");
-
- // Read the result before popping the handle block.
- i::Object* result = NULL;
- if (value != NULL) {
- result = *value;
- }
- is_closed_ = true;
- Leave();
-
- if (value == NULL) {
- return NULL;
- }
-
- // Allocate a new handle on the previous handle block.
- i::Handle<i::Object> handle(result, isolate_);
- return handle.location();
-}
-
-
// --- N e a n d e r ---
@@ -848,8 +758,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
// objects. To remind you about this there is no HandleScope in the
// NeanderObject constructor. When you add one to the site calling the
// constructor you should check that you ensured the VM was not dead first.
-NeanderObject::NeanderObject(int size) {
- i::Isolate* isolate = i::Isolate::Current();
+NeanderObject::NeanderObject(v8::internal::Isolate* isolate, int size) {
EnsureInitializedForIsolate(isolate, "v8::Nowhere");
ENTER_V8(isolate);
value_ = isolate->factory()->NewNeanderObject();
@@ -863,7 +772,7 @@ int NeanderObject::size() {
}
-NeanderArray::NeanderArray() : obj_(2) {
+NeanderArray::NeanderArray(v8::internal::Isolate* isolate) : obj_(isolate, 2) {
obj_.set(0, i::Smi::FromInt(0));
}
@@ -920,11 +829,11 @@ static void TemplateSet(i::Isolate* isolate,
v8::Handle<v8::Data>* data) {
i::Handle<i::Object> list(Utils::OpenHandle(templ)->property_list(), isolate);
if (list->IsUndefined()) {
- list = NeanderArray().value();
+ list = NeanderArray(isolate).value();
Utils::OpenHandle(templ)->set_property_list(*list);
}
NeanderArray array(list);
- array.add(Utils::OpenHandle(*v8::Integer::New(length)));
+ array.add(isolate->factory()->NewNumberFromInt(length));
for (int i = 0; i < length; i++) {
i::Handle<i::Object> value = data[i].IsEmpty() ?
i::Handle<i::Object>(isolate->factory()->undefined_value()) :
@@ -941,10 +850,11 @@ void Template::Set(v8::Handle<String> name,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
const int kSize = 3;
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Handle<v8::Data> data[kSize] = {
- name,
- value,
- v8::Integer::New(attribute)};
+ name,
+ value,
+ v8::Integer::New(v8_isolate, attribute)};
TemplateSet(isolate, this, kSize, data);
}
@@ -961,31 +871,33 @@ void Template::SetAccessorProperty(
ASSERT(!getter.IsEmpty() || !setter.IsEmpty());
i::HandleScope scope(isolate);
const int kSize = 5;
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Handle<v8::Data> data[kSize] = {
- name,
- getter,
- setter,
- v8::Integer::New(attribute),
- v8::Integer::New(access_control)};
+ name,
+ getter,
+ setter,
+ v8::Integer::New(v8_isolate, attribute),
+ v8::Integer::New(v8_isolate, access_control)};
TemplateSet(isolate, this, kSize, data);
}
// --- F u n c t i o n T e m p l a t e ---
static void InitializeFunctionTemplate(
- i::Handle<i::FunctionTemplateInfo> info) {
+ i::Handle<i::FunctionTemplateInfo> info) {
info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
info->set_flag(0);
}
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(i_isolate);
i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
- isolate);
+ i_isolate);
if (result->IsUndefined()) {
- result = Utils::OpenHandle(*ObjectTemplate::New());
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
+ result = Utils::OpenHandle(*ObjectTemplate::New(isolate));
Utils::OpenHandle(this)->set_prototype_template(*result);
}
return ToApiHandle<ObjectTemplate>(result);
@@ -1033,32 +945,34 @@ static Local<FunctionTemplate> FunctionTemplateNew(
}
Local<FunctionTemplate> FunctionTemplate::New(
+ Isolate* isolate,
FunctionCallback callback,
v8::Handle<Value> data,
v8::Handle<Signature> signature,
int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
- LOG_API(isolate, "FunctionTemplate::New");
- ENTER_V8(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::FunctionTemplate::New()");
+ LOG_API(i_isolate, "FunctionTemplate::New");
+ ENTER_V8(i_isolate);
return FunctionTemplateNew(
- isolate, callback, data, signature, length, false);
+ i_isolate, callback, data, signature, length, false);
}
-Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
- int argc, Handle<FunctionTemplate> argv[]) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Signature::New()");
- LOG_API(isolate, "Signature::New");
- ENTER_V8(isolate);
+Local<Signature> Signature::New(Isolate* isolate,
+ Handle<FunctionTemplate> receiver, int argc,
+ Handle<FunctionTemplate> argv[]) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Signature::New()");
+ LOG_API(i_isolate, "Signature::New");
+ ENTER_V8(i_isolate);
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::SIGNATURE_INFO_TYPE);
+ i_isolate->factory()->NewStruct(i::SIGNATURE_INFO_TYPE);
i::Handle<i::SignatureInfo> obj =
i::Handle<i::SignatureInfo>::cast(struct_obj);
if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
if (argc > 0) {
- i::Handle<i::FixedArray> args = isolate->factory()->NewFixedArray(argc);
+ i::Handle<i::FixedArray> args = i_isolate->factory()->NewFixedArray(argc);
for (int i = 0; i < argc; i++) {
if (!argv[i].IsEmpty())
args->set(i, *Utils::OpenHandle(*argv[i]));
@@ -1070,7 +984,8 @@ Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
Local<AccessorSignature> AccessorSignature::New(
- Handle<FunctionTemplate> receiver) {
+ Isolate* isolate,
+ Handle<FunctionTemplate> receiver) {
return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
}
@@ -1085,7 +1000,7 @@ static Local<Operation> NewDescriptor(
i::Handle<i::DeclaredAccessorDescriptor>();
if (previous_descriptor != NULL) {
previous = Utils::OpenHandle(
- static_cast<DeclaredAccessorDescriptor*>(previous_descriptor));
+ static_cast<DeclaredAccessorDescriptor*>(previous_descriptor));
}
i::Handle<i::DeclaredAccessorDescriptor> descriptor =
i::DeclaredAccessorDescriptor::Create(internal_isolate, data, previous);
@@ -1094,7 +1009,7 @@ static Local<Operation> NewDescriptor(
Local<RawOperationDescriptor>
- ObjectOperationDescriptor::NewInternalFieldDereference(
+ObjectOperationDescriptor::NewInternalFieldDereference(
Isolate* isolate,
int internal_field) {
i::DeclaredAccessorDescriptorData data;
@@ -1222,16 +1137,16 @@ int TypeSwitch::match(v8::Handle<Value> value) {
i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
i::FixedArray* types = i::FixedArray::cast(info->types());
for (int i = 0; i < types->length(); i++) {
- if (obj->IsInstanceOf(i::FunctionTemplateInfo::cast(types->get(i))))
+ if (i::FunctionTemplateInfo::cast(types->get(i))->IsTemplateFor(*obj))
return i + 1;
}
return 0;
}
-#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
- i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
- (obj)->setter(*foreign); \
+#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
+ i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
+ (obj)->setter(*foreign); \
} while (false)
@@ -1273,13 +1188,13 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
template<typename Getter, typename Setter>
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<String> name,
- Getter getter,
- Setter setter,
- v8::Handle<Value> data,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
+ v8::Handle<String> name,
+ Getter getter,
+ Setter setter,
+ v8::Handle<Value> data,
+ v8::AccessControl settings,
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::ExecutableAccessorInfo> obj =
isolate->factory()->NewExecutableAccessorInfo();
@@ -1294,13 +1209,13 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<String> name,
- v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
- void* setter_ignored,
- void* data_ignored,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
+ v8::Handle<String> name,
+ v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
+ void* setter_ignored,
+ void* data_ignored,
+ v8::AccessControl settings,
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
if (descriptor.IsEmpty()) return i::Handle<i::DeclaredAccessorInfo>();
i::Handle<i::DeclaredAccessorInfo> obj =
@@ -1312,13 +1227,16 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
+ if (!Utils::ApiCheck(this != NULL,
+ "v8::FunctionTemplate::InstanceTemplate()",
+ "Reading from empty handle")) {
return Local<ObjectTemplate>();
+ }
ENTER_V8(isolate);
i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this);
if (handle->instance_template()->IsUndefined()) {
Local<ObjectTemplate> templ =
- ObjectTemplate::New(ToApiHandle<FunctionTemplate>(handle));
+ ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle));
handle->set_instance_template(*Utils::OpenHandle(*templ));
}
i::Handle<i::ObjectTemplateInfo> result(
@@ -1365,14 +1283,19 @@ void FunctionTemplate::RemovePrototype() {
// --- O b j e c t T e m p l a t e ---
+Local<ObjectTemplate> ObjectTemplate::New(Isolate* isolate) {
+ return New(reinterpret_cast<i::Isolate*>(isolate), Local<FunctionTemplate>());
+}
+
+
Local<ObjectTemplate> ObjectTemplate::New() {
- return New(Local<FunctionTemplate>());
+ return New(i::Isolate::Current(), Local<FunctionTemplate>());
}
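// A minimal embedder-side sketch, not part of this file or of the patch, of
// the isolate-explicit ObjectTemplate::New() overload wired up above; the
// helper name MakeTemplate and the internal field count are illustrative.
static v8::Local<v8::ObjectTemplate> MakeTemplate(v8::Isolate* isolate) {
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
  templ->SetInternalFieldCount(1);  // unrelated API, shown for context
  return templ;  // handle lives in the caller's HandleScope
}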
Local<ObjectTemplate> ObjectTemplate::New(
- v8::Handle<FunctionTemplate> constructor) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate,
+ v8::Handle<FunctionTemplate> constructor) {
EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
LOG_API(isolate, "ObjectTemplate::New");
ENTER_V8(isolate);
@@ -1391,13 +1314,15 @@ Local<ObjectTemplate> ObjectTemplate::New(
// Ensure that the object template has a constructor. If no
// constructor is available we create one.
static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
+ i::Isolate* isolate,
ObjectTemplate* object_template) {
i::Object* obj = Utils::OpenHandle(object_template)->constructor();
if (!obj ->IsUndefined()) {
i::FunctionTemplateInfo* info = i::FunctionTemplateInfo::cast(obj);
- return i::Handle<i::FunctionTemplateInfo>(info, info->GetIsolate());
+ return i::Handle<i::FunctionTemplateInfo>(info, isolate);
}
- Local<FunctionTemplate> templ = FunctionTemplate::New();
+ Local<FunctionTemplate> templ =
+ FunctionTemplate::New(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
constructor->set_instance_template(*Utils::OpenHandle(object_template));
Utils::OpenHandle(object_template)->set_constructor(*constructor);
@@ -1408,9 +1333,10 @@ static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
static inline void AddPropertyToTemplate(
i::Handle<i::TemplateInfo> info,
i::Handle<i::AccessorInfo> obj) {
- i::Handle<i::Object> list(info->property_accessors(), info->GetIsolate());
+ i::Isolate* isolate = info->GetIsolate();
+ i::Handle<i::Object> list(info->property_accessors(), isolate);
if (list->IsUndefined()) {
- list = NeanderArray().value();
+ list = NeanderArray(isolate).value();
info->set_property_accessors(*list);
}
NeanderArray array(list);
@@ -1419,6 +1345,7 @@ static inline void AddPropertyToTemplate(
static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ i::Isolate* isolate,
Template* template_obj) {
return Utils::OpenHandle(template_obj);
}
@@ -1426,8 +1353,9 @@ static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
// TODO(dcarney): remove this with ObjectTemplate::SetAccessor
static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ i::Isolate* isolate,
ObjectTemplate* object_template) {
- EnsureConstructor(object_template);
+ EnsureConstructor(isolate, object_template);
return Utils::OpenHandle(object_template);
}
@@ -1448,7 +1376,7 @@ static bool TemplateSetAccessor(
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
name, getter, setter, data, settings, attribute, signature);
if (obj.is_null()) return false;
- i::Handle<i::TemplateInfo> info = GetTemplateInfo(template_obj);
+ i::Handle<i::TemplateInfo> info = GetTemplateInfo(isolate, template_obj);
AddPropertyToTemplate(info, obj);
return true;
}
@@ -1500,7 +1428,7 @@ void ObjectTemplate::SetNamedPropertyHandler(
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
@@ -1527,7 +1455,7 @@ void ObjectTemplate::MarkAsUndetectable() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
@@ -1536,14 +1464,14 @@ void ObjectTemplate::MarkAsUndetectable() {
void ObjectTemplate::SetAccessCheckCallbacks(
- NamedSecurityCallback named_callback,
- IndexedSecurityCallback indexed_callback,
- Handle<Value> data,
- bool turned_on_by_default) {
+ NamedSecurityCallback named_callback,
+ IndexedSecurityCallback indexed_callback,
+ Handle<Value> data,
+ bool turned_on_by_default) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::Handle<i::Struct> struct_info =
isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
@@ -1567,16 +1495,16 @@ void ObjectTemplate::SetAccessCheckCallbacks(
void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter,
- IndexedPropertyQueryCallback query,
- IndexedPropertyDeleterCallback remover,
- IndexedPropertyEnumeratorCallback enumerator,
- Handle<Value> data) {
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter,
+ IndexedPropertyQueryCallback query,
+ IndexedPropertyDeleterCallback remover,
+ IndexedPropertyEnumeratorCallback enumerator,
+ Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
@@ -1604,7 +1532,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
@@ -1628,9 +1556,9 @@ int ObjectTemplate::InternalFieldCount() {
void ObjectTemplate::SetInternalFieldCount(int value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (!ApiCheck(i::Smi::IsValid(value),
- "v8::ObjectTemplate::SetInternalFieldCount()",
- "Invalid internal field count")) {
+ if (!Utils::ApiCheck(i::Smi::IsValid(value),
+ "v8::ObjectTemplate::SetInternalFieldCount()",
+ "Invalid internal field count")) {
return;
}
ENTER_V8(isolate);
@@ -1638,7 +1566,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
// The internal field count is set by the constructor function's
// construct code, so we ensure that there is a constructor
// function to do the setting.
- EnsureConstructor(this);
+ EnsureConstructor(isolate, this);
}
Utils::OpenHandle(this)->set_internal_field_count(i::Smi::FromInt(value));
}
@@ -1647,22 +1575,12 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
// --- S c r i p t D a t a ---
-ScriptData* ScriptData::PreCompile(v8::Isolate* isolate,
- const char* input,
- int length) {
- i::Utf8ToUtf16CharacterStream stream(
- reinterpret_cast<const unsigned char*>(input), length);
- return i::PreParserApi::PreParse(
- reinterpret_cast<i::Isolate*>(isolate), &stream);
-}
-
-
ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Isolate* isolate = str->GetIsolate();
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUtf16CharacterStream stream(
- i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
+ i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
return i::PreParserApi::PreParse(isolate, &stream);
} else {
i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
@@ -1739,16 +1657,16 @@ Local<Script> Script::New(v8::Handle<String> source,
pre_data_impl = NULL;
}
i::Handle<i::SharedFunctionInfo> result =
- i::Compiler::Compile(str,
- name_obj,
- line_offset,
- column_offset,
- is_shared_cross_origin,
- isolate->global_context(),
- NULL,
- pre_data_impl,
- Utils::OpenHandle(*script_data, true),
- i::NOT_NATIVES_CODE);
+ i::Compiler::CompileScript(str,
+ name_obj,
+ line_offset,
+ column_offset,
+ is_shared_cross_origin,
+ isolate->global_context(),
+ NULL,
+ pre_data_impl,
+ Utils::OpenHandle(*script_data, true),
+ i::NOT_NATIVES_CODE);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
raw_result = *result;
@@ -1846,25 +1764,6 @@ static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
}
-Local<Value> Script::Id() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>());
- LOG_API(isolate, "Script::Id");
- i::Object* raw_id = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- i::Handle<i::Object> id(script->id(), isolate);
- raw_id = *id;
- }
- i::Handle<i::Object> id(raw_id, isolate);
- return Utils::ToLocal(id);
-}
-
-
int Script::GetId() {
i::Handle<i::HeapObject> obj =
i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
@@ -2057,18 +1956,18 @@ Local<String> Message::Get() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Message::Get()", return Local<String>());
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(isolate, obj);
Local<String> result = Utils::ToLocal(raw_result);
- return scope.Close(result);
+ return scope.Escape(result);
}
v8::Handle<Value> Message::GetScriptResourceName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.name.
@@ -2077,14 +1976,14 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
isolate));
i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name(),
isolate);
- return scope.Close(Utils::ToLocal(resource_name));
+ return scope.Escape(Utils::ToLocal(resource_name));
}
v8::Handle<Value> Message::GetScriptData() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.data.
@@ -2092,21 +1991,21 @@ v8::Handle<Value> Message::GetScriptData() const {
i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
isolate));
i::Handle<i::Object> data(i::Script::cast(script->value())->data(), isolate);
- return scope.Close(Utils::ToLocal(data));
+ return scope.Escape(Utils::ToLocal(data));
}
v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
i::Handle<i::JSArray> stackTrace =
i::Handle<i::JSArray>::cast(stackFramesObj);
- return scope.Close(Utils::StackTraceToLocal(stackTrace));
+ return scope.Escape(Utils::StackTraceToLocal(stackTrace));
}
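// A sketch, not part of the patch, of the EscapableHandleScope pattern these
// hunks migrate to: Escape() copies the handle into a slot pre-allocated in
// the enclosing scope, in place of the HandleScope::Close() calls being
// swapped out above. The function name MakeGreeting is hypothetical.
static v8::Local<v8::String> MakeGreeting(v8::Isolate* isolate) {
  v8::EscapableHandleScope scope(isolate);
  v8::Local<v8::String> text = v8::String::NewFromUtf8(isolate, "hello");
  return scope.Escape(text);  // promotes the handle to the outer scope
}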
@@ -2226,24 +2125,24 @@ Local<String> Message::GetSourceLine() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Message::GetSourceLine()", return Local<String>());
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
Utils::OpenHandle(this),
&has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>());
if (result->IsString()) {
- return scope.Close(Utils::ToLocal(i::Handle<i::String>::cast(result)));
+ return scope.Escape(Utils::ToLocal(i::Handle<i::String>::cast(result)));
} else {
return Local<String>();
}
}
-void Message::PrintCurrentStackTrace(FILE* out) {
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- isolate->PrintCurrentStackTrace(out);
+void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i_isolate->PrintCurrentStackTrace(out);
}
@@ -2252,11 +2151,11 @@ void Message::PrintCurrentStackTrace(FILE* out) {
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
i::Object* raw_object = self->GetElementNoExceptionThrown(isolate, index);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
- return scope.Close(Utils::StackFrameToLocal(obj));
+ return scope.Escape(Utils::StackFrameToLocal(obj));
}
@@ -2274,12 +2173,14 @@ Local<Array> StackTrace::AsArray() {
}
-Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
+Local<StackTrace> StackTrace::CurrentStackTrace(
+ Isolate* isolate,
+ int frame_limit,
StackTraceOptions options) {
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
i::Handle<i::JSArray> stackTrace =
- isolate->CaptureCurrentStackTrace(frame_limit, options);
+ i_isolate->CaptureCurrentStackTrace(frame_limit, options);
return Utils::StackTraceToLocal(stackTrace);
}
@@ -2328,39 +2229,39 @@ int StackFrame::GetScriptId() const {
Local<String> StackFrame::GetScriptName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "scriptName");
if (!name->IsString()) {
return Local<String>();
}
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
+ return scope.Escape(Local<String>::Cast(Utils::ToLocal(name)));
}
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "scriptNameOrSourceURL");
if (!name->IsString()) {
return Local<String>();
}
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
+ return scope.Escape(Local<String>::Cast(Utils::ToLocal(name)));
}
Local<String> StackFrame::GetFunctionName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "functionName");
if (!name->IsString()) {
return Local<String>();
}
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
+ return scope.Escape(Local<String>::Cast(Utils::ToLocal(name)));
}
@@ -2470,26 +2371,14 @@ bool Value::IsTypedArray() const {
}
-#define TYPED_ARRAY_LIST(F) \
-F(Uint8Array, kExternalUnsignedByteArray) \
-F(Int8Array, kExternalByteArray) \
-F(Uint16Array, kExternalUnsignedShortArray) \
-F(Int16Array, kExternalShortArray) \
-F(Uint32Array, kExternalUnsignedIntArray) \
-F(Int32Array, kExternalIntArray) \
-F(Float32Array, kExternalFloatArray) \
-F(Float64Array, kExternalDoubleArray) \
-F(Uint8ClampedArray, kExternalPixelArray)
-
-
-#define VALUE_IS_TYPED_ARRAY(TypedArray, type_const) \
- bool Value::Is##TypedArray() const { \
- i::Handle<i::Object> obj = Utils::OpenHandle(this); \
- if (!obj->IsJSTypedArray()) return false; \
- return i::JSTypedArray::cast(*obj)->type() == type_const; \
+#define VALUE_IS_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
+ bool Value::Is##Type##Array() const { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(this); \
+ return obj->IsJSTypedArray() && \
+ i::JSTypedArray::cast(*obj)->type() == kExternal##Type##Array; \
}
-TYPED_ARRAY_LIST(VALUE_IS_TYPED_ARRAY)
+TYPED_ARRAYS(VALUE_IS_TYPED_ARRAY)
#undef VALUE_IS_TYPED_ARRAY
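// For a single entry of the internal TYPED_ARRAYS(V) list the macro above
// expands to roughly the following; the tuple (Uint8, uint8, UINT8, uint8_t, 1)
// is assumed here, and the authoritative list lives in src/objects.h.
//
//   bool Value::IsUint8Array() const {
//     i::Handle<i::Object> obj = Utils::OpenHandle(this);
//     return obj->IsJSTypedArray() &&
//            i::JSTypedArray::cast(*obj)->type() == kExternalUint8Array;
//   }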
@@ -2523,13 +2412,7 @@ bool Value::IsInt32() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return true;
if (obj->IsNumber()) {
- double value = obj->Number();
- static const i::DoubleRepresentation minus_zero(-0.0);
- i::DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
- return false;
- }
- return i::FastI2D(i::FastD2I(value)) == value;
+ return i::IsInt32Double(obj->Number());
}
return false;
}
@@ -2540,12 +2423,10 @@ bool Value::IsUint32() const {
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
double value = obj->Number();
- static const i::DoubleRepresentation minus_zero(-0.0);
- i::DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
- return false;
- }
- return i::FastUI2D(i::FastD2UI(value)) == value;
+ return !i::IsMinusZero(value) &&
+ value >= 0 &&
+ value <= i::kMaxUInt32 &&
+ value == i::FastUI2D(i::FastD2UI(value));
}
return false;
}
@@ -2597,7 +2478,7 @@ static bool CheckConstructor(i::Isolate* isolate,
if (!constr->IsJSFunction()) return false;
i::JSFunction* func = i::JSFunction::cast(constr);
return func->shared()->native() &&
- constr == LookupBuiltin(isolate, class_name);
+ constr == LookupBuiltin(isolate, class_name);
}
@@ -2734,172 +2615,175 @@ Local<Integer> Value::ToInteger() const {
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- ApiCheck(isolate != NULL && isolate->IsInitialized() && !isolate->IsDead(),
- "v8::internal::Internals::CheckInitialized()",
- "Isolate is not initialized or V8 has died");
+ Utils::ApiCheck(isolate != NULL &&
+ isolate->IsInitialized() &&
+ !isolate->IsDead(),
+ "v8::internal::Internals::CheckInitialized()",
+ "Isolate is not initialized or V8 has died");
}
void External::CheckCast(v8::Value* that) {
- ApiCheck(Utils::OpenHandle(that)->IsExternal(),
- "v8::External::Cast()",
- "Could not convert to external");
+ Utils::ApiCheck(Utils::OpenHandle(that)->IsExternal(),
+ "v8::External::Cast()",
+ "Could not convert to external");
}
void v8::Object::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSObject(),
- "v8::Object::Cast()",
- "Could not convert to object");
+ Utils::ApiCheck(obj->IsJSObject(),
+ "v8::Object::Cast()",
+ "Could not convert to object");
}
void v8::Function::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSFunction(),
- "v8::Function::Cast()",
- "Could not convert to function");
+ Utils::ApiCheck(obj->IsJSFunction(),
+ "v8::Function::Cast()",
+ "Could not convert to function");
}
void v8::String::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsString(),
- "v8::String::Cast()",
- "Could not convert to string");
+ Utils::ApiCheck(obj->IsString(),
+ "v8::String::Cast()",
+ "Could not convert to string");
}
void v8::Symbol::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsSymbol(),
- "v8::Symbol::Cast()",
- "Could not convert to symbol");
+ Utils::ApiCheck(obj->IsSymbol(),
+ "v8::Symbol::Cast()",
+ "Could not convert to symbol");
}
void v8::Number::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsNumber(),
- "v8::Number::Cast()",
- "Could not convert to number");
+ Utils::ApiCheck(obj->IsNumber(),
+ "v8::Number::Cast()",
+ "Could not convert to number");
}
void v8::Integer::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsNumber(),
- "v8::Integer::Cast()",
- "Could not convert to number");
+ Utils::ApiCheck(obj->IsNumber(),
+ "v8::Integer::Cast()",
+ "Could not convert to number");
}
void v8::Array::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSArray(),
- "v8::Array::Cast()",
- "Could not convert to array");
+ Utils::ApiCheck(obj->IsJSArray(),
+ "v8::Array::Cast()",
+ "Could not convert to array");
}
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSArrayBuffer(),
- "v8::ArrayBuffer::Cast()",
- "Could not convert to ArrayBuffer");
+ Utils::ApiCheck(obj->IsJSArrayBuffer(),
+ "v8::ArrayBuffer::Cast()",
+ "Could not convert to ArrayBuffer");
}
void v8::ArrayBufferView::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSArrayBufferView(),
- "v8::ArrayBufferView::Cast()",
- "Could not convert to ArrayBufferView");
+ Utils::ApiCheck(obj->IsJSArrayBufferView(),
+ "v8::ArrayBufferView::Cast()",
+ "Could not convert to ArrayBufferView");
}
void v8::TypedArray::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSTypedArray(),
- "v8::TypedArray::Cast()",
- "Could not convert to TypedArray");
+ Utils::ApiCheck(obj->IsJSTypedArray(),
+ "v8::TypedArray::Cast()",
+ "Could not convert to TypedArray");
}
-#define CHECK_TYPED_ARRAY_CAST(ApiClass, typeConst) \
- void v8::ApiClass::CheckCast(Value* that) { \
- i::Handle<i::Object> obj = Utils::OpenHandle(that); \
- ApiCheck(obj->IsJSTypedArray() && \
- i::JSTypedArray::cast(*obj)->type() == typeConst, \
- "v8::" #ApiClass "::Cast()", \
- "Could not convert to " #ApiClass); \
+#define CHECK_TYPED_ARRAY_CAST(Type, typeName, TYPE, ctype, size) \
+ void v8::Type##Array::CheckCast(Value* that) { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(that); \
+ Utils::ApiCheck(obj->IsJSTypedArray() && \
+ i::JSTypedArray::cast(*obj)->type() == \
+ kExternal##Type##Array, \
+ "v8::" #Type "Array::Cast()", \
+ "Could not convert to " #Type "Array"); \
}
-TYPED_ARRAY_LIST(CHECK_TYPED_ARRAY_CAST)
+TYPED_ARRAYS(CHECK_TYPED_ARRAY_CAST)
#undef CHECK_TYPED_ARRAY_CAST
void v8::DataView::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSDataView(),
- "v8::DataView::Cast()",
- "Could not convert to DataView");
+ Utils::ApiCheck(obj->IsJSDataView(),
+ "v8::DataView::Cast()",
+ "Could not convert to DataView");
}
void v8::Date::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()),
- "v8::Date::Cast()",
- "Could not convert to date");
+ Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()),
+ "v8::Date::Cast()",
+ "Could not convert to date");
}
void v8::StringObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()),
- "v8::StringObject::Cast()",
- "Could not convert to StringObject");
+ Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()),
+ "v8::StringObject::Cast()",
+ "Could not convert to StringObject");
}
void v8::SymbolObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
- "v8::SymbolObject::Cast()",
- "Could not convert to SymbolObject");
+ Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
+ "v8::SymbolObject::Cast()",
+ "Could not convert to SymbolObject");
}
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()),
- "v8::NumberObject::Cast()",
- "Could not convert to NumberObject");
+ Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()),
+ "v8::NumberObject::Cast()",
+ "Could not convert to NumberObject");
}
void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
- "v8::BooleanObject::Cast()",
- "Could not convert to BooleanObject");
+ Utils::ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
+ "v8::BooleanObject::Cast()",
+ "Could not convert to BooleanObject");
}
void v8::RegExp::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsJSRegExp(),
- "v8::RegExp::Cast()",
- "Could not convert to regular expression");
+ Utils::ApiCheck(obj->IsJSRegExp(),
+ "v8::RegExp::Cast()",
+ "Could not convert to regular expression");
}
@@ -3031,8 +2915,9 @@ int32_t Value::Int32Value() const {
bool Value::Equals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (EmptyCheck("v8::Value::Equals()", this) ||
- EmptyCheck("v8::Value::Equals()", that)) {
+ if (!Utils::ApiCheck(this != NULL && !that.IsEmpty(),
+ "v8::Value::Equals()",
+ "Reading from empty handle")) {
return false;
}
LOG_API(isolate, "Equals");
@@ -3057,8 +2942,9 @@ bool Value::Equals(Handle<Value> that) const {
bool Value::StrictEquals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (EmptyCheck("v8::Value::StrictEquals()", this) ||
- EmptyCheck("v8::Value::StrictEquals()", that)) {
+ if (!Utils::ApiCheck(this != NULL && !that.IsEmpty(),
+ "v8::Value::StrictEquals()",
+ "Reading from empty handle")) {
return false;
}
LOG_API(isolate, "StrictEquals");
@@ -3077,7 +2963,7 @@ bool Value::StrictEquals(Handle<Value> that) const {
return other->IsNumber() && obj->Number() == other->Number();
} else if (obj->IsString()) {
return other->IsString() &&
- i::String::cast(*obj)->Equals(i::String::cast(*other));
+ i::String::cast(*obj)->Equals(i::String::cast(*other));
} else if (obj->IsUndefined() || obj->IsUndetectableObject()) {
return other->IsUndefined() || other->IsUndetectableObject();
} else {
@@ -3088,8 +2974,9 @@ bool Value::StrictEquals(Handle<Value> that) const {
bool Value::SameValue(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (EmptyCheck("v8::Value::SameValue()", this) ||
- EmptyCheck("v8::Value::SameValue()", that)) {
+ if (!Utils::ApiCheck(this != NULL && !that.IsEmpty(),
+ "v8::Value::SameValue()",
+ "Reading from empty handle")) {
return false;
}
LOG_API(isolate, "SameValue");
@@ -3130,7 +3017,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::SetProperty(
+ i::Handle<i::Object> obj = i::Runtime::SetObjectProperty(
isolate,
self,
key_obj,
@@ -3185,6 +3072,12 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
}
+bool v8::Object::SetPrivate(v8::Handle<Private> key, v8::Handle<Value> value) {
+ return Set(v8::Handle<Value>(reinterpret_cast<Value*>(*key)),
+ value, DontEnum);
+}
+
+
bool v8::Object::ForceDelete(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::ForceDelete()", return false);
@@ -3236,6 +3129,11 @@ Local<Value> v8::Object::Get(uint32_t index) {
}
+Local<Value> v8::Object::GetPrivate(v8::Handle<Private> key) {
+ return Get(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::GetPropertyAttribute()",
@@ -3293,7 +3191,7 @@ Local<Object> v8::Object::FindInstanceInPrototypeChain(
ENTER_V8(isolate);
i::JSObject* object = *Utils::OpenHandle(this);
i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
- while (!object->IsInstanceOf(tmpl_info)) {
+ while (!tmpl_info->IsTemplateFor(object)) {
i::Object* prototype = object->GetPrototype();
if (!prototype->IsJSObject()) return Local<Object>();
object = i::JSObject::cast(prototype);
@@ -3345,13 +3243,14 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
Local<String> v8::Object::ObjectProtoToString() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ObjectProtoToString()",
+ i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+ Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
+ ON_BAILOUT(i_isolate, "v8::Object::ObjectProtoToString()",
return Local<v8::String>());
- ENTER_V8(isolate);
+ ENTER_V8(i_isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name(self->class_name(), isolate);
+ i::Handle<i::Object> name(self->class_name(), i_isolate);
// Native implementation of Object.prototype.toString (v8natives.js):
// var c = %_ClassOf(this);
@@ -3359,13 +3258,11 @@ Local<String> v8::Object::ObjectProtoToString() {
// return "[object " + c + "]";
if (!name->IsString()) {
- return v8::String::New("[object ]");
-
+ return v8::String::NewFromUtf8(isolate, "[object ]");
} else {
i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Arguments"))) {
- return v8::String::New("[object Object]");
-
+ return v8::String::NewFromUtf8(isolate, "[object Object]");
} else {
const char* prefix = "[object ";
Local<String> str = Utils::ToLocal(class_name);
@@ -3391,7 +3288,8 @@ Local<String> v8::Object::ObjectProtoToString() {
i::OS::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
// Copy the buffer into a heap-allocated string and return it.
- Local<String> result = v8::String::New(buf.start(), buf_len);
+ Local<String> result = v8::String::NewFromUtf8(
+ isolate, buf.start(), String::kNormalString, buf_len);
return result;
}
}
@@ -3435,6 +3333,11 @@ bool v8::Object::Delete(v8::Handle<Value> key) {
}
+bool v8::Object::DeletePrivate(v8::Handle<Private> key) {
+ return Delete(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
bool v8::Object::Has(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::Has()", return false);
@@ -3449,6 +3352,11 @@ bool v8::Object::Has(v8::Handle<Value> key) {
}
+bool v8::Object::HasPrivate(v8::Handle<Private> key) {
+ return Has(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
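// A usage sketch, not part of the patch, for the private-property helpers
// added in these hunks; they forward to the ordinary keyed accessors with a
// DontEnum attribute. Private::New(isolate, name) is assumed to be the
// matching v8.h factory, and the key name "tag" is illustrative. The caller
// is assumed to hold a HandleScope and have a context entered.
static void TagObject(v8::Isolate* isolate, v8::Handle<v8::Object> obj) {
  v8::Local<v8::Private> key =
      v8::Private::New(isolate, v8::String::NewFromUtf8(isolate, "tag"));
  obj->SetPrivate(key, v8::Integer::New(isolate, 42));
  if (obj->HasPrivate(key)) {
    v8::Local<v8::Value> tag = obj->GetPrivate(key);  // invisible to scripts
    (void) tag;
  }
}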
bool v8::Object::Delete(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::DeleteProperty()",
@@ -3591,7 +3499,7 @@ static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
- Handle<String> key) {
+ Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate,
"v8::Object::GetRealNamedPropertyInPrototypeChain()",
@@ -3689,7 +3597,8 @@ int v8::Object::GetIdentityHash() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::JSObject::GetIdentityHash(self);
+ return i::Handle<i::Smi>::cast(
+ i::JSReceiver::GetOrCreateIdentityHash(self))->value();
}
@@ -3745,33 +3654,12 @@ namespace {
static i::ElementsKind GetElementsKindFromExternalArrayType(
ExternalArrayType array_type) {
switch (array_type) {
- case kExternalByteArray:
- return i::EXTERNAL_BYTE_ELEMENTS;
- break;
- case kExternalUnsignedByteArray:
- return i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
- break;
- case kExternalShortArray:
- return i::EXTERNAL_SHORT_ELEMENTS;
- break;
- case kExternalUnsignedShortArray:
- return i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
- break;
- case kExternalIntArray:
- return i::EXTERNAL_INT_ELEMENTS;
- break;
- case kExternalUnsignedIntArray:
- return i::EXTERNAL_UNSIGNED_INT_ELEMENTS;
- break;
- case kExternalFloatArray:
- return i::EXTERNAL_FLOAT_ELEMENTS;
- break;
- case kExternalDoubleArray:
- return i::EXTERNAL_DOUBLE_ELEMENTS;
- break;
- case kExternalPixelArray:
- return i::EXTERNAL_PIXEL_ELEMENTS;
- break;
+#define ARRAY_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return i::EXTERNAL_##TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(ARRAY_TYPE_TO_ELEMENTS_KIND)
+#undef ARRAY_TYPE_TO_ELEMENTS_KIND
}
UNREACHABLE();
return i::DICTIONARY_ELEMENTS;
@@ -3803,18 +3691,19 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- if (!ApiCheck(length >= 0 && length <= i::ExternalPixelArray::kMaxLength,
- "v8::Object::SetIndexedPropertiesToPixelData()",
- "length exceeds max acceptable value")) {
+ if (!Utils::ApiCheck(length >= 0 &&
+ length <= i::ExternalUint8ClampedArray::kMaxLength,
+ "v8::Object::SetIndexedPropertiesToPixelData()",
+ "length exceeds max acceptable value")) {
return;
}
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!ApiCheck(!self->IsJSArray(),
- "v8::Object::SetIndexedPropertiesToPixelData()",
- "JSArray is not supported")) {
+ if (!Utils::ApiCheck(!self->IsJSArray(),
+ "v8::Object::SetIndexedPropertiesToPixelData()",
+ "JSArray is not supported")) {
return;
}
- PrepareExternalArrayElements(self, data, kExternalPixelArray, length);
+ PrepareExternalArrayElements(self, data, kExternalUint8ClampedArray, length);
}
@@ -3822,7 +3711,7 @@ bool v8::Object::HasIndexedPropertiesInPixelData() {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
ON_BAILOUT(self->GetIsolate(), "v8::HasIndexedPropertiesInPixelData()",
return false);
- return self->HasExternalPixelElements();
+ return self->HasExternalUint8ClampedElements();
}
@@ -3830,9 +3719,9 @@ uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelData()",
return NULL);
- if (self->HasExternalPixelElements()) {
- return i::ExternalPixelArray::cast(self->elements())->
- external_pixel_pointer();
+ if (self->HasExternalUint8ClampedElements()) {
+ return i::ExternalUint8ClampedArray::cast(self->elements())->
+ external_uint8_clamped_pointer();
} else {
return NULL;
}
@@ -3843,8 +3732,8 @@ int v8::Object::GetIndexedPropertiesPixelDataLength() {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelDataLength()",
return -1);
- if (self->HasExternalPixelElements()) {
- return i::ExternalPixelArray::cast(self->elements())->length();
+ if (self->HasExternalUint8ClampedElements()) {
+ return i::ExternalUint8ClampedArray::cast(self->elements())->length();
} else {
return -1;
}
@@ -3859,15 +3748,15 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- if (!ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
- "v8::Object::SetIndexedPropertiesToExternalArrayData()",
- "length exceeds max acceptable value")) {
+ if (!Utils::ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
+ "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+ "length exceeds max acceptable value")) {
return;
}
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!ApiCheck(!self->IsJSArray(),
- "v8::Object::SetIndexedPropertiesToExternalArrayData()",
- "JSArray is not supported")) {
+ if (!Utils::ApiCheck(!self->IsJSArray(),
+ "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+ "JSArray is not supported")) {
return;
}
PrepareExternalArrayElements(self, data, array_type, length);
@@ -3902,24 +3791,11 @@ ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
"v8::GetIndexedPropertiesExternalArrayDataType()",
return static_cast<ExternalArrayType>(-1));
switch (self->elements()->map()->instance_type()) {
- case i::EXTERNAL_BYTE_ARRAY_TYPE:
- return kExternalByteArray;
- case i::EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return kExternalUnsignedByteArray;
- case i::EXTERNAL_SHORT_ARRAY_TYPE:
- return kExternalShortArray;
- case i::EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return kExternalUnsignedShortArray;
- case i::EXTERNAL_INT_ARRAY_TYPE:
- return kExternalIntArray;
- case i::EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return kExternalUnsignedIntArray;
- case i::EXTERNAL_FLOAT_ARRAY_TYPE:
- return kExternalFloatArray;
- case i::EXTERNAL_DOUBLE_ARRAY_TYPE:
- return kExternalDoubleArray;
- case i::EXTERNAL_PIXEL_ARRAY_TYPE:
- return kExternalPixelArray;
+#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
+ case i::EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ return kExternal##Type##Array;
+ TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
+#undef INSTANCE_TYPE_TO_ARRAY_TYPE
default:
return static_cast<ExternalArrayType>(-1);
}
@@ -3945,8 +3821,7 @@ bool v8::Object::IsCallable() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (obj->IsJSFunction()) return true;
- return i::Execution::GetFunctionDelegate(isolate, obj)->IsJSFunction();
+ return obj->IsCallable();
}
@@ -4032,7 +3907,7 @@ Local<Function> Function::New(Isolate* v8_isolate,
ENTER_V8(isolate);
return FunctionTemplateNew(
isolate, callback, data, Local<Signature>(), length, true)->
- GetFunction();
+ GetFunction();
}
@@ -4050,7 +3925,7 @@ Local<v8::Object> Function::NewInstance(int argc,
ENTER_V8(isolate);
i::Logger::TimerEventScope timer_scope(
isolate, i::Logger::TimerEventScope::v8_execute);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4058,7 +3933,7 @@ Local<v8::Object> Function::NewInstance(int argc,
i::Handle<i::Object> returned =
i::Execution::New(function, argc, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
- return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
+ return scope.Escape(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
}
@@ -4115,7 +3990,7 @@ Handle<Value> Function::GetDisplayName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Function::GetDisplayName()",
return ToApiHandle<Primitive>(
- isolate->factory()->undefined_value()));
+ isolate->factory()->undefined_value()));
ENTER_V8(isolate);
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
i::Handle<i::String> property_name =
@@ -4139,10 +4014,11 @@ ScriptOrigin Function::GetScriptOrigin() const {
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script);
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(func->GetIsolate());
v8::ScriptOrigin origin(
- Utils::ToLocal(scriptName),
- v8::Integer::New(script->line_offset()->value()),
- v8::Integer::New(script->column_offset()->value()));
+ Utils::ToLocal(scriptName),
+ v8::Integer::New(isolate, script->line_offset()->value()),
+ v8::Integer::New(isolate, script->column_offset()->value()));
return origin;
}
return v8::ScriptOrigin(Handle<Value>());
@@ -4178,22 +4054,25 @@ bool Function::IsBuiltin() const {
}
-Handle<Value> Function::GetScriptId() const {
+int Function::ScriptId() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- i::Isolate* isolate = func->GetIsolate();
- if (!func->shared()->script()->IsScript()) {
- return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
- }
+ if (!func->shared()->script()->IsScript()) return v8::Script::kNoScriptId;
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->id(), isolate));
+ return script->id()->value();
}
-int Function::ScriptId() const {
+Local<v8::Value> Function::GetBoundFunction() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (!func->shared()->script()->IsScript()) return v8::Script::kNoScriptId;
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return script->id()->value();
+ if (!func->shared()->bound()) {
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(func->GetIsolate()));
+ }
+ i::Handle<i::FixedArray> bound_args = i::Handle<i::FixedArray>(
+ i::FixedArray::cast(func->function_bindings()));
+ i::Handle<i::Object> original(
+ bound_args->get(i::JSFunction::kBoundFunctionIndex),
+ func->GetIsolate());
+ return Utils::ToLocal(i::Handle<i::JSFunction>::cast(original));
}
@@ -4226,7 +4105,7 @@ static inline bool Unaligned(const uint16_t* chars) {
static inline const uint16_t* Align(const uint16_t* chars) {
return reinterpret_cast<uint16_t*>(
- reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
+ reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
}
class ContainsOnlyOneByteHelper {
@@ -4246,7 +4125,7 @@ class ContainsOnlyOneByteHelper {
// Align to uintptr_t.
const uint16_t* end = chars + length;
while (Unaligned(chars) && chars != end) {
- acc |= *chars++;
+ acc |= *chars++;
}
// Read word aligned in blocks,
// checking the return value at the end of each block.
@@ -4350,8 +4229,8 @@ class Utf8LengthHelper : public i::AllStatic {
class Visitor {
public:
inline explicit Visitor()
- : utf8_length_(0),
- state_(kInitialState) {}
+ : utf8_length_(0),
+ state_(kInitialState) {}
void VisitOneByteString(const uint8_t* chars, int length) {
int utf8_length = 0;
@@ -4424,7 +4303,7 @@ class Utf8LengthHelper : public i::AllStatic {
if (!(*state & kRightmostEdgeIsCalculated)) {
ASSERT(!(*state & kRightmostEdgeIsSurrogate));
*state |= (kRightmostEdgeIsCalculated
- | (edge_surrogate ? kRightmostEdgeIsSurrogate : 0));
+ | (edge_surrogate ? kRightmostEdgeIsSurrogate : 0));
} else if (edge_surrogate && StartsWithSurrogate(*state)) {
*length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
}
@@ -4530,28 +4409,35 @@ int String::Utf8Length() const {
class Utf8WriterVisitor {
public:
Utf8WriterVisitor(
- char* buffer, int capacity, bool skip_capacity_check)
+ char* buffer,
+ int capacity,
+ bool skip_capacity_check,
+ bool replace_invalid_utf8)
: early_termination_(false),
last_character_(unibrow::Utf16::kNoPreviousCharacter),
buffer_(buffer),
start_(buffer),
capacity_(capacity),
skip_capacity_check_(capacity == -1 || skip_capacity_check),
+ replace_invalid_utf8_(replace_invalid_utf8),
utf16_chars_read_(0) {
}
static int WriteEndCharacter(uint16_t character,
int last_character,
int remaining,
- char* const buffer) {
+ char* const buffer,
+ bool replace_invalid_utf8) {
using namespace unibrow;
ASSERT(remaining > 0);
// We can't use a local buffer here because Encode needs to modify
// previous characters in the stream. We know, however, that
// exactly one character will be advanced.
- if (Utf16::IsTrailSurrogate(character) &&
- Utf16::IsLeadSurrogate(last_character)) {
- int written = Utf8::Encode(buffer, character, last_character);
+ if (Utf16::IsSurrogatePair(last_character, character)) {
+ int written = Utf8::Encode(buffer,
+ character,
+ last_character,
+ replace_invalid_utf8);
ASSERT(written == 1);
return written;
}
@@ -4560,7 +4446,8 @@ class Utf8WriterVisitor {
// Can't encode using last_character as gcc has array bounds issues.
int written = Utf8::Encode(temp_buffer,
character,
- Utf16::kNoPreviousCharacter);
+ Utf16::kNoPreviousCharacter,
+ replace_invalid_utf8);
// Won't fit.
if (written > remaining) return 0;
// Copy over the character from temp_buffer.
@@ -4570,6 +4457,16 @@ class Utf8WriterVisitor {
return written;
}
+ // Visit writes out a group of code units (chars) of a v8::String to the
+ // internal buffer_. This is done in two phases. The first phase calculates a
+  // pessimistic estimate (writable_length) of how many code units can be safely
+ // written without exceeding the buffer capacity and without writing the last
+ // code unit (it could be a lead surrogate). The estimated number of code
+ // units is then written out in one go, and the reported byte usage is used
+ // to correct the estimate. This is repeated until the estimate becomes <= 0
+ // or all code units have been written out. The second phase writes out code
+ // units until the buffer capacity is reached, would be exceeded by the next
+ // unit, or all units have been written out.
template<typename Char>
void Visit(const Char* chars, const int length) {
using namespace unibrow;
@@ -4607,7 +4504,10 @@ class Utf8WriterVisitor {
} else {
for (; i < fast_length; i++) {
uint16_t character = *chars++;
- buffer += Utf8::Encode(buffer, character, last_character);
+ buffer += Utf8::Encode(buffer,
+ character,
+ last_character,
+ replace_invalid_utf8_);
last_character = character;
ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
}
@@ -4627,10 +4527,17 @@ class Utf8WriterVisitor {
ASSERT(remaining_capacity >= 0);
for (; i < length && remaining_capacity > 0; i++) {
uint16_t character = *chars++;
+ // remaining_capacity is <= 3 bytes at this point, so we do not write out
+    // an unmatched lead surrogate.
+ if (replace_invalid_utf8_ && Utf16::IsLeadSurrogate(character)) {
+ early_termination_ = true;
+ break;
+ }
int written = WriteEndCharacter(character,
last_character,
remaining_capacity,
- buffer);
+ buffer,
+ replace_invalid_utf8_);
if (written == 0) {
early_termination_ = true;
break;
@@ -4678,14 +4585,15 @@ class Utf8WriterVisitor {
char* const start_;
int capacity_;
bool const skip_capacity_check_;
+ bool const replace_invalid_utf8_;
int utf16_chars_read_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8WriterVisitor);
};
static bool RecursivelySerializeToUtf8(i::String* current,
- Utf8WriterVisitor* writer,
- int recursion_budget) {
+ Utf8WriterVisitor* writer,
+ int recursion_budget) {
while (!writer->IsDone()) {
i::ConsString* cons_string = i::String::VisitFlat(writer, current);
if (cons_string == NULL) return true; // Leaf node.
@@ -4716,9 +4624,11 @@ int String::WriteUtf8(char* buffer,
}
const int string_length = str->length();
bool write_null = !(options & NO_NULL_TERMINATION);
+ bool replace_invalid_utf8 = (options & REPLACE_INVALID_UTF8);
+ int max16BitCodeUnitSize = unibrow::Utf8::kMax16BitCodeUnitSize;
// First check if we can just write the string without checking capacity.
- if (capacity == -1 || capacity / 3 >= string_length) {
- Utf8WriterVisitor writer(buffer, capacity, true);
+ if (capacity == -1 || capacity / max16BitCodeUnitSize >= string_length) {
+ Utf8WriterVisitor writer(buffer, capacity, true, replace_invalid_utf8);
const int kMaxRecursion = 100;
bool success = RecursivelySerializeToUtf8(*str, &writer, kMaxRecursion);
if (success) return writer.CompleteWrite(write_null, nchars_ref);
@@ -4746,7 +4656,7 @@ int String::WriteUtf8(char* buffer,
}
// Recursive slow path can potentially be unreasonable slow. Flatten.
str = FlattenGetString(str);
- Utf8WriterVisitor writer(buffer, capacity, false);
+ Utf8WriterVisitor writer(buffer, capacity, false, replace_invalid_utf8);
i::String::VisitFlat(&writer, *str);
return writer.CompleteWrite(write_null, nchars_ref);
}
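// A sketch, not part of the patch, of the new REPLACE_INVALID_UTF8 write
// option threaded through above: unpaired surrogates are written as the
// replacement character instead of being serialized verbatim. The helper name
// and buffer handling are illustrative.
static int DumpUtf8(v8::Handle<v8::String> str, char* buf, int capacity) {
  return str->WriteUtf8(buf, capacity, NULL,
                        v8::String::NO_NULL_TERMINATION |
                            v8::String::REPLACE_INVALID_UTF8);
}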
@@ -4843,14 +4753,14 @@ void v8::String::VerifyExternalStringResourceBase(
} else {
expected = NULL;
expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
- : TWO_BYTE_ENCODING;
+ : TWO_BYTE_ENCODING;
}
CHECK_EQ(expected, value);
CHECK_EQ(expectedEncoding, encoding);
}
const v8::String::ExternalAsciiStringResource*
- v8::String::GetExternalAsciiStringResource() const {
+v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (i::StringShape(*str).IsExternalAscii()) {
const void* resource =
@@ -4869,6 +4779,11 @@ Local<Value> Symbol::Name() const {
}
+Local<Value> Private::Name() const {
+ return reinterpret_cast<const Symbol*>(this)->Name();
+}
+
+
double Number::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
@@ -4920,9 +4835,9 @@ int v8::Object::InternalFieldCount() {
static bool InternalFieldOK(i::Handle<i::JSObject> obj,
int index,
const char* location) {
- return ApiCheck(index < obj->GetInternalFieldCount(),
- location,
- "Internal field out of bounds");
+ return Utils::ApiCheck(index < obj->GetInternalFieldCount(),
+ location,
+ "Internal field out of bounds");
}
@@ -4973,6 +4888,24 @@ static void* ExternalValue(i::Object* obj) {
// --- E n v i r o n m e n t ---
+void v8::V8::InitializePlatform(Platform* platform) {
+#ifdef V8_USE_DEFAULT_PLATFORM
+ FATAL("Can't override v8::Platform when using default implementation");
+#else
+ i::V8::InitializePlatform(platform);
+#endif
+}
+
+
+void v8::V8::ShutdownPlatform() {
+#ifdef V8_USE_DEFAULT_PLATFORM
+ FATAL("Can't override v8::Platform when using default implementation");
+#else
+ i::V8::ShutdownPlatform();
+#endif
+}
+
+
bool v8::V8::Initialize() {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
if (isolate != NULL && isolate->IsInitialized()) {
@@ -4988,7 +4921,7 @@ void v8::V8::SetEntropySource(EntropySource entropy_source) {
void v8::V8::SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver return_address_resolver) {
+ ReturnAddressLocationResolver return_address_resolver) {
i::V8::SetReturnAddressLocationResolver(return_address_resolver);
}
@@ -5026,9 +4959,9 @@ void v8::V8::SetJitCodeEventHandler(
void v8::V8::SetArrayBufferAllocator(
ArrayBuffer::Allocator* allocator) {
- if (!ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
- "v8::V8::SetArrayBufferAllocator",
- "ArrayBufferAllocator might only be set once"))
+ if (!Utils::ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
+ "v8::V8::SetArrayBufferAllocator",
+ "ArrayBufferAllocator might only be set once"))
return;
i::V8::SetArrayBufferAllocator(allocator);
}
@@ -5036,9 +4969,9 @@ void v8::V8::SetArrayBufferAllocator(
bool v8::V8::Dispose() {
i::Isolate* isolate = i::Isolate::Current();
- if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
- "v8::V8::Dispose()",
- "Use v8::Isolate::Dispose() for a non-default isolate.")) {
+ if (!Utils::ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
+ "v8::V8::Dispose()",
+ "Use v8::Isolate::Dispose() for non-default isolate.")) {
return false;
}
i::V8::TearDown();
@@ -5147,11 +5080,12 @@ static i::Handle<i::Context> CreateEnvironment(
if (!global_template.IsEmpty()) {
// Make sure that the global_template has a constructor.
- global_constructor = EnsureConstructor(*global_template);
+ global_constructor = EnsureConstructor(isolate, *global_template);
// Create a fresh template for the global proxy object.
- proxy_template = ObjectTemplate::New();
- proxy_constructor = EnsureConstructor(*proxy_template);
+ proxy_template = ObjectTemplate::New(
+ reinterpret_cast<v8::Isolate*>(isolate));
+ proxy_constructor = EnsureConstructor(isolate, *proxy_template);
// Set the global template to be the prototype template of
// global proxy template.
@@ -5187,7 +5121,6 @@ static i::Handle<i::Context> CreateEnvironment(
global_constructor->set_needs_access_check(
proxy_constructor->needs_access_check());
}
- isolate->runtime_profiler()->Reset();
}
// Leave V8.
@@ -5204,6 +5137,8 @@ Local<Context> v8::Context::New(
LOG_API(isolate, "Context::New");
ON_BAILOUT(isolate, "v8::Context::New()", return Local<Context>());
i::HandleScope scope(isolate);
+ ExtensionConfiguration no_extensions;
+ if (extensions == NULL) extensions = &no_extensions;
i::Handle<i::Context> env =
CreateEnvironment(isolate, extensions, global_template, global_object);
if (env.is_null()) return Local<Context>();
@@ -5243,42 +5178,22 @@ bool Context::HasOutOfMemoryException() {
}
-bool Context::InContext() {
- return i::Isolate::Current()->context() != NULL;
-}
-
-
v8::Isolate* Context::GetIsolate() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
return reinterpret_cast<Isolate*>(env->GetIsolate());
}
-v8::Local<v8::Context> Context::GetEntered() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
- return Local<Context>();
- }
- return reinterpret_cast<Isolate*>(isolate)->GetEnteredContext();
-}
-
-
-v8::Local<v8::Context> Context::GetCurrent() {
- i::Isolate* isolate = i::Isolate::Current();
- return reinterpret_cast<Isolate*>(isolate)->GetCurrentContext();
-}
-
-
-v8::Local<v8::Context> Context::GetCalling() {
- i::Isolate* isolate = i::Isolate::Current();
- return reinterpret_cast<Isolate*>(isolate)->GetCallingContext();
-}
-
-
v8::Local<v8::Object> Context::Global() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
i::Handle<i::Object> global(context->global_proxy(), isolate);
+ // TODO(dcarney): This should always return the global proxy
+  // but can't presently, as calls to GetPrototype will return the wrong result.
+ if (i::Handle<i::JSGlobalProxy>::cast(
+ global)->IsDetachedFrom(context->global_object())) {
+ global = i::Handle<i::Object>(context->global_object(), isolate);
+ }
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
@@ -5291,16 +5206,6 @@ void Context::DetachGlobal() {
}
-void Context::ReattachGlobal(Handle<Object> global_object) {
- i::Handle<i::Context> context = Utils::OpenHandle(this);
- i::Isolate* isolate = context->GetIsolate();
- ENTER_V8(isolate);
- i::Handle<i::JSGlobalProxy> global_proxy =
- i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
- isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
-}
-
-
void Context::AllowCodeGenerationFromStrings(bool allow) {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -5358,17 +5263,17 @@ bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
ON_BAILOUT(i::Isolate::Current(), "v8::FunctionTemplate::HasInstanceOf()",
return false);
i::Object* obj = *Utils::OpenHandle(*value);
- return obj->IsInstanceOf(*Utils::OpenHandle(this));
+ return Utils::OpenHandle(this)->IsTemplateFor(obj);
}
-Local<External> v8::External::New(void* value) {
+Local<External> v8::External::New(Isolate* isolate, void* value) {
STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::External::New()");
- LOG_API(isolate, "External::New");
- ENTER_V8(isolate);
- i::Handle<i::JSObject> external = isolate->factory()->NewExternal(value);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::External::New()");
+ LOG_API(i_isolate, "External::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::JSObject> external = i_isolate->factory()->NewExternal(value);
return Utils::ExternalToLocal(external);
}
@@ -5378,16 +5283,6 @@ void* External::Value() const {
}
-Local<String> v8::String::Empty() {
- i::Isolate* isolate = i::Isolate::Current();
- if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
- return v8::Local<String>();
- }
- LOG_API(isolate, "String::Empty()");
- return Utils::ToLocal(isolate->factory()->empty_string());
-}
-
-
// anonymous namespace for string creation helper functions
namespace {
@@ -5450,7 +5345,7 @@ inline Local<String> NewString(Isolate* v8_isolate,
EnsureInitializedForIsolate(isolate, location);
LOG_API(isolate, env);
if (length == 0 && type != String::kUndetectableString) {
- return String::Empty();
+ return String::Empty(v8_isolate);
}
ENTER_V8(isolate);
if (length == -1) length = StringLength(data);
@@ -5517,48 +5412,30 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
}
-i::Handle<i::String> NewExternalStringHandle(i::Isolate* isolate,
- v8::String::ExternalStringResource* resource) {
- i::Handle<i::String> result =
- isolate->factory()->NewExternalStringFromTwoByte(resource);
- return result;
-}
-
-
-i::Handle<i::String> NewExternalAsciiStringHandle(i::Isolate* isolate,
- v8::String::ExternalAsciiStringResource* resource) {
- i::Handle<i::String> result =
- isolate->factory()->NewExternalStringFromAscii(resource);
- return result;
+static i::Handle<i::String> NewExternalStringHandle(
+ i::Isolate* isolate,
+ v8::String::ExternalStringResource* resource) {
+ return isolate->factory()->NewExternalStringFromTwoByte(resource);
}
-bool RedirectToExternalString(i::Isolate* isolate,
- i::Handle<i::String> parent,
- i::Handle<i::String> external) {
- if (parent->IsConsString()) {
- i::Handle<i::ConsString> cons = i::Handle<i::ConsString>::cast(parent);
- cons->set_first(*external);
- cons->set_second(isolate->heap()->empty_string());
- } else {
- ASSERT(parent->IsSlicedString());
- i::Handle<i::SlicedString> slice = i::Handle<i::SlicedString>::cast(parent);
- slice->set_parent(*external);
- slice->set_offset(0);
- }
- return true;
+static i::Handle<i::String> NewExternalAsciiStringHandle(
+ i::Isolate* isolate,
+ v8::String::ExternalAsciiStringResource* resource) {
+ return isolate->factory()->NewExternalStringFromAscii(resource);
}
Local<String> v8::String::NewExternal(
- v8::String::ExternalStringResource* resource) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
- LOG_API(isolate, "String::NewExternal");
- ENTER_V8(isolate);
+ Isolate* isolate,
+ v8::String::ExternalStringResource* resource) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
+ LOG_API(i_isolate, "String::NewExternal");
+ ENTER_V8(i_isolate);
CHECK(resource && resource->data());
- i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
- isolate->heap()->external_string_table()->AddString(*result);
+ i::Handle<i::String> result = NewExternalStringHandle(i_isolate, resource);
+ i_isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
}
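A hedged sketch of calling the re-signed two-byte NewExternal above; "resource" stands in for an embedder-owned ExternalStringResource whose data must outlive the string:

    // 'resource' is a hypothetical v8::String::ExternalStringResource* kept alive
    // by the embedder; the factory call now takes the isolate explicitly.
    v8::Local<v8::String> external_str = v8::String::NewExternal(isolate, resource);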
@@ -5578,36 +5455,26 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
}
CHECK(resource && resource->data());
- bool result;
- i::Handle<i::String> external;
- if (isolate->heap()->old_pointer_space()->Contains(*obj)) {
- // We do not allow external strings in the old pointer space. Instead of
- // converting the string in-place, we keep the cons/sliced string and
- // point it to a newly-allocated external string.
- external = NewExternalStringHandle(isolate, resource);
- result = RedirectToExternalString(isolate, obj, external);
- } else {
- result = obj->MakeExternal(resource);
- external = obj;
- }
-
- ASSERT(external->IsExternalString());
- if (result && !external->IsInternalizedString()) {
- isolate->heap()->external_string_table()->AddString(*external);
+ bool result = obj->MakeExternal(resource);
+ if (result) {
+ ASSERT(obj->IsExternalString());
+ isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
}
Local<String> v8::String::NewExternal(
- v8::String::ExternalAsciiStringResource* resource) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
- LOG_API(isolate, "String::NewExternal");
- ENTER_V8(isolate);
+ Isolate* isolate,
+ v8::String::ExternalAsciiStringResource* resource) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
+ LOG_API(i_isolate, "String::NewExternal");
+ ENTER_V8(i_isolate);
CHECK(resource && resource->data());
- i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
- isolate->heap()->external_string_table()->AddString(*result);
+ i::Handle<i::String> result =
+ NewExternalAsciiStringHandle(i_isolate, resource);
+ i_isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
}
@@ -5628,22 +5495,10 @@ bool v8::String::MakeExternal(
}
CHECK(resource && resource->data());
- bool result;
- i::Handle<i::String> external;
- if (isolate->heap()->old_pointer_space()->Contains(*obj)) {
- // We do not allow external strings in the old pointer space. Instead of
- // converting the string in-place, we keep the cons/sliced string and
- // point it to a newly-allocated external string.
- external = NewExternalAsciiStringHandle(isolate, resource);
- result = RedirectToExternalString(isolate, obj, external);
- } else {
- result = obj->MakeExternal(resource);
- external = obj;
- }
-
- ASSERT(external->IsExternalString());
- if (result && !external->IsInternalizedString()) {
- isolate->heap()->external_string_table()->AddString(*external);
+ bool result = obj->MakeExternal(resource);
+ if (result) {
+ ASSERT(obj->IsExternalString());
+ isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
}
@@ -5667,24 +5522,24 @@ bool v8::String::CanMakeExternal() {
}
-Local<v8::Object> v8::Object::New() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Object::New()");
- LOG_API(isolate, "Object::New");
- ENTER_V8(isolate);
+Local<v8::Object> v8::Object::New(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Object::New()");
+ LOG_API(i_isolate, "Object::New");
+ ENTER_V8(i_isolate);
i::Handle<i::JSObject> obj =
- isolate->factory()->NewJSObject(isolate->object_function());
+ i_isolate->factory()->NewJSObject(i_isolate->object_function());
return Utils::ToLocal(obj);
}
-Local<v8::Value> v8::NumberObject::New(double value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::NumberObject::New()");
- LOG_API(isolate, "NumberObject::New");
- ENTER_V8(isolate);
- i::Handle<i::Object> number = isolate->factory()->NewNumber(value);
- i::Handle<i::Object> obj = isolate->factory()->ToObject(number);
+Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::NumberObject::New()");
+ LOG_API(i_isolate, "NumberObject::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
+ i::Handle<i::Object> obj = i_isolate->factory()->ToObject(number);
return Utils::ToLocal(obj);
}
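Usage sketch for the two constructors re-signed above, assuming an initialized isolate:

    v8::Local<v8::Object> obj = v8::Object::New(isolate);             // plain empty object
    v8::Local<v8::Value> boxed = v8::NumberObject::New(isolate, 3.5); // wrapper, like new Number(3.5)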
@@ -5763,19 +5618,19 @@ Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
}
-Local<v8::Value> v8::Date::New(double time) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Date::New()");
- LOG_API(isolate, "Date::New");
+Local<v8::Value> v8::Date::New(Isolate* isolate, double time) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Date::New()");
+ LOG_API(i_isolate, "Date::New");
if (std::isnan(time)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
time = i::OS::nan_value();
}
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
+ ENTER_V8(i_isolate);
+ EXCEPTION_PREAMBLE(i_isolate);
i::Handle<i::Object> obj =
- i::Execution::NewDate(isolate, time, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Value>());
+ i::Execution::NewDate(i_isolate, time, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(i_isolate, Local<v8::Value>());
return Utils::ToLocal(obj);
}
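Corresponding caller-side sketch for the new Date::New signature; NaN inputs are canonicalized by the guard shown above:

    v8::Local<v8::Value> date = v8::Date::New(isolate, 0.0);  // milliseconds since epoch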
@@ -5789,22 +5644,22 @@ double v8::Date::ValueOf() const {
}
-void v8::Date::DateTimeConfigurationChangeNotification() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
+void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ON_BAILOUT(i_isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
return);
- LOG_API(isolate, "Date::DateTimeConfigurationChangeNotification");
- ENTER_V8(isolate);
+ LOG_API(i_isolate, "Date::DateTimeConfigurationChangeNotification");
+ ENTER_V8(i_isolate);
- isolate->date_cache()->ResetDateCache();
+ i_isolate->date_cache()->ResetDateCache();
- i::HandleScope scope(isolate);
+ i::HandleScope scope(i_isolate);
// Get the function ResetDateCache (defined in date.js).
i::Handle<i::String> func_name_str =
- isolate->factory()->InternalizeOneByteString(
+ i_isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("ResetDateCache"));
i::MaybeObject* result =
- isolate->js_builtins_object()->GetProperty(*func_name_str);
+ i_isolate->js_builtins_object()->GetProperty(*func_name_str);
i::Object* object_func;
if (!result->ToObject(&object_func)) {
return;
@@ -5817,7 +5672,7 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
// Call ResetDateCache() but expect no exceptions:
bool caught_exception = false;
i::Execution::TryCall(func,
- isolate->js_builtins_object(),
+ i_isolate->js_builtins_object(),
0,
NULL,
&caught_exception);
@@ -5861,8 +5716,8 @@ Local<v8::String> v8::RegExp::GetSource() const {
// Assert that the static flags cast in GetFlags is valid.
-#define REGEXP_FLAG_ASSERT_EQ(api_flag, internal_flag) \
- STATIC_ASSERT(static_cast<int>(v8::RegExp::api_flag) == \
+#define REGEXP_FLAG_ASSERT_EQ(api_flag, internal_flag) \
+ STATIC_ASSERT(static_cast<int>(v8::RegExp::api_flag) == \
static_cast<int>(i::JSRegExp::internal_flag))
REGEXP_FLAG_ASSERT_EQ(kNone, NONE);
REGEXP_FLAG_ASSERT_EQ(kGlobal, GLOBAL);
@@ -5876,15 +5731,15 @@ v8::RegExp::Flags v8::RegExp::GetFlags() const {
}
-Local<v8::Array> v8::Array::New(int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Array::New()");
- LOG_API(isolate, "Array::New");
- ENTER_V8(isolate);
+Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Array::New()");
+ LOG_API(i_isolate, "Array::New");
+ ENTER_V8(i_isolate);
int real_length = length > 0 ? length : 0;
- i::Handle<i::JSArray> obj = isolate->factory()->NewJSArray(real_length);
+ i::Handle<i::JSArray> obj = i_isolate->factory()->NewJSArray(real_length);
i::Handle<i::Object> length_obj =
- isolate->factory()->NewNumberFromInt(real_length);
+ i_isolate->factory()->NewNumberFromInt(real_length);
obj->set_length(*length_obj);
return Utils::ToLocal(obj);
}
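Minimal sketch of the Isolate-first Array::New above; negative lengths are clamped to zero by the real_length guard:

    v8::Local<v8::Array> arr = v8::Array::New(isolate, 3);    // length 3
    v8::Local<v8::Array> empty = v8::Array::New(isolate, -1); // clamped to length 0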
@@ -5930,9 +5785,9 @@ bool v8::ArrayBuffer::IsExternal() const {
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
- ApiCheck(!obj->is_external(),
- "v8::ArrayBuffer::Externalize",
- "ArrayBuffer already externalized");
+ Utils::ApiCheck(!obj->is_external(),
+ "v8::ArrayBuffer::Externalize",
+ "ArrayBuffer already externalized");
obj->set_is_external(true);
size_t byte_length = static_cast<size_t>(obj->byte_length()->Number());
Contents contents;
@@ -5945,9 +5800,9 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
void v8::ArrayBuffer::Neuter() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- ApiCheck(obj->is_external(),
- "v8::ArrayBuffer::Neuter",
- "Only externalized ArrayBuffers can be neutered");
+ Utils::ApiCheck(obj->is_external(),
+ "v8::ArrayBuffer::Neuter",
+ "Only externalized ArrayBuffers can be neutered");
LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
ENTER_V8(isolate);
@@ -5973,26 +5828,27 @@ size_t v8::ArrayBuffer::ByteLength() const {
}
-Local<ArrayBuffer> v8::ArrayBuffer::New(size_t byte_length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(size_t)");
- LOG_API(isolate, "v8::ArrayBuffer::New(size_t)");
- ENTER_V8(isolate);
+Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::ArrayBuffer::New(size_t)");
+ LOG_API(i_isolate, "v8::ArrayBuffer::New(size_t)");
+ ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
- isolate->factory()->NewJSArrayBuffer();
- i::Runtime::SetupArrayBufferAllocatingData(isolate, obj, byte_length);
+ i_isolate->factory()->NewJSArrayBuffer();
+ i::Runtime::SetupArrayBufferAllocatingData(i_isolate, obj, byte_length);
return Utils::ToLocal(obj);
}
-Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(void*, size_t)");
- LOG_API(isolate, "v8::ArrayBuffer::New(void*, size_t)");
- ENTER_V8(isolate);
+Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
+ size_t byte_length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
+ LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
+ ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
- isolate->factory()->NewJSArrayBuffer();
- i::Runtime::SetupArrayBuffer(isolate, obj, true, data, byte_length);
+ i_isolate->factory()->NewJSArrayBuffer();
+ i::Runtime::SetupArrayBuffer(i_isolate, obj, true, data, byte_length);
return Utils::ToLocal(obj);
}
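Sketch of both ArrayBuffer::New overloads introduced above, one with a V8-allocated store and one wrapping embedder-owned memory (the buffer name is a placeholder):

    // Backing store allocated by V8:
    v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 1024);

    // Externally owned backing store; the embedder keeps 'backing' alive:
    static char backing[256];                                  // placeholder buffer
    v8::Local<v8::ArrayBuffer> ext_ab =
        v8::ArrayBuffer::New(isolate, backing, sizeof(backing));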
@@ -6030,7 +5886,7 @@ static inline void SetupArrayBufferView(
size_t byte_offset,
size_t byte_length) {
ASSERT(byte_offset + byte_length <=
- static_cast<size_t>(buffer->byte_length()->Number()));
+ static_cast<size_t>(buffer->byte_length()->Number()));
obj->set_buffer(*buffer);
@@ -6038,11 +5894,11 @@ static inline void SetupArrayBufferView(
buffer->set_weak_first_view(*obj);
i::Handle<i::Object> byte_offset_object =
- isolate->factory()->NewNumberFromSize(byte_offset);
+ isolate->factory()->NewNumberFromSize(byte_offset);
obj->set_byte_offset(*byte_offset_object);
i::Handle<i::Object> byte_length_object =
- isolate->factory()->NewNumberFromSize(byte_length);
+ isolate->factory()->NewNumberFromSize(byte_length);
obj->set_byte_length(*byte_length_object);
}
@@ -6058,11 +5914,13 @@ i::Handle<i::JSTypedArray> NewTypedArray(
ASSERT(byte_offset % sizeof(ElementType) == 0);
+ CHECK(length <= (std::numeric_limits<size_t>::max() / sizeof(ElementType)));
+ size_t byte_length = length * sizeof(ElementType);
SetupArrayBufferView(
- isolate, obj, buffer, byte_offset, length * sizeof(ElementType));
+ isolate, obj, buffer, byte_offset, byte_length);
i::Handle<i::Object> length_object =
- isolate->factory()->NewNumberFromSize(length);
+ isolate->factory()->NewNumberFromSize(length);
obj->set_length(*length_object);
i::Handle<i::ExternalArray> elements =
@@ -6074,41 +5932,24 @@ i::Handle<i::JSTypedArray> NewTypedArray(
}
-#define TYPED_ARRAY_NEW(TypedArray, element_type, array_type, elements_kind) \
- Local<TypedArray> TypedArray::New(Handle<ArrayBuffer> array_buffer, \
+#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size) \
+ Local<Type##Array> Type##Array::New(Handle<ArrayBuffer> array_buffer, \
size_t byte_offset, size_t length) { \
i::Isolate* isolate = i::Isolate::Current(); \
EnsureInitializedForIsolate(isolate, \
- "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)"); \
LOG_API(isolate, \
- "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)"); \
ENTER_V8(isolate); \
i::Handle<i::JSTypedArray> obj = \
- NewTypedArray<element_type, array_type, elements_kind>( \
+ NewTypedArray<ctype, v8::kExternal##Type##Array, \
+ i::EXTERNAL_##TYPE##_ELEMENTS>( \
isolate, array_buffer, byte_offset, length); \
- return Utils::ToLocal##TypedArray(obj); \
- }
-
-
-TYPED_ARRAY_NEW(Uint8Array, uint8_t, kExternalUnsignedByteArray,
- i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS)
-TYPED_ARRAY_NEW(Uint8ClampedArray, uint8_t, kExternalPixelArray,
- i::EXTERNAL_PIXEL_ELEMENTS)
-TYPED_ARRAY_NEW(Int8Array, int8_t, kExternalByteArray,
- i::EXTERNAL_BYTE_ELEMENTS)
-TYPED_ARRAY_NEW(Uint16Array, uint16_t, kExternalUnsignedShortArray,
- i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS)
-TYPED_ARRAY_NEW(Int16Array, int16_t, kExternalShortArray,
- i::EXTERNAL_SHORT_ELEMENTS)
-TYPED_ARRAY_NEW(Uint32Array, uint32_t, kExternalUnsignedIntArray,
- i::EXTERNAL_UNSIGNED_INT_ELEMENTS)
-TYPED_ARRAY_NEW(Int32Array, int32_t, kExternalIntArray,
- i::EXTERNAL_INT_ELEMENTS)
-TYPED_ARRAY_NEW(Float32Array, float, kExternalFloatArray,
- i::EXTERNAL_FLOAT_ELEMENTS)
-TYPED_ARRAY_NEW(Float64Array, double, kExternalDoubleArray,
- i::EXTERNAL_DOUBLE_ELEMENTS)
+ return Utils::ToLocal##Type##Array(obj); \
+ }
+
+TYPED_ARRAYS(TYPED_ARRAY_NEW)
#undef TYPED_ARRAY_NEW
Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
@@ -6126,34 +5967,37 @@ Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
}
-Local<Symbol> v8::Symbol::New(Isolate* isolate) {
+Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
LOG_API(i_isolate, "Symbol::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
+ if (data != NULL) {
+ if (length == -1) length = i::StrLength(data);
+ i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
+ i::Vector<const char>(data, length));
+ result->set_name(*name);
+ }
return Utils::ToLocal(result);
}
-Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
+Local<Private> v8::Private::New(
+ Isolate* isolate, const char* data, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
- LOG_API(i_isolate, "Symbol::New(char)");
+ EnsureInitializedForIsolate(i_isolate, "v8::Private::New()");
+ LOG_API(i_isolate, "Private::New()");
ENTER_V8(i_isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
- result->set_name(*name);
- return Utils::ToLocal(result);
-}
-
-
-Local<Number> v8::Number::New(double value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Number::New()");
- return Number::New(reinterpret_cast<Isolate*>(isolate), value);
+ i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
+ if (data != NULL) {
+ if (length == -1) length = i::StrLength(data);
+ i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
+ i::Vector<const char>(data, length));
+ symbol->set_name(*name);
+ }
+ Local<Symbol> result = Utils::ToLocal(symbol);
+ return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
}
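A short sketch of the reshuffled Symbol::New and the new Private::New above; passing -1 lets the name length default to strlen(data):

    v8::Local<v8::Symbol> sym  = v8::Symbol::New(isolate, "description", -1);
    v8::Local<v8::Private> key = v8::Private::New(isolate, "embedder-key", -1);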
@@ -6170,21 +6014,7 @@ Local<Number> v8::Number::New(Isolate* isolate, double value) {
}
-Local<Integer> v8::Integer::New(int32_t value) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
- return v8::Integer::New(value, reinterpret_cast<Isolate*>(isolate));
-}
-
-
-Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Integer::NewFromUnsigned()");
- return Integer::NewFromUnsigned(value, reinterpret_cast<Isolate*>(isolate));
-}
-
-
-Local<Integer> v8::Integer::New(int32_t value, Isolate* isolate) {
+Local<Integer> v8::Integer::New(Isolate* isolate, int32_t value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(internal_isolate->IsInitialized());
if (i::Smi::IsValid(value)) {
@@ -6197,12 +6027,12 @@ Local<Integer> v8::Integer::New(int32_t value, Isolate* isolate) {
}
-Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
+Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(internal_isolate->IsInitialized());
bool fits_into_int32_t = (value & (1 << 31)) == 0;
if (fits_into_int32_t) {
- return Integer::New(static_cast<int32_t>(value), isolate);
+ return Integer::New(isolate, static_cast<int32_t>(value));
}
ENTER_V8(internal_isolate);
i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
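The Integer constructors above flip their parameter order so the isolate comes first; a caller-side sketch:

    v8::Local<v8::Integer> i32 = v8::Integer::New(isolate, -42);             // was New(-42, isolate)
    v8::Local<v8::Integer> u32 = v8::Integer::NewFromUnsigned(isolate, 42u); // was NewFromUnsigned(42u, isolate)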
@@ -6210,18 +6040,6 @@ Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
}
-#ifdef DEBUG
-v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) {
- disallow_heap_allocation_ = new i::DisallowHeapAllocation();
-}
-
-
-v8::AssertNoGCScope::~AssertNoGCScope() {
- delete static_cast<i::DisallowHeapAllocation*>(disallow_heap_allocation_);
-}
-#endif
-
-
void V8::IgnoreOutOfMemoryException() {
EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
}
@@ -6234,10 +6052,10 @@ bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
NeanderArray listeners(isolate->factory()->message_listeners());
- NeanderObject obj(2);
+ NeanderObject obj(isolate, 2);
obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
obj.set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
- : *Utils::OpenHandle(*data));
+ : *Utils::OpenHandle(*data));
listeners.add(obj.value());
return true;
}
@@ -6263,9 +6081,9 @@ void V8::RemoveMessageListeners(MessageCallback that) {
void V8::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options) {
i::Isolate::Current()->SetCaptureStackTraceForUncaughtExceptions(
capture,
frame_limit,
@@ -6294,29 +6112,19 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
}
void V8::SetFailedAccessCheckCallbackFunction(
- FailedAccessCheckCallback callback) {
+ FailedAccessCheckCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
isolate->SetFailedAccessCheckCallback(callback);
}
-intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes) {
+int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
return heap->AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
}
-intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized()) {
- return 0;
- }
- Isolate* isolate_ext = reinterpret_cast<Isolate*>(isolate);
- return isolate_ext->AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
-}
-
-
HeapProfiler* Isolate::GetHeapProfiler() {
i::HeapProfiler* heap_profiler =
reinterpret_cast<i::Isolate*>(this)->heap_profiler();
@@ -6485,6 +6293,25 @@ void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
}
+void V8::RunMicrotasks(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::HandleScope scope(i_isolate);
+ i::V8::RunMicrotasks(i_isolate);
+}
+
+
+void V8::EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i::Execution::EnqueueMicrotask(i_isolate, Utils::OpenHandle(*microtask));
+}
+
+
+void V8::SetAutorunMicrotasks(Isolate* isolate, bool autorun) {
+ reinterpret_cast<i::Isolate*>(isolate)->set_autorun_microtasks(autorun);
+}
+
+
void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
i::V8::RemoveCallCompletedCallback(callback);
}
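A minimal sketch of the microtask entry points added above; "callback" is a hypothetical v8::Handle<v8::Function> prepared by the embedder:

    v8::V8::SetAutorunMicrotasks(isolate, false);   // embedder takes over flushing
    v8::V8::EnqueueMicrotask(isolate, callback);
    v8::V8::RunMicrotasks(isolate);                 // drains the queue now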
@@ -6513,6 +6340,32 @@ void V8::CancelTerminateExecution(Isolate* isolate) {
}
+void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
+ reinterpret_cast<i::Isolate*>(this)->stack_guard()->RequestInterrupt(
+ callback, data);
+}
+
+
+void Isolate::ClearInterrupt() {
+ reinterpret_cast<i::Isolate*>(this)->stack_guard()->ClearInterrupt();
+}
+
+
+void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
+ CHECK(i::FLAG_expose_gc);
+ if (type == kMinorGarbageCollection) {
+ reinterpret_cast<i::Isolate*>(this)->heap()->CollectGarbage(
+ i::NEW_SPACE, "Isolate::RequestGarbageCollection",
+ kGCCallbackFlagForced);
+ } else {
+ ASSERT_EQ(kFullGarbageCollection, type);
+ reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask,
+ "Isolate::RequestGarbageCollection", kGCCallbackFlagForced);
+ }
+}
+
+
Isolate* Isolate::GetCurrent() {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
return reinterpret_cast<Isolate*>(isolate);
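Sketch of the testing-only GC request added above; it is only legal when V8 runs with --expose_gc, which the CHECK enforces:

    isolate->RequestGarbageCollectionForTesting(v8::Isolate::kMinorGarbageCollection); // scavenge
    isolate->RequestGarbageCollectionForTesting(v8::Isolate::kFullGarbageCollection);  // full mark-compact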
@@ -6527,9 +6380,9 @@ Isolate* Isolate::New() {
void Isolate::Dispose() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- if (!ApiCheck(!isolate->IsInUse(),
- "v8::Isolate::Dispose()",
- "Disposing the isolate that is entered by a thread.")) {
+ if (!Utils::ApiCheck(!isolate->IsInUse(),
+ "v8::Isolate::Dispose()",
+ "Disposing the isolate that is entered by a thread.")) {
return;
}
isolate->TearDown();
@@ -6589,27 +6442,6 @@ String::Utf8Value::~Utf8Value() {
}
-String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
- : str_(NULL), length_(0) {
- i::Isolate* isolate = i::Isolate::Current();
- if (obj.IsEmpty()) return;
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString();
- if (str.IsEmpty()) return;
- length_ = str->Utf8Length();
- str_ = i::NewArray<char>(length_ + 1);
- str->WriteUtf8(str_);
- ASSERT(i::String::NonAsciiStart(str_, length_) >= length_);
-}
-
-
-String::AsciiValue::~AsciiValue() {
- i::DeleteArray(str_);
-}
-
-
String::Value::Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
@@ -6860,7 +6692,7 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
if (!isolate->IsInitialized()) return Local<Value>();
ON_BAILOUT(isolate, "v8::Debug::GetMirror()", return Local<Value>());
ENTER_V8(isolate);
- v8::HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ v8::EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Debug* isolate_debug = isolate->debug();
isolate_debug->Load();
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
@@ -6872,11 +6704,10 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
const int kArgc = 1;
v8::Handle<v8::Value> argv[kArgc] = { obj };
EXCEPTION_PREAMBLE(isolate);
- v8::Handle<v8::Value> result = v8_fun->Call(Utils::ToLocal(debug),
- kArgc,
- argv);
+ v8::Local<v8::Value> result =
+ v8_fun->Call(Utils::ToLocal(debug), kArgc, argv);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return scope.Close(result);
+ return scope.Escape(result);
}
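The hunk above swaps HandleScope::Close for EscapableHandleScope::Escape; the same pattern in a minimal embedder helper:

    v8::Local<v8::Value> MakeNumber(v8::Isolate* isolate) {
      v8::EscapableHandleScope scope(isolate);
      v8::Local<v8::Value> n = v8::Number::New(isolate, 1.0);
      return scope.Escape(n);   // replaces the old scope.Close(n)
    }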
@@ -6999,15 +6830,6 @@ void CpuProfile::Delete() {
i::CpuProfiler* profiler = isolate->cpu_profiler();
ASSERT(profiler != NULL);
profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
- if (profiler->GetProfilesCount() == 0) {
- // If this was the last profile, clean up all accessory data as well.
- profiler->DeleteAllProfiles();
- }
-}
-
-
-unsigned CpuProfile::GetUid() const {
- return reinterpret_cast<const i::CpuProfile*>(this)->uid();
}
@@ -7048,11 +6870,6 @@ int CpuProfile::GetSamplesCount() const {
}
-int CpuProfiler::GetProfileCount() {
- return reinterpret_cast<i::CpuProfiler*>(this)->GetProfilesCount();
-}
-
-
void CpuProfiler::SetSamplingInterval(int us) {
ASSERT(us >= 0);
return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
@@ -7060,12 +6877,6 @@ void CpuProfiler::SetSamplingInterval(int us) {
}
-const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
- return reinterpret_cast<const CpuProfile*>(
- reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(index));
-}
-
-
void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), record_samples);
@@ -7079,11 +6890,6 @@ const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
}
-void CpuProfiler::DeleteAllCpuProfiles() {
- reinterpret_cast<i::CpuProfiler*>(this)->DeleteAllProfiles();
-}
-
-
void CpuProfiler::SetIdle(bool is_idle) {
i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
i::StateTag state = isolate->current_vm_state();
@@ -7116,11 +6922,11 @@ Handle<Value> HeapGraphEdge::GetName() const {
case i::HeapGraphEdge::kInternal:
case i::HeapGraphEdge::kProperty:
case i::HeapGraphEdge::kShortcut:
+ case i::HeapGraphEdge::kWeak:
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(edge->name()));
case i::HeapGraphEdge::kElement:
case i::HeapGraphEdge::kHidden:
- case i::HeapGraphEdge::kWeak:
return ToApiHandle<Number>(
isolate->factory()->NewNumberFromInt(edge->index()));
default: UNREACHABLE();
@@ -7165,6 +6971,13 @@ SnapshotObjectId HeapGraphNode::GetId() const {
int HeapGraphNode::GetSelfSize() const {
+ size_t size = ToInternal(this)->self_size();
+ CHECK(size <= static_cast<size_t>(internal::kMaxInt));
+ return static_cast<int>(size);
+}
+
+
+size_t HeapGraphNode::GetShallowSize() const {
return ToInternal(this)->self_size();
}
@@ -7180,15 +6993,6 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
}
-v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
- i::Isolate* isolate = i::Isolate::Current();
- i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
- return !object.is_null() ?
- ToApiHandle<Value>(object) :
- ToApiHandle<Value>(isolate->factory()->undefined_value());
-}
-
-
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
return const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
@@ -7247,15 +7051,15 @@ SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
- ApiCheck(format == kJSON,
- "v8::HeapSnapshot::Serialize",
- "Unknown serialization format");
- ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
- "v8::HeapSnapshot::Serialize",
- "Unsupported output encoding");
- ApiCheck(stream->GetChunkSize() > 0,
- "v8::HeapSnapshot::Serialize",
- "Invalid stream chunk size");
+ Utils::ApiCheck(format == kJSON,
+ "v8::HeapSnapshot::Serialize",
+ "Unknown serialization format");
+ Utils::ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
+ "v8::HeapSnapshot::Serialize",
+ "Unsupported output encoding");
+ Utils::ApiCheck(stream->GetChunkSize() > 0,
+ "v8::HeapSnapshot::Serialize",
+ "Invalid stream chunk size");
i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
serializer.Serialize(stream);
}
@@ -7278,6 +7082,19 @@ SnapshotObjectId HeapProfiler::GetObjectId(Handle<Value> value) {
}
+Handle<Value> HeapProfiler::FindObjectById(SnapshotObjectId id) {
+ i::Handle<i::Object> obj =
+ reinterpret_cast<i::HeapProfiler*>(this)->FindHeapObjectById(id);
+ if (obj.is_null()) return Local<Value>();
+ return Utils::ToLocal(obj);
+}
+
+
+void HeapProfiler::ClearObjectIds() {
+ reinterpret_cast<i::HeapProfiler*>(this)->ClearHeapObjectMap();
+}
+
+
const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
Handle<String> title,
ActivityControl* control,
@@ -7288,8 +7105,9 @@ const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
}
-void HeapProfiler::StartTrackingHeapObjects() {
- reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking();
+void HeapProfiler::StartTrackingHeapObjects(bool track_allocations) {
+ reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking(
+ track_allocations);
}
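With the boolean parameter added above, allocation recording is folded into object tracking (the separate Start/StopRecordingHeapAllocations calls are removed further down); a hedged sketch:

    v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
    profiler->StartTrackingHeapObjects(true);   // 'true' also records allocation stacks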
@@ -7327,16 +7145,6 @@ void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
}
-void HeapProfiler::StartRecordingHeapAllocations() {
- reinterpret_cast<i::HeapProfiler*>(this)->StartHeapAllocationsRecording();
-}
-
-
-void HeapProfiler::StopRecordingHeapAllocations() {
- reinterpret_cast<i::HeapProfiler*>(this)->StopHeapAllocationsRecording();
-}
-
-
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
@@ -7415,8 +7223,7 @@ void HandleScopeImplementer::FreeThreadResources() {
char* HandleScopeImplementer::ArchiveThread(char* storage) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
handle_scope_data_ = *current;
OS::MemCopy(storage, this, sizeof(*this));
@@ -7477,8 +7284,7 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
handle_scope_data_ = *current;
IterateThis(v);
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 9197bafbc5..9fc99d9d2a 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -31,7 +31,6 @@
#include "v8.h"
#include "../include/v8-testing.h"
-#include "apiutils.h"
#include "contexts.h"
#include "factory.h"
#include "isolate.h"
@@ -56,7 +55,7 @@ class Consts {
// env-independent JSObjects used by the api.
class NeanderObject {
public:
- explicit NeanderObject(int size);
+ explicit NeanderObject(v8::internal::Isolate* isolate, int size);
explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
explicit inline NeanderObject(v8::internal::Object* obj);
inline v8::internal::Object* get(int index);
@@ -72,7 +71,7 @@ class NeanderObject {
// array abstraction built on neander-objects.
class NeanderArray {
public:
- NeanderArray();
+ explicit NeanderArray(v8::internal::Isolate* isolate);
explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
inline v8::internal::Handle<v8::internal::JSObject> value() {
return obj_.value();
@@ -196,7 +195,12 @@ class RegisteredExtension {
class Utils {
public:
- static bool ReportApiFailure(const char* location, const char* message);
+ static inline bool ApiCheck(bool condition,
+ const char* location,
+ const char* message) {
+ if (!condition) Utils::ReportApiFailure(location, message);
+ return condition;
+ }
static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
@@ -303,17 +307,20 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
static inline v8::internal::Handle<To> OpenHandle(v8::Local<From> handle) {
return OpenHandle(*handle);
}
+
+ private:
+ static void ReportApiFailure(const char* location, const char* message);
};
template <class T>
v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
- v8::HandleScope* scope) {
+ v8::EscapableHandleScope* scope) {
v8::internal::Handle<T> handle;
if (!is_null()) {
handle = *this;
}
- return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)), true);
+ return Utils::OpenHandle(*scope->Escape(Utils::ToLocal(handle)), true);
}
@@ -337,11 +344,11 @@ inline v8::Local<T> ToApiHandle(
}
-#define MAKE_TO_LOCAL_TYPED_ARRAY(TypedArray, typeConst) \
- Local<v8::TypedArray> Utils::ToLocal##TypedArray( \
+#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
+ Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
- ASSERT(obj->type() == typeConst); \
- return Convert<v8::internal::JSTypedArray, v8::TypedArray>(obj); \
+ ASSERT(obj->type() == kExternal##Type##Array); \
+ return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
}
@@ -358,15 +365,7 @@ MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Uint8ClampedArray, kExternalPixelArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Int8Array, kExternalByteArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Uint16Array, kExternalUnsignedShortArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Int16Array, kExternalShortArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Uint32Array, kExternalUnsignedIntArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Int32Array, kExternalIntArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Float32Array, kExternalFloatArray)
-MAKE_TO_LOCAL_TYPED_ARRAY(Float64Array, kExternalDoubleArray)
+TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
@@ -543,7 +542,8 @@ class HandleScopeImplementer {
inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Handle<Context> context);
- inline bool LeaveContext(Handle<Context> context);
+ inline void LeaveContext();
+ inline bool LastEnteredContextWas(Handle<Context> context);
// Returns the last entered context or an empty handle if no
// contexts have been entered.
@@ -599,7 +599,7 @@ class HandleScopeImplementer {
int call_depth_;
Object** last_handle_before_deferred_block_;
// This is only used for threading support.
- v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
+ HandleScopeData handle_scope_data_;
void IterateThis(ObjectVisitor* v);
char* RestoreThreadHelper(char* from);
@@ -635,12 +635,13 @@ void HandleScopeImplementer::EnterContext(Handle<Context> context) {
}
-bool HandleScopeImplementer::LeaveContext(Handle<Context> context) {
- if (entered_contexts_.is_empty()) return false;
- // TODO(dcarney): figure out what's wrong here
- // if (entered_contexts_.last() != *context) return false;
+void HandleScopeImplementer::LeaveContext() {
entered_contexts_.RemoveLast();
- return true;
+}
+
+
+bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
+ return !entered_contexts_.is_empty() && entered_contexts_.last() == *context;
}
diff --git a/deps/v8/src/arm/OWNERS b/deps/v8/src/arm/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index e3b39f407c..3399958ee3 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -57,6 +57,11 @@ int DwVfpRegister::NumRegisters() {
}
+int DwVfpRegister::NumReservedRegisters() {
+ return kNumReservedRegisters;
+}
+
+
int DwVfpRegister::NumAllocatableRegisters() {
return NumRegisters() - kNumReservedRegisters;
}
@@ -104,7 +109,7 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
+ return Assembler::target_pointer_address_at(pc_);
}
@@ -126,31 +131,21 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_pointer_at(pc_)));
-}
-
-
-Object** RelocInfo::target_object_address() {
- // Provide a "natural pointer" to the embedded object,
- // which can be de-referenced during heap iteration.
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- reconstructed_obj_ptr_ =
- reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
- return &reconstructed_obj_ptr_;
+ Assembler::target_address_at(pc_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -160,10 +155,9 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
}
-Address* RelocInfo::target_reference_address() {
+Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
- return &reconstructed_adr_ptr_;
+ return Assembler::target_address_at(pc_);
}
@@ -269,6 +263,15 @@ Object** RelocInfo::call_object_address() {
}
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, NULL);
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
Instr current_instr = Assembler::instr_at(pc_);
Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
@@ -394,33 +397,12 @@ void Assembler::emit(Instr x) {
Address Assembler::target_pointer_address_at(Address pc) {
- Address target_pc = pc;
- Instr instr = Memory::int32_at(target_pc);
- // If we have a bx instruction, the instruction before the bx is
- // what we need to patch.
- static const int32_t kBxInstMask = 0x0ffffff0;
- static const int32_t kBxInstPattern = 0x012fff10;
- if ((instr & kBxInstMask) == kBxInstPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-
- // With a blx instruction, the instruction before is what needs to be patched.
- if ((instr & kBlxRegMask) == kBlxRegPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-
- ASSERT(IsLdrPcImmediateOffset(instr));
- int offset = instr & 0xfff; // offset_12 is unsigned
- if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
- // Verify that the constant pool comes after the instruction referencing it.
- ASSERT(offset >= -4);
- return target_pc + offset + 8;
+ Instr instr = Memory::int32_at(pc);
+ return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
}
-Address Assembler::target_pointer_at(Address pc) {
+Address Assembler::target_address_at(Address pc) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
@@ -429,6 +411,7 @@ Address Assembler::target_pointer_at(Address pc) {
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
}
+ ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
return Memory::Address_at(target_pointer_address_at(pc));
}
@@ -474,19 +457,13 @@ void Assembler::deserialization_set_special_target_at(
}
-void Assembler::set_external_target_at(Address constant_pool_entry,
- Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
static Instr EncodeMovwImmediate(uint32_t immediate) {
ASSERT(immediate < 0x10000);
return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}
-void Assembler::set_target_pointer_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc, Address target) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@@ -517,16 +494,6 @@ void Assembler::set_target_pointer_at(Address pc, Address target) {
}
-Address Assembler::target_address_at(Address pc) {
- return target_pointer_at(pc);
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- set_target_pointer_at(pc, target);
-}
-
-
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 05b25ae2d7..35279e557c 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -517,12 +517,13 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
- num_pending_reloc_info_ = 0;
+ num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
- first_const_pool_use_ = -1;
+ first_const_pool_32_use_ = -1;
+ first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
ClearRecordedAstId();
}
@@ -536,7 +537,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
- ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
// Set up code descriptor.
@@ -544,6 +545,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
}
@@ -3077,6 +3079,11 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
}
+bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
+ return is_uint12(abs(imm32));
+}
+
+
// Debugging.
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
@@ -3149,14 +3156,19 @@ void Assembler::GrowBuffer() {
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
+ for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
rinfo.set_pc(rinfo.pc() + pc_delta);
}
}
+ for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
+ ASSERT(rinfo.rmode() == RelocInfo::NONE64);
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
}
@@ -3164,7 +3176,7 @@ void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
- ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
@@ -3176,7 +3188,7 @@ void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
- ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
@@ -3184,6 +3196,14 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::emit_code_stub_address(Code* stub) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) =
+ reinterpret_cast<uint32_t>(stub->instruction_start());
+ pc_ += sizeof(uint32_t);
+}
+
+
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
UseConstantPoolMode mode) {
// We do not try to reuse pool constants.
@@ -3238,15 +3258,19 @@ void Assembler::RecordRelocInfo(double data) {
void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
if (rinfo.rmode() == RelocInfo::NONE64) {
- ++num_pending_64_bit_reloc_info_;
+ ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+ if (num_pending_64_bit_reloc_info_ == 0) {
+ first_const_pool_64_use_ = pc_offset();
+ }
+ pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+ } else {
+ ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+ if (num_pending_32_bit_reloc_info_ == 0) {
+ first_const_pool_32_use_ = pc_offset();
+ }
+ pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
- ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
@@ -3256,12 +3280,15 @@ void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
- // If there are some pending entries, the constant pool cannot be blocked
- // further than constant pool instruction's reach.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
- // TODO(jfb) Also check 64-bit entries are in range (requires splitting
- // them up from 32-bit entries).
+ // Max pool start (if we need a jump and an alignment).
+#ifdef DEBUG
+ int start = pc_limit + kInstrSize + 2 * kPointerSize;
+ ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
+ (start - first_const_pool_32_use_ +
+ num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
+ ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
+ (start - first_const_pool_64_use_ < kMaxDistToFPPool));
+#endif
no_const_pool_before_ = pc_limit;
}
@@ -3282,8 +3309,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
- if (num_pending_reloc_info_ == 0) {
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ if ((num_pending_32_bit_reloc_info_ == 0) &&
+ (num_pending_64_bit_reloc_info_ == 0)) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
@@ -3292,24 +3319,18 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Check that the code buffer is large enough before emitting the constant
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
- // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
- int size_after_marker = num_pending_reloc_info_ * kPointerSize;
+ int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
- // 64-bit values must be 64-bit aligned.
- // We'll start emitting at PC: branch+marker, then 32-bit values, then
- // 64-bit values which might need to be aligned.
- bool require_64_bit_align = has_fp_values &&
- (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
- if (require_64_bit_align) {
- size_after_marker += kInstrSize;
- }
- // num_pending_reloc_info_ also contains 64-bit entries, the above code
- // therefore already counted half of the size for 64-bit entries. Add the
- // remaining size.
- STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
- size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
+ bool require_64_bit_align = false;
+ if (has_fp_values) {
+ require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
+ if (require_64_bit_align) {
+ size_after_marker += kInstrSize;
+ }
+ size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
+ }
int size = size_up_to_marker + size_after_marker;
@@ -3322,19 +3343,25 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
- ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
- int dist = pc_offset() + size - first_const_pool_use_;
+ ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
+ bool need_emit = false;
if (has_fp_values) {
- if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
- (require_jump || (dist < kMaxDistToFPPool / 2))) {
- return;
- }
- } else {
- if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
- (require_jump || (dist < kMaxDistToIntPool / 2))) {
- return;
+ int dist64 = pc_offset() +
+ size -
+ num_pending_32_bit_reloc_info_ * kPointerSize -
+ first_const_pool_64_use_;
+ if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
+ (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
+ need_emit = true;
}
}
+ int dist32 =
+ pc_offset() + size - first_const_pool_32_use_;
+ if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
+ (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
+ need_emit = true;
+ }
+ if (!need_emit) return;
}
int needed_space = size + kGap;
@@ -3363,15 +3390,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Emit 64-bit constant pool entries first: their range is smaller than
// 32-bit entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
-
- if (rinfo.rmode() != RelocInfo::NONE64) {
- // 32-bit values emitted later.
- continue;
- }
+ for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
- ASSERT(!((uintptr_t)pc_ & 0x3)); // Check 64-bit alignment.
+ ASSERT(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
Instr instr = instr_at(rinfo.pc());
// Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
@@ -3381,53 +3403,85 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int delta = pc_ - rinfo.pc() - kPcLoadDelta;
ASSERT(is_uint10(delta));
+ bool found = false;
+ uint64_t value = rinfo.raw_data64();
+ for (int j = 0; j < i; j++) {
+ RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
+ if (value == rinfo2.raw_data64()) {
+ found = true;
+ ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
+ Instr instr2 = instr_at(rinfo2.pc());
+ ASSERT(IsVldrDPcImmediateOffset(instr2));
+ delta = GetVldrDRegisterImmediateOffset(instr2);
+ delta += rinfo2.pc() - rinfo.pc();
+ break;
+ }
+ }
+
instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
- const double double_data = rinfo.data64();
- uint64_t uint_data = 0;
- OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
- emit(uint_data & 0xFFFFFFFF);
- emit(uint_data >> 32);
+ if (!found) {
+ uint64_t uint_data = rinfo.raw_data64();
+ emit(uint_data & 0xFFFFFFFF);
+ emit(uint_data >> 32);
+ }
}
// Emit 32-bit constant pool entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
+ for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
- rinfo.rmode() != RelocInfo::CONST_POOL);
-
- if (rinfo.rmode() == RelocInfo::NONE64) {
- // 64-bit values emitted earlier.
- continue;
- }
+ rinfo.rmode() != RelocInfo::CONST_POOL &&
+ rinfo.rmode() != RelocInfo::NONE64);
Instr instr = instr_at(rinfo.pc());
// 64-bit loads shouldn't get here.
ASSERT(!IsVldrDPcImmediateOffset(instr));
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
-
if (IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0) {
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
ASSERT(is_uint12(delta));
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+
+ bool found = false;
+ if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
+ for (int j = 0; j < i; j++) {
+ RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
+
+ if ((rinfo2.data() == rinfo.data()) &&
+ (rinfo2.rmode() == rinfo.rmode())) {
+ Instr instr2 = instr_at(rinfo2.pc());
+ if (IsLdrPcImmediateOffset(instr2)) {
+ delta = GetLdrRegisterImmediateOffset(instr2);
+ delta += rinfo2.pc() - rinfo.pc();
+ found = true;
+ break;
+ }
+ }
+ }
+ }
+
instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
- emit(rinfo.data());
+
+ if (!found) {
+ emit(rinfo.data());
+ }
} else {
ASSERT(IsMovW(instr));
- emit(rinfo.data());
}
}
- num_pending_reloc_info_ = 0;
+ num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
- first_const_pool_use_ = -1;
+ first_const_pool_32_use_ = -1;
+ first_const_pool_64_use_ = -1;
RecordComment("]");
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 8caa64df34..ccb5104206 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -164,18 +164,12 @@ struct Register {
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
- if (FLAG_enable_ool_constant_pool && (reg.code() >= kRegister_r8_Code)) {
- return reg.code() - 1;
- }
ASSERT(reg.code() < kMaxNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- if (FLAG_enable_ool_constant_pool && (index >= 7)) {
- return from_code(index + 1);
- }
return from_code(index);
}
@@ -285,6 +279,7 @@ struct DwVfpRegister {
// Any code included in the snapshot must be able to run both with 16 or 32
// registers.
inline static int NumRegisters();
+ inline static int NumReservedRegisters();
inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(DwVfpRegister reg);
@@ -785,10 +780,6 @@ class Assembler : public AssemblerBase {
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
- // Read/Modify the pointer in the branch/call/move instruction at pc.
- INLINE(static Address target_pointer_at(Address pc));
- INLINE(static void set_target_pointer_at(Address pc, Address target));
-
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
@@ -806,11 +797,6 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Address target);
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address constant_pool_entry,
- Address target);
-
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
@@ -1325,6 +1311,9 @@ class Assembler : public AssemblerBase {
// Check whether an immediate fits an addressing mode 1 instruction.
bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+ // Check whether an immediate fits an addressing mode 2 instruction.
+ bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
+
// Class for scoping postponing the constant pool generation.
class BlockConstPoolScope {
public:
@@ -1393,6 +1382,9 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
+ // Emits the address of the code stub's first instruction.
+ void emit_code_stub_address(Code* stub);
+
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
@@ -1444,7 +1436,8 @@ class Assembler : public AssemblerBase {
static const int kMaxDistToIntPool = 4*KB;
static const int kMaxDistToFPPool = 1*KB;
// All relocations could be integer, it therefore acts as the limit.
- static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
+ static const int kMaxNumPending32RelocInfo = kMaxDistToIntPool/kInstrSize;
+ static const int kMaxNumPending64RelocInfo = kMaxDistToFPPool/kInstrSize;
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -1482,11 +1475,16 @@ class Assembler : public AssemblerBase {
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
+#ifdef DEBUG
+ // Max pool start (if we need a jump and an alignment).
+ int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
+ ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
+ (start + num_pending_64_bit_reloc_info_ * kDoubleSize <
+ (first_const_pool_32_use_ + kMaxDistToIntPool)));
ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
+ (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
+#endif
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// still blocked
@@ -1535,7 +1533,8 @@ class Assembler : public AssemblerBase {
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
- int first_const_pool_use_;
+ int first_const_pool_32_use_;
+ int first_const_pool_64_use_;
// Relocation info generation
// Each relocation is encoded as a variable size value
@@ -1549,12 +1548,12 @@ class Assembler : public AssemblerBase {
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- // the buffer of pending relocation info
- RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
- // number of pending reloc info entries in the buffer
- int num_pending_reloc_info_;
- // Number of pending reloc info entries included above which also happen to
- // be 64-bit.
+ // The buffers of pending relocation info.
+ RelocInfo pending_32_bit_reloc_info_[kMaxNumPending32RelocInfo];
+ RelocInfo pending_64_bit_reloc_info_[kMaxNumPending64RelocInfo];
+ // Number of pending reloc info entries in the 32 bits buffer.
+ int num_pending_32_bit_reloc_info_;
+ // Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
// The bound position, before this we cannot do instruction elimination.
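(Illustrative sketch, not part of the patch.) The header above splits the single pending-relocation buffer into separate 32-bit and 64-bit buffers, each with its own first-use marker and range limit (kMaxDistToIntPool vs. kMaxDistToFPPool). The snippet below models just the distance bookkeeping that decides when a pool can no longer be postponed; the helper name and parameters are made up, and the real check also budgets for a jump, alignment padding and the 64-bit entries themselves.

#include <cstdio>

// Ranges quoted from the header above: ldr [pc, #imm] reaches about 4KB,
// vldr [pc, #imm] only about 1KB.
constexpr int kMaxDistToIntPool = 4 * 1024;
constexpr int kMaxDistToFPPool = 1 * 1024;

// A pool emission becomes mandatory once the first pending 32-bit or 64-bit
// entry would drift out of range of the instruction that loads it.
bool MustEmitPool(int pc_offset,
                  int first_32_use, int num_32,
                  int first_64_use, int num_64) {
  if (num_32 > 0 && pc_offset >= first_32_use + kMaxDistToIntPool) return true;
  if (num_64 > 0 && pc_offset >= first_64_use + kMaxDistToFPPool) return true;
  return false;
}

int main() {
  // A 64-bit entry first used at offset 100 forces a pool long before an
  // equally old 32-bit entry would.
  std::printf("%d\n", MustEmitPool(1200, 100, 1, 100, 1));  // 1
  std::printf("%d\n", MustEmitPool(1200, 100, 1, 0, 0));    // 0
  return 0;
}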
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index c075826a00..7898086c07 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -34,6 +34,7 @@
#include "deoptimizer.h"
#include "full-codegen.h"
#include "runtime.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -289,19 +290,15 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
- // Push call kind information.
- __ push(r5);
- // Function is also the parameter to the runtime call.
- __ push(r1);
+ // Push function as parameter to the runtime call.
+ __ Push(r1);
__ CallRuntime(function_id, 1);
- // Restore call kind information.
- __ pop(r5);
// Restore receiver.
__ pop(r1);
}
@@ -315,7 +312,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r0);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -326,22 +329,14 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r0);
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -406,9 +401,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ strb(r4, constructor_count);
__ b(ne, &allocate);
- __ Push(r1, r2);
+ __ push(r1);
- __ push(r1); // constructor
+ __ Push(r2, r1); // r1 = constructor
// The call will replace the stub, so the countdown is only done once.
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
@@ -610,13 +605,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
@@ -695,7 +687,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
- // r5-r6, r7 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
+ // r5-r6, r8 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
@@ -736,7 +728,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
if (!FLAG_enable_ool_constant_pool) {
- __ mov(r7, Operand(r4));
+ __ mov(r8, Operand(r4));
}
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
@@ -753,8 +745,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ CallStub(&stub);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Exit the JS frame and remove the parameters (except function), and
// return.
@@ -776,19 +767,36 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push function as parameter to the runtime call.
+ __ Push(r1);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+ // Restore receiver.
+ __ pop(r1);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
}
@@ -805,7 +813,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r2);
+ __ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
@@ -838,15 +846,15 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r2);
+ __ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
masm->isolate()), 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ PushFixedFrame(r1);
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Jump to point after the code-age stub.
__ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
@@ -940,18 +948,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sub(r1, r1, r2);
- __ SmiTag(r1);
-
- // Pass both function and pc offset as arguments.
+ // Pass function as argument.
__ push(r0);
- __ push(r1);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
// If the code object is null, just return to the unoptimized code.
@@ -1082,15 +1081,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(r4, Operand::Zero());
__ jmp(&patch_receiver);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
__ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -1150,18 +1143,17 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ b(eq, &function);
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r4, Operand(1));
__ b(ne, &non_proxy);
__ push(r1); // re-add proxy object as additional argument
__ add(r0, r0, Operand(1));
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
@@ -1176,25 +1168,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(r2);
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r2, r0); // Check formal and actual parameter counts.
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET,
ne);
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(0);
- __ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(r3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
@@ -1219,8 +1211,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
+ __ Push(r1, r0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
// End of stack check.
@@ -1278,13 +1269,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ b(&push_receiver);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+ __ ldr(r0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -1302,8 +1288,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// r0: current argument index
__ bind(&loop);
__ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
+ __ Push(r1, r0);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
@@ -1321,27 +1306,25 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ cmp(r0, r1);
__ b(ne, &loop);
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(r0);
__ SmiUntag(r0);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
__ push(r1); // add function proxy as last argument
__ add(r0, r0, Operand(1));
__ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1355,8 +1338,11 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(3 * kPointerSize));
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
+ (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
+ __ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1366,7 +1352,8 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
+ __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
@@ -1379,13 +1366,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r0 : actual number of arguments
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
- // -- r3 : code entry to call
- // -- r5 : call kind information
// -----------------------------------
Label invoke, dont_adapt_arguments;
Label enough, too_few;
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r0, r2);
__ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -1453,7 +1439,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: code entry to call
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
- __ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
+ // Adjust for frame.
+ __ sub(r2, r2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
Label fill;
__ bind(&fill);
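(Illustrative sketch, not part of the patch.) The adaptor-frame hunks above replace hard-coded 3- and 4-slot offsets with StandardFrameConstants::kFixedFrameSizeFromFp so the same code works whether or not the out-of-line constant pool pointer (pp) is part of the frame. A rough standalone model of that arithmetic, with the slot counts assumed for illustration rather than taken from the V8 headers:

#include <cassert>
#include <cstdio>

constexpr int kPointerSize = 4;  // 32-bit ARM

// Assumed fixed-frame slots below fp: function and the context-slot marker,
// plus pp when the out-of-line constant pool is enabled.
int FixedFrameSizeFromFp(bool ool_constant_pool) {
  return (ool_constant_pool ? 3 : 2) * kPointerSize;
}

// EnterArgumentsAdaptorFrame pushes {argc, function, marker, [pp], fp, lr} and
// points fp at the saved-fp slot, so fp sits one extra slot (the saved argc)
// beyond the fixed frame above sp.
int FpToArgcSlot(bool ool_constant_pool) {
  return FixedFrameSizeFromFp(ool_constant_pool) + kPointerSize;
}

int main() {
  // Without pp this reproduces the old hard-coded "-3 * kPointerSize" load of
  // the saved argument count in LeaveArgumentsAdaptorFrame.
  assert(FpToArgcSlot(false) == 3 * kPointerSize);
  assert(FpToArgcSlot(true) == 4 * kPointerSize);
  std::printf("argc at fp - %d (no pp), fp - %d (with pp)\n",
              FpToArgcSlot(false), FpToArgcSlot(true));
  return 0;
}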
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 9330eb1411..501b5c7dff 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -49,6 +49,16 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
}
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -77,7 +87,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -95,8 +105,8 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { r2, r3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -113,6 +123,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2, r1, r0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -169,18 +201,6 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
}
-void BinaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r1, r0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
@@ -188,15 +208,22 @@ static void InitializeArrayConstructorDescriptor(
// register state
// r0 -- number of arguments
// r1 -- function
- // r2 -- type info cell with elements kind
- static Register registers[] = { r1, r2 };
- descriptor->register_param_count_ = 2;
- if (constant_stack_parameter_count != 0) {
+ // r2 -- allocation site with elements kind
+ static Register registers_variable_args[] = { r1, r2, r0 };
+ static Register registers_no_args[] = { r1, r2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// stack param count needs (constructor pointer, and single argument)
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = r0;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
@@ -210,15 +237,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// r0 -- number of arguments
// r1 -- constructor function
- static Register registers[] = { r1 };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { r1, r0 };
+ static Register registers_no_args[] = { r1 };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// stack param count needs (constructor pointer, and single argument)
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = r0;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -302,6 +335,135 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2, r1, r0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ static PlatformCallInterfaceDescriptor default_descriptor =
+ PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ static PlatformCallInterfaceDescriptor noInlineDescriptor =
+ PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { r1, // JSFunction
+ cp, // context
+ r0, // actual number of arguments
+ r2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ r2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ r2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ r0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { r0, // callee
+ r4, // call_data
+ r2, // holder
+ r1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -343,110 +505,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(r1, Operand(Smi::FromInt(0)));
- __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ pop();
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(r3, &after_sentinel);
- if (FLAG_debug_code) {
- __ cmp(r3, Operand::Zero());
- __ Assert(eq, kExpected0AsASmiSentinel);
- }
- __ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
- __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
- __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
- __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -544,20 +602,20 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
+ ASSERT(is_truncating());
int double_offset = offset();
// Account for saved regs if input is sp.
- if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+ if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
- // Immediate values for this stub fit in instructions, so it's safe to use ip.
- Register scratch = ip;
+ Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
Register scratch_low =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
Register scratch_high =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
LowDwVfpRegister double_scratch = kScratchDoubleReg;
- __ Push(scratch_high, scratch_low);
+ __ Push(scratch_high, scratch_low, scratch);
if (!skip_fastpath()) {
// Load double input.
@@ -640,32 +698,17 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ bind(&done);
- __ Pop(scratch_high, scratch_low);
+ __ Pop(scratch_high, scratch_low, scratch);
__ Ret();
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
- return true;
- }
- if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
+ stub2.GetCode(isolate);
}
@@ -1198,241 +1241,16 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in d2, double result goes
- // into d2.
- // Tagged case: tagged input on top of stack and in r0,
- // tagged result (heap number) goes into r0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = r9;
- Register scratch1 = no_reg; // will be r4
- const Register cache_entry = r0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (tagged) {
- // Argument is a number and is on stack and in r0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(r0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ SmiToDouble(d7, r0);
- __ vmov(r2, r3, d7);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(r2, r3, d0);
- } else {
- // Input is untagged double in d2. Output goes to d2.
- __ vmov(r2, r3, d2);
- }
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- Isolate* isolate = masm->isolate();
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(isolate);
- __ mov(cache_entry, Operand(cache_array));
- // cache_entry points to cache array.
- int cache_array_index
- = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
- __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(cache_entry, Operand::Zero());
- __ b(eq, &invalid_cache);
-
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ cmp(r3, r5, eq);
- __ b(ne, &calculate);
-
- scratch1 = r4; // Start of scratch1 range.
-
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into r0.
- __ pop();
- __ mov(r0, Operand(r6));
- } else {
- // Load result into d2.
- __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- }
- __ Ret();
-
- __ bind(&calculate);
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
- if (tagged) {
- __ bind(&invalid_cache);
- ExternalReference runtime_function =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime_function, 1, 1);
- } else {
- Label no_update;
- Label skip_cache;
-
- // Call C function to calculate the result and update the cache.
- // r0: precalculated cache entry address.
- // r2 and r3: parts of the double value.
- // Store r0, r2 and r3 on stack for later before calling C function.
- __ Push(r3, r2, cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ Pop(r3, r2, cache_entry);
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
- __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
- __ Ret();
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
- __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
- __ bind(&no_update);
-
- // We return the value in d2 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-}
-
-
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- Isolate* isolate = masm->isolate();
-
- __ push(lr);
- __ PrepareCallCFunction(0, 1, scratch);
- if (masm->use_eabi_hardfloat()) {
- __ vmov(d0, d2);
- } else {
- __ vmov(r0, r1, d2);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(lr);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = r1;
const Register exponent = r2;
const Register heapnumbermap = r5;
const Register heapnumber = r0;
- const DwVfpRegister double_base = d1;
- const DwVfpRegister double_exponent = d2;
- const DwVfpRegister double_result = d3;
- const DwVfpRegister double_scratch = d0;
- const SwVfpRegister single_scratch = s0;
+ const DwVfpRegister double_base = d0;
+ const DwVfpRegister double_exponent = d1;
+ const DwVfpRegister double_result = d2;
+ const DwVfpRegister double_scratch = d3;
+ const SwVfpRegister single_scratch = s6;
const Register scratch = r9;
const Register scratch2 = r4;
@@ -1532,13 +1350,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
}
__ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
@@ -1606,13 +1424,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
}
__ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
@@ -1626,21 +1444,15 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
- return (!save_doubles_ || isolate->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- BinaryOpStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -1659,16 +1471,13 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
store_buffer_overflow_code = *stub.GetCode(isolate);
}
- save_doubles_code->set_is_pregenerated(true);
- store_buffer_overflow_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -1946,13 +1755,18 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r4: argv
Isolate* isolate = masm->isolate();
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r8, Operand(Smi::FromInt(marker)));
+ if (FLAG_enable_ool_constant_pool) {
+ __ mov(r8, Operand(Smi::FromInt(marker)));
+ }
+ __ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5,
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
__ ldr(r5, MemOperand(r5));
__ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- __ Push(ip, r8, r6, r5);
+ __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
+ (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
+ ip.bit());
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -2536,7 +2350,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
- __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ cmp(r1, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
@@ -2587,7 +2401,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
__ add(r6, r1, Operand(Smi::FromInt(2)));
__ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
__ add(r6, r4, Operand(r1, LSL, 1));
__ add(r6, r6, Operand(kParameterMapHeaderSize));
__ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
@@ -2981,7 +2795,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -2992,12 +2806,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 4, r3: End of string data
// Argument 3, r2: Start of string data
// Prepare start and end index of the input.
- __ add(r9, r8, Operand(r9, LSL, r3));
+ __ add(r9, r7, Operand(r9, LSL, r3));
__ add(r2, r9, Operand(r1, LSL, r3));
- __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
- __ SmiUntag(r8);
- __ add(r3, r9, Operand(r8, LSL, r3));
+ __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
+ __ SmiUntag(r7);
+ __ add(r3, r9, Operand(r7, LSL, r3));
// Argument 2 (r1): Previous index.
// Already there
@@ -3190,177 +3004,103 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- Factory* factory = masm->isolate()->factory();
-
- __ ldr(r1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(r1, &slowcase);
- __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
- __ b(hi, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ SmiUntag(r5, r1);
- __ add(r2, r5, Operand(objects_size));
- __ Allocate(
- r2, // In: Size, in words.
- r0, // Out: Start of allocation (tagged).
- r3, // Scratch register.
- r4, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // r0: Start of allocated area, object-tagged.
- // r1: Number of elements in array, as smi.
- // r5: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(factory->empty_fixed_array()));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ ldr(r1, MemOperand(sp, kPointerSize * 0));
- __ ldr(r2, MemOperand(sp, kPointerSize * 1));
- __ ldr(r6, MemOperand(sp, kPointerSize * 2));
- __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
- __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
- __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // r0: JSArray, tagged.
- // r3: FixedArray, tagged.
- // r5: Number of elements in array, untagged.
-
- // Set map.
- __ mov(r2, Operand(factory->fixed_array_map()));
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ SmiTag(r6, r5);
- __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with undefined.
- // r0: JSArray, tagged.
- // r2: undefined.
- // r3: Start of elements in FixedArray.
- // r5: Number of elements to fill.
- Label loop;
- __ cmp(r5, Operand::Zero());
- __ bind(&loop);
- __ b(le, &done); // Jump if r5 is negative or zero.
- __ sub(r5, r5, Operand(1), SetCC);
- __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
- __ jmp(&loop);
-
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
- // r2 : cache cell for call target
- Label initialize, done, miss, megamorphic, not_array_function;
+ // r2 : Feedback vector
+ // r3 : slot in feedback vector (Smi)
+ Label check_array, initialize_array, initialize_non_array, megamorphic, done;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+ Heap::RootListIndex kMegamorphicRootIndex = Heap::kUndefinedValueRootIndex;
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->the_hole_value());
+ Heap::RootListIndex kUninitializedRootIndex = Heap::kTheHoleValueRootIndex;
+ ASSERT_EQ(*TypeFeedbackInfo::PremonomorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->null_value());
+ Heap::RootListIndex kPremonomorphicRootIndex = Heap::kNullValueRootIndex;
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
+ // Load the cache state into r4.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmp(r3, r1);
+ __ cmp(r4, r1);
+ __ b(eq, &done);
+ __ CompareRoot(r4, kMegamorphicRootIndex);
__ b(eq, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in ecx.
- __ ldr(r5, FieldMemOperand(r3, 0));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &miss);
+ // Check if we're dealing with the Array function or not.
+ __ LoadArrayFunction(r5);
+ __ cmp(r1, r5);
+ __ b(eq, &check_array);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
+ // Non-array cache: Check the cache state.
+ __ CompareRoot(r4, kPremonomorphicRootIndex);
+ __ b(eq, &initialize_non_array);
+ __ CompareRoot(r4, kUninitializedRootIndex);
__ b(ne, &megamorphic);
+
+ // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
+ // immortal immovable object (null) so no write-barrier is needed.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, kPremonomorphicRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done);
- __ bind(&miss);
+ // Array cache: Check the cache state to see if we're in a monomorphic
+ // state where the state object is an AllocationSite object.
+ __ bind(&check_array);
+ __ ldr(r5, FieldMemOperand(r4, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(eq, &done);
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(eq, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ CompareRoot(r4, kUninitializedRootIndex);
+ __ b(eq, &initialize_array);
+ __ CompareRoot(r4, kPremonomorphicRootIndex);
+ __ b(eq, &initialize_array);
+
+ // Both caches: Monomorphic -> megamorphic. The sentinel is an
+ // immortal immovable object (undefined) so no write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, kMegamorphicRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if the function is the Array constructor.
- __ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ bind(&initialize_array);
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Arguments register must be smi-tagged to call out.
__ SmiTag(r0);
- __ push(r0);
- __ push(r1);
- __ push(r2);
+ __ Push(r3, r2, r1, r0);
CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub);
- __ pop(r2);
- __ pop(r1);
- __ pop(r0);
+ __ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
__ b(&done);
- __ bind(&not_array_function);
- __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ // Non-array cache: Premonomorphic -> monomorphic.
+ __ bind(&initialize_non_array);
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r1, MemOperand(r4, 0));
+
+ __ Push(r4, r2, r1);
+ __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(r4, r2, r1);
__ bind(&done);
}
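
The slot addressing used throughout the new GenerateRecordCallTarget is worth spelling out: the feedback vector in r2 is a FixedArray, the slot index in r3 is a Smi, and Operand::PointerOffsetFromSmiKey plus FieldMemOperand(..., FixedArray::kHeaderSize) together turn that pair into the slot's address. A stand-alone sketch of the arithmetic, assuming 32-bit pointers, a 1-bit Smi tag, a heap-object tag of 1 and a two-word FixedArray header (assumptions for illustration, not taken from this patch):

    // Hypothetical stand-alone model of the feedback-vector slot addressing
    // used above; the constants mirror 32-bit V8 but are assumptions here.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kPointerSize   = 4;
      const uintptr_t kHeapObjectTag = 1;
      const uintptr_t kHeaderSize    = 2 * kPointerSize;  // map + length words

      uintptr_t vector      = 0x20000001;  // tagged FixedArray pointer (r2)
      uintptr_t slot_as_smi = 3u << 1;     // Smi-encoded slot index (r3)

      // Operand::PointerOffsetFromSmiKey: untag the Smi, scale to bytes.
      uintptr_t byte_offset = (slot_as_smi >> 1) * kPointerSize;
      // FieldMemOperand(..., FixedArray::kHeaderSize): skip map/length, drop tag.
      uintptr_t slot_address = vector + byte_offset + kHeaderSize - kHeapObjectTag;

      printf("slot %lu lives at %#lx\n",
             (unsigned long)(slot_as_smi >> 1), (unsigned long)slot_address);
      return 0;
    }
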
@@ -3368,108 +3108,119 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
- // r2 : cache cell for call target
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(ne, &call);
- // Patch the receiver on the stack with the global receiver object.
- __ ldr(r3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
- }
+ // r2 : feedback vector
+ // r3 : (only if r2 is not undefined) slot in feedback vector (Smi)
+ Label slow, non_function, wrap, cont;
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &non_function);
- // Get the map of the function object.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
+ if (NeedsChecks()) {
+ // Check that the function is really a JavaScript function.
+ // r1: pushed function (to be verified)
+ __ JumpIfSmi(r1, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ // Goto slow case if we do not have a function.
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ b(ne, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
}
// Fast-case: Invoke the function now.
// r1: pushed function
ParameterCount actual(argc_);
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(eq, &call_as_function);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
+ if (CallAsMethod()) {
+ if (NeedsChecks()) {
+ // Do not transform the receiver for strict mode functions.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, &cont);
+
+ // Do not transform the receiver for natives (compiler hints already in r4).
+ __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &cont);
+ }
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ // Compute the receiver in non-strict mode.
+ __ ldr(r3, MemOperand(sp, argc_ * kPointerSize));
+
+ if (NeedsChecks()) {
+ __ JumpIfSmi(r3, &wrap);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
}
- // Check for function proxy.
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function);
- __ push(r1); // put proxy as additional argument
- __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
- __ mov(r2, Operand::Zero());
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_METHOD);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (NeedsChecks()) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ if (RecordCallTarget()) {
+ // If there is a call target cache, mark it megamorphic in the
+ // non-function case. MegamorphicSentinel is an immortal immovable
+ // object (undefined) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ }
+ // Check for function proxy.
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(ne, &non_function);
+ __ push(r1); // put proxy as additional argument
+ __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ mov(r2, Operand::Zero());
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(&non_function);
+ __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+ __ mov(r0, Operand(argc_)); // Set up the number of arguments.
+ __ mov(r2, Operand::Zero());
+ __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
}
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Set up the number of arguments.
- __ mov(r2, Operand::Zero());
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ if (CallAsMethod()) {
+ __ bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(r1);
+ }
+ __ str(r0, MemOperand(sp, argc_ * kPointerSize));
+ __ jmp(&cont);
+ }
}
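
For readers following the CallAsMethod path above, the wrap/cont labels encode the usual sloppy-mode receiver rule: strict-mode and native functions get the receiver unchanged, spec objects pass through as-is, and only primitive receivers of sloppy functions are boxed via Builtins::TO_OBJECT. A hypothetical C++ restatement of just that rule (names are illustrative, not V8 API):

    // "strict"/"native" stand in for the SharedFunctionInfo compiler hints,
    // "SpecObject" for the FIRST_SPEC_OBJECT_TYPE check.
    #include <cstdio>

    struct Callee { bool strict; bool native; };
    enum class Receiver { Primitive, SpecObject };

    const char* ReceiverToPass(const Callee& f, Receiver r) {
      if (f.strict || f.native) return "as-is";        // the &cont path
      if (r == Receiver::SpecObject) return "as-is";   // already an object
      return "ToObject(receiver)";                     // the &wrap path
    }

    int main() {
      Callee sloppy{false, false}, strict_fn{true, false};
      printf("%s\n", ReceiverToPass(sloppy, Receiver::Primitive));    // ToObject(...)
      printf("%s\n", ReceiverToPass(strict_fn, Receiver::Primitive)); // as-is
      return 0;
    }
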
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : (only if r2 is not undefined) slot in feedback vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
@@ -3477,7 +3228,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
// Jump to the function-specific construct stub.
- Register jmp_reg = r3;
+ Register jmp_reg = r4;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3485,20 +3236,19 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
- // r3: object type
+ // r4: object type
Label do_call;
__ bind(&slow);
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
__ bind(&non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -3635,36 +3385,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand::Zero());
- }
- __ b(eq, &done);
-
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex));
- // Perform sub between load and dependent store to get the load time to
- // complete.
- __ sub(count, count, Operand(1), SetCC);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
- // Branch back while count > 0; the condition fails on the last iteration.
- __ b(gt, &loop);
-
- __ bind(&done);
-}
-
-
enum CopyCharactersFlags {
COPY_ASCII = 1,
DEST_ALWAYS_ALIGNED = 2
@@ -3812,143 +3532,6 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ sub(scratch, c1, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
- __ b(hi, &not_array_index);
- __ sub(scratch, c2, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-
- // If the check failed, combine both characters into a single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in the c1 register.
- __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
- __ b(ls, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load string table
- // Load address of first element of the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, 1));
- __ sub(mask, mask, Operand(1));
-
- // Calculate untagged address of the first element of the string table.
- Register first_string_table_element = string_table;
- __ add(first_string_table_element, string_table,
- Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_string_table_element: address of the first element of
- // the string table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the string table.
- const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- if (i > 0) {
- __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ and_(candidate, candidate, Operand(mask));
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ ldr(candidate,
- MemOperand(first_string_table_element,
- candidate,
- LSL,
- kPointerSizeLog2));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
- __ b(ne, &is_string);
-
- __ cmp(undefined, candidate);
- __ b(eq, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(ip, candidate);
- __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole);
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(2)));
- __ b(ne, &next_probe[i]);
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
- __ cmp(chars, scratch);
- __ b(eq, &found_in_string_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ Move(r0, result);
-}
-
-
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
@@ -4363,359 +3946,240 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
+void ArrayPushStub::Generate(MacroAssembler* masm) {
+ Register receiver = r0;
+ Register scratch = r1;
- Counters* counters = masm->isolate()->counters();
+ int argc = arguments_count();
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfEitherSmi(r0, r1, &call_runtime);
- // Load instance types.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ tst(r4, Operand(kIsNotStringMask));
- __ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &call_runtime);
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
+ if (argc == 0) {
+ // Nothing to do, just return the length.
+ __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+ return;
}
- // Both arguments are strings.
- // r0: first string
- // r1: second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings is empty. In that case return the other.
- __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
- __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
- __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- STATIC_ASSERT(kSmiTag == 0);
- // Else test if second string is empty.
- __ cmp(r3, Operand(Smi::FromInt(0)), ne);
- __ b(ne, &strings_not_empty); // If either string was empty, return r0.
-
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ Isolate* isolate = masm->isolate();
- __ bind(&strings_not_empty);
+ if (argc != 1) {
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
}
- __ SmiUntag(r2);
- __ SmiUntag(r3);
- // Both strings are non-empty.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ add(r6, r2, Operand(r3));
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return a string here.
- __ cmp(r6, Operand(2));
- __ b(ne, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ Label call_builtin, attempt_to_grow_elements, with_write_barrier;
- __ bind(&make_two_character_string);
- // The resulting string has length 2, and the first characters of the two
- // strings are combined into a single halfword in the r2 register.
- // This lets us fill the resulting string with a single halfword store
- // instruction instead of two loops (assuming the processor is in
- // little-endian mode).
- __ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ Register elements = r6;
+ Register end_elements = r5;
+ // Get the elements array of the object.
+ __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(r6, Operand(ConsString::kMinLength));
- __ b(lt, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
}
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r4, Operand(kStringEncodingMask));
- __ tst(r5, Operand(kStringEncodingMask), ne);
- __ b(eq, &non_ascii);
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ mov(r4, Operand(high_promotion_mode));
- __ ldr(r4, MemOperand(r4, 0));
- __ cmp(r4, Operand::Zero());
- __ b(eq, &skip_write_barrier);
-
- __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
- __ RecordWriteField(r3,
- ConsString::kFirstOffset,
- r0,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
- __ RecordWriteField(r3,
- ConsString::kSecondOffset,
- r1,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
- __ bind(&skip_write_barrier);
- __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
+ // Get the array's length into scratch and calculate new length.
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(scratch, scratch, Operand(Smi::FromInt(argc)));
- __ bind(&after_writing);
+ // Get the elements' length.
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ mov(r0, Operand(r3));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ // Check if we could survive without allocation.
+ __ cmp(scratch, r4);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one-byte characters.
- // r4: first instance type.
- // r5: second instance type.
- __ tst(r4, Operand(kOneByteDataHintMask));
- __ tst(r5, Operand(kOneByteDataHintMask), ne);
- __ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
- __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
- __ b(eq, &ascii_data);
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r6: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
+ const int kEndElementsOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- // Check whether both strings have same encoding
- __ eor(ip, r4, Operand(r5));
- ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
- __ tst(ip, Operand(kStringEncodingMask));
- __ b(ne, &call_runtime);
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ __ b(gt, &attempt_to_grow_elements);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r6,
- r0,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &first_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r4, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
+ // Check if value is a smi.
+ __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ JumpIfNotSmi(r4, &with_write_barrier);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r1,
- r1,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &second_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r5, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r6: first character of first string
- // r1: first character of second string
- // r2: length of first string.
- // r3: length of second string.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r5, Operand(kStringEncodingMask));
- __ b(eq, &non_ascii_string_add_flat_result);
+ // Store the value.
+ // We may need a register containing the address end_elements below, so
+ // the store uses pre-index writeback to leave that address in end_elements.
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
+ __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+ } else {
+ // Check if we could survive without allocation.
+ __ cmp(scratch, r4);
+ __ b(gt, &call_builtin);
- __ add(r2, r2, Operand(r3));
- __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
- __ sub(r2, r2, Operand(r3));
- __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r6: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r5: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
- // r5: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
+ __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0,
+ &call_builtin, argc * kDoubleSize);
+ }
+
+ // Save new length.
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Drop(argc + 1);
+ __ mov(r0, scratch);
__ Ret();
- __ bind(&non_ascii_string_add_flat_result);
- __ add(r2, r2, Operand(r3));
- __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
- __ sub(r2, r2, Operand(r3));
- __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r6: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r5: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
- // r5: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ __ bind(&with_write_barrier);
+
+ if (IsFastSmiElementsKind(elements_kind())) {
+ if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
+
+ __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r9, ip);
+ __ b(eq, &call_builtin);
+
+ ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
+ ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
+ __ ldr(r3, ContextOperand(r3, Context::JS_ARRAY_MAPS_INDEX));
+ const int header_size = FixedArrayBase::kHeaderSize;
+ // Verify that the object can be transitioned in place.
+ const int origin_offset = header_size + elements_kind() * kPointerSize;
+ __ ldr(r2, FieldMemOperand(receiver, origin_offset));
+ __ ldr(ip, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r2, ip);
+ __ b(ne, &call_builtin);
+
+ const int target_offset = header_size + target_kind * kPointerSize;
+ __ ldr(r3, FieldMemOperand(r3, target_offset));
+ __ mov(r2, receiver);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, DONT_TRACK_ALLOCATION_SITE, NULL);
+ }
+
+ // Save new length.
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Store the value.
+ // We may need a register containing the address end_elements below, so the
+ // store uses pre-index writeback to leave that address in end_elements.
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
+ __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+ __ RecordWrite(elements,
+ end_elements,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Drop(argc + 1);
+ __ mov(r0, scratch);
__ Ret();
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ __ bind(&attempt_to_grow_elements);
+ // scratch: array's length + 1.
- if (call_builtin.is_linked()) {
+ if (!FLAG_inline_new) {
__ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
}
-}
+ __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case the
+ // new element is non-Smi. For now, delegate to the builtin.
+ if (IsFastSmiElementsKind(elements_kind())) {
+ __ JumpIfNotSmi(r2, &call_builtin);
+ }
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(r0);
- __ push(r1);
-}
+ // We could be lucky and the elements array could be at the top of new-space.
+ // In this case we can just grow it in place by moving the allocation pointer
+ // up.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate);
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate);
+
+ const int kAllocationDelta = 4;
+ ASSERT(kAllocationDelta >= argc);
+ // Load top and check if it is the end of elements.
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
+ __ add(end_elements, end_elements, Operand(kEndElementsOffset));
+ __ mov(r4, Operand(new_space_allocation_top));
+ __ ldr(r3, MemOperand(r4));
+ __ cmp(end_elements, r3);
+ __ b(ne, &call_builtin);
+
+ __ mov(r9, Operand(new_space_allocation_limit));
+ __ ldr(r9, MemOperand(r9));
+ __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
+ __ cmp(r3, r9);
+ __ b(hi, &call_builtin);
+
+ // We fit and could grow elements.
+ // Update new_space_allocation_top.
+ __ str(r3, MemOperand(r4));
+ // Push the argument.
+ __ str(r2, MemOperand(end_elements));
+ // Fill the rest with holes.
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < kAllocationDelta; i++) {
+ __ str(r3, MemOperand(end_elements, i * kPointerSize));
+ }
+ // Update elements' and array's sizes.
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
+ __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(r1);
- __ pop(r0);
+ // Elements are in new space, so write barrier is not required.
+ __ Drop(argc + 1);
+ __ mov(r0, scratch);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
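
The attempt_to_grow_elements path above rests on one observation: if the end of the elements backing store coincides with the new-space allocation top, the array can grow in place by bumping the top, as long as the bump stays at or below the allocation limit; otherwise the stub falls back to the C++ ArrayPush builtin. A stand-alone sketch of that check, with made-up addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kPointerSize     = 4;
      const int       kAllocationDelta = 4;  // matches the constant in the stub

      uintptr_t end_of_elements = 0x30000100;  // one past the last element slot
      uintptr_t new_space_top   = 0x30000100;
      uintptr_t new_space_limit = 0x30001000;

      bool elements_are_last_object = (end_of_elements == new_space_top);
      bool bump_fits =
          new_space_top + kAllocationDelta * kPointerSize <= new_space_limit;

      if (elements_are_last_object && bump_fits) {
        new_space_top += kAllocationDelta * kPointerSize;  // grow in place
        printf("grew in place, new top %#lx\n", (unsigned long)new_space_top);
      } else {
        printf("fall back to the C++ ArrayPush builtin\n");
      }
      return 0;
    }
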
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
- __ b(lt, &done);
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
- __ mov(arg, scratch1);
- __ str(arg, MemOperand(sp, stack_offset));
- __ bind(&done);
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : left
+ // -- r0 : right
+ // -- lr : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ // Load r2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ Move(r2, handle(isolate->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, kExpectedAllocationSite);
+ __ push(r2);
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
+ __ cmp(r2, ip);
+ __ pop(r2);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(state_);
+ __ TailCallStub(&stub);
}
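
The undefined value moved into r2 above is only a placeholder; as the comment says, the per-call-site copy of this stub later gets the real AllocationSite patched in via GetCodeCopyFromTemplate, which is what the debug-mode checks (non-Smi, allocation-site map) guard. A toy model of that template-then-patch idea, deliberately not using any real V8 API:

    #include <cassert>
    #include <cstdio>

    struct HeapObjectToy { const char* map; };
    struct CodeToy { HeapObjectToy* embedded_constant; };

    int main() {
      HeapObjectToy undefined_value{"oddball-map"};
      HeapObjectToy allocation_site{"allocation-site-map"};

      CodeToy template_code{&undefined_value};
      CodeToy copy = template_code;               // per-call-site code copy
      copy.embedded_constant = &allocation_site;  // the "patch" step

      // The stub's debug check: the constant must carry the allocation-site map.
      assert(copy.embedded_constant->map == allocation_site.map);
      printf("patched constant map: %s\n", copy.embedded_constant->map);
      return 0;
    }
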
@@ -5021,8 +4485,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
- __ push(lr);
- __ Push(r1, r0);
+ __ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
__ push(ip);
__ CallExternalReference(miss, 3);
@@ -5030,8 +4493,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(lr);
- __ pop(r0);
- __ pop(r1);
+ __ Pop(r1, r0);
}
__ Jump(r2);
@@ -5305,89 +4767,13 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
- // FastNewClosureStub::Generate
- { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
- // StringAddStub::Generate
- { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
// Hydrogen code stubs need stub2 at snapshot time.
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
+ stub2.GetCode(isolate);
}
@@ -5678,7 +5064,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
- AllowStubCallsScope allow_stub_calls(masm, true);
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
@@ -5754,21 +5139,16 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
- mode);
+ T stub(GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
- __ b(ne, &next);
T stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ __ TailCallStub(&stub, eq);
}
// If we reached this point there is a problem.
@@ -5781,7 +5161,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // r2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// r0 - number of arguments
// r1 - constructor?
@@ -5810,45 +5190,40 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind holey_initial = GetHoleyElementsKind(initial);
ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ add(r3, r3, Operand(1));
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
if (FLAG_debug_code) {
- __ ldr(r5, FieldMemOperand(r5, 0));
+ __ ldr(r5, FieldMemOperand(r2, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSiteInCell);
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ __ Assert(eq, kExpectedAllocationSite);
}
- // Save the resulting elements kind in type info
- __ SmiTag(r3);
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
- __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(r3);
+ // Save the resulting elements kind in type info. We can't just store r3
+ // in the AllocationSite::transition_info field because the elements kind
+ // is restricted to a portion of the field; the upper bits must be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
+ __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
- __ b(ne, &next);
ArraySingleArgumentConstructorStub stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ __ TailCallStub(&stub, eq);
}
// If we reached this point there is a problem.
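
The additive update above works because, as the comment notes, the elements kind occupies only the low ElementsKindBits of AllocationSite::transition_info, and the packed and holey kinds are adjacent in the fast-kind sequence, so adding Smi(kFastElementsKindPackedToHoley) changes the kind without disturbing the upper bits. A small model of that update, with an assumed 5-bit kind field and illustrative values:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kSmiTagSize    = 1;
      const uint32_t kKindBits      = 5;                    // assumed field width
      const uint32_t kKindMask      = (1u << kKindBits) - 1;
      const uint32_t kPackedToHoley = 1;                    // packed -> holey delta

      uint32_t other_state = 0xABC;  // whatever else the site keeps up there
      uint32_t kind        = 2;      // a packed fast kind, for the example
      uint32_t transition_info =
          ((other_state << kKindBits) | kind) << kSmiTagSize;  // Smi-tagged field

      // The dispatch code above: add Smi(kPackedToHoley), upper bits untouched.
      transition_info += kPackedToHoley << kSmiTagSize;

      // ArrayConstructorStub::Generate reads it back by untagging and masking.
      uint32_t new_kind = (transition_info >> kSmiTagSize) & kKindMask;
      uint32_t upper    = (transition_info >> kSmiTagSize) >> kKindBits;

      printf("kind %u -> %u, upper bits preserved: %s\n",
             kind, new_kind, upper == other_state ? "yes" : "no");
      return 0;
    }
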
@@ -5861,20 +5236,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate);
}
}
}
@@ -5896,11 +5266,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -5937,7 +5307,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
// -- r1 : constructor
- // -- r2 : type info cell
+ // -- r2 : feedback vector (fixed array or undefined)
+ // -- r3 : slot index (if r2 is fixed array)
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -5946,21 +5317,25 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ tst(r3, Operand(kSmiTagMask));
+ __ tst(r4, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r4, r4, r5, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in r2 or a valid cell
+ // We should either have undefined in r2 or a valid fixed array.
Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
+ Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &okay_here);
- __ ldr(r3, FieldMemOperand(r2, 0));
- __ cmp(r3, Operand(cell_map));
- __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
+ __ ldr(r4, FieldMemOperand(r2, 0));
+ __ cmp(r4, Operand(fixed_array_map));
+ __ Assert(eq, kExpectedFixedArrayInRegisterR2);
+
+ // r3 should be a Smi if we don't have undefined in r2.
+ __ AssertSmi(r3);
+
__ bind(&okay_here);
}
@@ -5968,16 +5343,19 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
+ __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r2, FieldMemOperand(r2, FixedArray::kHeaderSize));
- // If the type cell is undefined, or contains anything other than an
+ // If the feedback vector is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ ldr(r4, FieldMemOperand(r3, 0));
+ __ ldr(r4, FieldMemOperand(r2, 0));
__ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &no_info);
- __ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
@@ -5987,37 +5365,27 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
+ __ cmp(r0, Operand(1));
- __ tst(r0, r0);
- __ b(ne, &not_zero_case);
InternalArrayNoArgumentConstructorStub stub0(kind);
- __ TailCallStub(&stub0);
+ __ TailCallStub(&stub0, lo);
- __ bind(&not_zero_case);
- __ cmp(r0, Operand(1));
- __ b(gt, &not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN, hi);
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument
__ ldr(r3, MemOperand(sp, 0));
__ cmp(r3, Operand::Zero());
- __ b(eq, &normal_sequence);
InternalArraySingleArgumentConstructorStub
stub1_holey(GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
+ __ TailCallStub(&stub1_holey, ne);
}
- __ bind(&normal_sequence);
InternalArraySingleArgumentConstructorStub stub1(kind);
__ TailCallStub(&stub1);
-
- __ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
- __ TailCallStub(&stubN);
}
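
With the rewrite above, GenerateCase needs only one cmp against 1: lo selects the no-argument stub, hi the N-argument stub, and the fall-through handles exactly one argument, where ne (after comparing the length argument with zero) picks the holey variant for packed kinds. The same dispatch restated as plain control flow, as a sketch:

    #include <cstdio>

    enum class Stub { NoArgument, SingleArgument, SingleArgumentHoley, NArguments };

    Stub Select(int argc, int first_arg, bool packed_kind) {
      if (argc < 1) return Stub::NoArgument;             // the lo tail call
      if (argc > 1) return Stub::NArguments;             // the hi tail call
      if (packed_kind && first_arg != 0)                 // ne after cmp with zero
        return Stub::SingleArgumentHoley;
      return Stub::SingleArgument;
    }

    int main() {
      printf("%d %d %d\n",
             (int)Select(0, 0, true),   // 0: NoArgument
             (int)Select(1, 5, true),   // 2: SingleArgumentHoley
             (int)Select(3, 0, true));  // 3: NArguments
      return 0;
    }
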
@@ -6070,6 +5438,160 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : callee
+ // -- r4 : call_data
+ // -- r2 : holder
+ // -- r1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1)* 4] : first argument
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ Register callee = r0;
+ Register call_data = r4;
+ Register holder = r2;
+ Register api_function_address = r1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ Isolate* isolate = masm->isolate();
+
+ // context save
+ __ push(context);
+ // load context from callee
+ __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ // isolate
+ __ mov(scratch,
+ Operand(ExternalReference::isolate_address(isolate)));
+ __ push(scratch);
+ // holder
+ __ push(holder);
+
+ // Prepare arguments.
+ __ mov(scratch, sp);
+
+ // Allocate the FunctionCallbackInfo structure in the arguments' space, since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
+ // r0 = FunctionCallbackInfo&
+ // Arguments is after the return address.
+ __ add(r0, sp, Operand(1 * kPointerSize));
+ // FunctionCallbackInfo::implicit_args_
+ __ str(scratch, MemOperand(r0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ mov(ip, Operand(argc));
+ __ str(ip, MemOperand(r0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ mov(ip, Operand::Zero());
+ __ str(ip, MemOperand(r0, 3 * kPointerSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ return_value_operand,
+ &context_restore_operand);
+}
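
The seven pushes at the top of CallApiFunctionStub::Generate build the implicit-args block of the FunctionCallbackInfo (the holder ends up at the lowest address and the saved context at the highest, matching the FCA::k*Index asserts), and values_ is then set to implicit_args + (kArgsLength - 1 + argc) * kPointerSize, i.e. the address of the first JS argument, with argument i read i words below that. A stand-alone sketch of that pointer arithmetic (the base address is made up; kArgsLength comes from the asserts above):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kPointerSize = 4;
      const int       kArgsLength  = 7;   // implicit slots (holder .. context)
      const int       argc         = 3;   // example call with three JS arguments

      uintptr_t implicit_args = 0x7fff0000;  // lowest slot: the holder

      // FunctionCallbackInfo::values_ as computed by the stub: the address of
      // the first JS argument, one word below the receiver.
      uintptr_t values =
          implicit_args + (kArgsLength - 1 + argc) * kPointerSize;

      // Argument i is then read i words below values_.
      for (int i = 0; i < argc; ++i) {
        uintptr_t arg_slot = values - i * kPointerSize;
        printf("arg[%d] read from %#lx\n", i, (unsigned long)arg_slot);
      }
      return 0;
    }
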
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- r2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = r2;
+
+ __ mov(r0, sp); // r0 = Handle<Name>
+ __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create an AccessorInfo instance on the stack above the exit frame with
+ // r1 (internal::Object** args_) as the data.
+ __ str(r1, MemOperand(sp, 1 * kPointerSize));
+ __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index c03d8f27ec..7a371f1694 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -37,30 +37,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
@@ -68,7 +44,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -82,18 +57,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
@@ -109,23 +72,6 @@ class StringHelper : public AllStatic {
int flags);
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
@@ -143,32 +89,6 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
-
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm);
-
- const StringAddFlags flags_;
-};
-
-
class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -231,7 +151,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@@ -279,8 +198,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
@@ -508,6 +425,18 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
};
+struct PlatformCallInterfaceDescriptor {
+ explicit PlatformCallInterfaceDescriptor(
+ TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) { }
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 44c331b75f..cfc9dfec4d 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -37,18 +37,6 @@ namespace v8 {
namespace internal {
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- return NULL;
-}
-
-
#define __ masm.
@@ -62,10 +50,10 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -359,13 +347,33 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
}
#endif
-#undef __
+UnaryMathFunction CreateSqrtFunction() {
+#if defined(USE_SIMULATOR)
+ return &std::sqrt;
+#else
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::sqrt;
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-UnaryMathFunction CreateSqrtFunction() {
- return &sqrt;
+ __ MovFromFloatParameter(d0);
+ __ vsqrt(d0, d0);
+ __ MovToFloatResult(d0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
}
+#undef __
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -836,8 +844,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
+#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
+#endif
static byte* GetNoCodeAgeSequence(uint32_t* length) {
// The sequence of instructions that is patched out for aging code is the
@@ -847,11 +857,15 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
byte* byte_sequence = reinterpret_cast<byte*>(sequence);
*length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- PredictableCodeSizeScope scope(patcher.masm(), *length);
- patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- patcher.masm()->nop(ip.code());
- patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
+ // Since the patcher is a large object, allocate it dynamically when needed,
+ // to avoid overflowing the stack in stress conditions.
+ SmartPointer<CodePatcher>
+ patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
+ PredictableCodeSizeScope scope(patcher->masm(), *length);
+ patcher->masm()->PushFixedFrame(r1);
+ patcher->masm()->nop(ip.code());
+ patcher->masm()->add(
+ fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
initialized = true;
}
return byte_sequence;
@@ -896,7 +910,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ patcher.masm()->emit_code_stub_address(stub);
}
}
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index ecbe64cbad..0bf7ccadca 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -34,50 +34,9 @@
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- explicit CodeGenerator(Isolate* isolate) {
- InitializeAstVisitor(isolate);
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 703613932c..78bb66c49f 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -50,6 +50,9 @@ inline int DecodeConstantPoolLength(int instr) {
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
+// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
+const int kCodeAgeJumpInstruction = 0xe51ff004;
+
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index cf531e1292..20c6a5dcce 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -27,8 +27,13 @@
// CPU specific code for arm independent of OS goes here.
#ifdef __arm__
+#ifdef __QNXNTO__
+#include <sys/mman.h> // for cache flushing.
+#undef MAP_TYPE
+#else
#include <sys/syscall.h> // for cache flushing.
#endif
+#endif
#include "v8.h"
@@ -57,13 +62,15 @@ void CPU::FlushICache(void* start, size_t size) {
return;
}
-#if defined (USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
// Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. We should notify the simulator
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+#elif V8_OS_QNX
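+ // On QNX, msync with MS_INVALIDATE_ICACHE invalidates the instruction
+ // cache for the given range.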
+ msync(start, size, MS_SYNC | MS_INVALIDATE_ICACHE);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index efd11069b3..9990bccdcf 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -265,9 +265,10 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0);
}
@@ -286,9 +287,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
}
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 96d7cb33a8..6031499dbd 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -107,7 +107,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->environment_length();
+ int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(r0.code(), params);
output_frame->SetRegister(r1.code(), handler);
}
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index acffaa3f23..49e4126b32 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1679,6 +1679,14 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
+ } else if (instruction_bits == kCodeAgeJumpInstruction) {
+ // The code age prologue has a constant immediately following the jump
+ // instruction.
+ Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
+ DecodeType2(instr);
+ OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ " (0x%08x)", target->InstructionBits());
+ return 2 * Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
case 0:
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index b2071807d2..780b48a8ef 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -42,10 +42,25 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ return pp;
+}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ return pp;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ const int offset = ExitFrameConstants::kConstantPoolOffset;
+ return Memory::Object_at(fp() + offset);
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 64a718e89f..29000ca3ab 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -64,8 +64,8 @@ const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
- 1 << 7 | // r7 v4 (pp in JavaScript code)
- 1 << 8 | // r8 v5 (cp in JavaScript code)
+ 1 << 7 | // r7 v4 (cp in JavaScript code)
+ 1 << 8 | // r8 v5 (pp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
@@ -102,14 +102,20 @@ const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -3 * kPointerSize;
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kFrameSize = FLAG_enable_ool_constant_pool ?
+ 3 * kPointerSize : 2 * kPointerSize;
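+ // The extra word holds the constant pool pointer when out-of-line
+ // constant pools are enabled.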
+
+ static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
+ -3 * kPointerSize : 0;
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index c57c785598..2eb5ccf974 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -119,6 +119,7 @@ class JumpPatchSite BASE_EMBEDDED {
// The live registers are:
// o r1: the JS function object being called (i.e., ourselves)
// o cp: our context
+// o pp: our caller's constant pool pointer (if FLAG_enable_ool_constant_pool)
// o fp: our caller's frame pointer
// o sp: stack pointer
// o lr: return address
@@ -129,6 +130,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -143,15 +147,22 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
- __ cmp(r5, Operand::Zero());
+ // Classic mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->is_classic_mode() && !info->is_native()) {
+ Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset), ne);
+ __ ldr(r2, MemOperand(sp, receiver_offset));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &ok);
+
+ __ ldr(r2, GlobalObjectOperand());
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+
+ __ str(r2, MemOperand(sp, receiver_offset));
+
+ __ bind(&ok);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -162,15 +173,27 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
+ __ LoadConstantPoolPointerRegister();
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
+ // Emit a loop to initialize stack cells for locals when optimizing for
+ // size. Otherwise, unroll the loop for maximum performance.
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ if (FLAG_optimize_for_size && locals_count > 4) {
+ Label loop;
+ __ mov(r2, Operand(locals_count));
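+ // r2 counts the remaining locals; SetCC on the sub below lets the
+ // branch fall through once the counter reaches zero.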
+ __ bind(&loop);
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ push(r9);
+ __ b(&loop, ne);
+ } else {
+ for (int i = 0; i < locals_count; i++) {
+ __ push(r9);
+ }
}
}
}
@@ -182,20 +205,22 @@ void FullCodeGenerator::Generate() {
if (heap_slots > 0) {
// Argument to NewContext is the function, which is still in r1.
Comment cmnt(masm_, "[ Allocate context");
- __ push(r1);
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(r1);
__ Push(info->scope()->GetScopeInfo());
__ CallRuntime(Runtime::kNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
+ __ push(r1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Context is returned in r0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, r0);
+ __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -323,10 +348,6 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
reset_value = FLAG_interrupt_budget >> 4;
@@ -344,13 +365,10 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
__ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
@@ -383,53 +401,41 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ b(pl, &ok);
- __ push(r0);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(r2);
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(r0);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ b(pl, &ok);
+ __ push(r0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(r0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
+ __ bind(&check_exit_codesize);
#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
- masm_->mov(sp, fp);
- int no_frame_start = masm_->pc_offset();
- masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
+ int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
@@ -613,12 +619,11 @@ void FullCodeGenerator::StackValueContext::Plug(
Label done;
__ bind(materialize_true);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
__ jmp(&done);
__ bind(materialize_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
__ bind(&done);
+ __ push(ip);
}
@@ -666,7 +671,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
+ CallIC(ic, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -1027,9 +1032,19 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
+ Label skip;
+ __ b(&skip);
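+ // The code below is reached only through the bailout point that follows;
+ // r0 then holds the materialized boolean result of the comparison.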
+ PrepareForBailout(clause, TOS_REG);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
__ cmp(r0, Operand::Zero());
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -1062,6 +1077,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1151,13 +1167,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(r1, cell);
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
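+ // The compiler records the fast-case marker in the slot; the code emitted
+ // below overwrites it with the slow-case marker at runtime.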
+ __ Move(r1, FeedbackVector());
+ __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+ __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1385,11 +1401,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
}
@@ -1467,13 +1482,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallLoadIC(CONTEXTUAL);
context()->Plug(r0);
break;
}
@@ -1481,9 +1495,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1546,12 +1559,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name.
__ CallRuntime(Runtime::kLoadContextSlot, 2);
@@ -1597,9 +1610,8 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(r5);
__ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
+ __ Push(r5, r0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ pop(r5);
@@ -1625,6 +1637,8 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
@@ -1681,10 +1695,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->value()));
__ ldr(r1, MemOperand(sp));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1758,6 +1769,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1768,6 +1784,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
@@ -1776,29 +1799,24 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
- } else if (expr->depth() > 1) {
- __ Push(r3, r2, r1);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ Push(r3, r2, r1);
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ mov(r0, Operand(Smi::FromInt(flags)));
+ __ Push(r3, r2, r1, r0);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
if (has_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
@@ -2034,10 +2052,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
- __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ push(r3); // iter
- __ push(r0); // exception
+ __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(r2, r3, r0); // "throw", iter, except
__ jmp(&l_call);
// try { received = %yield result }
@@ -2064,30 +2081,36 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
kLRHasBeenSaved, kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(r0); // result
+ __ pop(r0); // result
EmitReturnSequence();
- __ bind(&l_resume); // received in r0
+ __ bind(&l_resume); // received in r0
__ PopTryHandler();
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
- __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ push(r3); // iter
- __ push(r0); // received
+ __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(r2, r3, r0); // "next", iter, received
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(r1, r0);
+ __ str(r1, MemOperand(sp, 2 * kPointerSize));
+ CallFunctionStub stub(1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
// if (!result.done) goto l_try;
__ bind(&l_loop);
__ push(r0); // save result
__ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in r0
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in r0
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ cmp(r0, Operand(0));
@@ -2096,8 +2119,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(r0); // result
__ LoadRoot(r2, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in r0
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in r0
context()->DropAndPlug(2, r0); // drop iter and g
break;
}
@@ -2109,19 +2131,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in r0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. r1
- // will hold the generator object until the activation has been resumed.
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // r1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ pop(r1);
// Check generator state.
- Label wrong_state, done;
+ Label wrong_state, closed_state, done;
__ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
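+ // A zero continuation offset means the generator is closed; a negative
+ // value means it is currently executing.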
__ cmp(r3, Operand(Smi::FromInt(0)));
- __ b(le, &wrong_state);
+ __ b(eq, &closed_state);
+ __ b(lt, &wrong_state);
// Load suspended function and context.
__ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
@@ -2150,11 +2174,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ bl(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
- __ push(lr); // Return address.
- __ push(fp); // Caller's frame pointer.
- __ mov(fp, sp);
- __ push(cp); // Callee's context.
- __ push(r4); // Callee's JS Function.
+ // lr = return address.
+ // fp = caller's frame pointer.
+ // pp = caller's constant pool (if FLAG_enable_ool_constant_pool),
+ // cp = callee's context,
+ // r4 = callee's JS function.
+ __ PushFixedFrame(r4);
+ // Adjust FP to point to saved FP.
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Load the operand stack size.
__ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
@@ -2186,13 +2213,28 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(r2);
__ b(&push_operand_holes);
__ bind(&call_resume);
- __ push(r1);
- __ push(result_register());
+ ASSERT(!result_register().is(r1));
+ __ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ push(r2);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(r1);
@@ -2244,8 +2286,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->value()));
// Call load IC. It has arguments receiver and property name r0 and r2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
@@ -2253,7 +2294,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2279,16 +2320,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ BinaryOpICStub stub(op, mode);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
// Smi case. This code works the same way as the smi-smi case in the type
// recording binary operation stub, see
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) {
case Token::SAR:
__ GetLeastBitsFromSmi(scratch1, right, 5);
@@ -2357,10 +2396,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -2398,10 +2436,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2409,8 +2444,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
- __ pop(r2);
- __ pop(r0); // Restore value.
+ __ Pop(r0, r2); // r0 = restored value.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
@@ -2422,48 +2456,60 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ mov(r3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, LanguageMode mode) {
+ __ push(r0); // Value.
+ __ mov(r1, Operand(name));
+ __ mov(r0, Operand(Smi::FromInt(mode)));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallStoreIC();
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ ldr(r1, StackOperand(var));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
- __ str(result_register(), StackOperand(var));
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r2, location);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), language_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2476,20 +2522,16 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), language_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2497,21 +2539,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ str(r0, location);
- if (var->IsContextSlot()) {
- __ mov(r3, r0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2529,10 +2557,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2544,13 +2569,12 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ pop(r1); // Key.
- __ pop(r2);
+ __ Pop(r2, r1); // r1 = key.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2577,73 +2601,109 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
ic_total_count_++;
// All calls must have a predictable size in full-codegen code to ensure that
// the debugger can patch them correctly.
- __ Call(code, rmode, ast_id, al, NEVER_INLINE_TARGET_ADDRESS);
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
+ NEVER_INLINE_TARGET_ADDRESS);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+ Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
+
+ CallFunctionFlags flags;
+ // Get the target function.
+ if (callee->IsVariableProxy()) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a classic mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ flags = NO_CALL_FUNCTION_FLAGS;
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ ldr(r0, MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ str(r0, MemOperand(sp, kPointerSize));
+ flags = CALL_AS_METHOD;
+ }
+
+ // Load the arguments.
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
- __ mov(r2, Operand(name));
}
+
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
+ CallFunctionStub stub(arg_count, flags);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
RecordJSReturnSite(expr);
+
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(r0);
+
+ context()->DropAndPlug(1, r0);
}
+// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(r1);
- __ push(r0);
- __ push(r1);
-
- // Code common for calls using the IC.
+ Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ ldr(r1, MemOperand(sp, 0));
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ str(r0, MemOperand(sp, kPointerSize));
+
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
}
+
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
+ CallFunctionStub stub(arg_count, CALL_AS_METHOD);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0); // Drop the key still on the stack.
+
+ context()->DropAndPlug(1, r0);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2655,17 +2715,16 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(r2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
- CallFunctionStub stub(arg_count, flags);
+ // Record call targets in unoptimized code.
+ CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2674,27 +2733,25 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
+ // r4: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ __ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
}
- __ push(r1);
- // Push the receiver of the enclosing function.
+ // r3: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
- __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
- __ push(r1);
- // Push the language mode.
- __ mov(r1, Operand(Smi::FromInt(language_mode())));
- __ push(r1);
+ __ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
+
+ // r2: the language mode.
+ __ mov(r2, Operand(Smi::FromInt(language_mode())));
- // Push the start position of the scope the calls resides in.
+ // r1: the start position of the scope the call resides in.
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
- __ push(r1);
// Do the runtime call.
+ __ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
@@ -2708,10 +2765,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -2743,20 +2799,19 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ ldr(r0, GlobalObjectOperand());
- __ push(r0);
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2768,9 +2823,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in r0)
// and the object holding it (returned in edx).
- __ push(context_register());
+ ASSERT(!context_register().is(r2));
__ mov(r2, Operand(proxy->name()));
- __ push(r2);
+ __ Push(context_register(), r2);
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ Push(r0, r1); // Function, receiver.
@@ -2785,37 +2840,34 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(r0);
// The receiver is implicitly the global receiver. Indicate this
// by passing undefined to the call function stub.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ push(r1);
__ bind(&call);
}
// The receiver is either the global receiver or an object found
- // by LoadContextSlot. That object could be the hole if the
- // receiver is implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
+ // by LoadContextSlot.
+ EmitCallWithStub(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithIC(expr);
} else {
EmitKeyedCallWithIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ push(r1);
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCallWithStub(expr);
}
#ifdef DEBUG
@@ -2853,10 +2905,10 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(r2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
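Editor's note on the hunk above: VisitCallNew no longer allocates a fresh Cell per call site; the uninitialized sentinel is written into the function-wide feedback vector at this site's slot, and the call passes the vector (r2) together with the slot index (r3) to the stub. Conceptually the change is from one heap cell per site to one indexed array per function. A rough model, not using V8 types:

#include <cstdio>
#include <string>
#include <vector>

// Rough model only: each optimizable site gets a slot in a single
// per-function vector instead of owning its own Cell.
struct FeedbackVector {
  std::vector<std::string> slots;
};

int main() {
  FeedbackVector feedback{{"uninitialized", "uninitialized", "uninitialized"}};
  int call_new_slot = 2;                       // stands in for expr->CallNewFeedbackSlot()
  feedback.slots[call_new_slot] = "Point";     // target recorded by the construct stub
  std::printf("%s\n", feedback.slots[call_new_slot].c_str());
}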
@@ -3097,6 +3149,32 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ cmp(r2, Operand(0x80000000));
+ __ cmp(r1, Operand(0x00000000), eq);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
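Editor's note: the new EmitIsMinusZero helper above distinguishes -0 from +0 by inspecting the two 32-bit halves of the HeapNumber payload rather than doing a floating-point compare (which would treat -0 and +0 as equal). A minimal sketch of the same bit test in portable C++, assuming a little-endian double layout so the kExponentOffset word (loaded into r2) is the upper half and the kMantissaOffset word (r1) is the lower half:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Sketch only (not V8 code): -0.0 is the one double whose upper 32 bits are
// 0x80000000 and whose lower 32 bits are 0.
bool IsMinusZero(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  uint32_t lower = static_cast<uint32_t>(bits);
  return upper == 0x80000000u && lower == 0u;
}

int main() {
  std::printf("%d %d %d\n", IsMinusZero(-0.0), IsMinusZero(0.0), IsMinusZero(-1.0));
  // Prints: 1 0 0
}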
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -3156,14 +3234,11 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
// Check the marker in the calling frame.
- __ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
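Editor's note: this hunk, and the similar ones in EmitArgumentsLength and EmitValueOf further down, replace a compare / branch-over-a-load / label sequence with a single conditionally executed ldr: the trailing eq argument makes the load execute only when the flags from the preceding cmp indicate equality. C++ has no direct analogue of ARM predication, but the observable effect is just a flag-guarded assignment; the names and values below are illustrative only:

#include <cstdio>

// Rough C++ analogue of "cmp ...; ldr(r2, <caller fp slot>, eq)": the load
// happens only when the preceding comparison succeeded, otherwise r2 keeps
// its old value. No label or branch is needed.
int main() {
  const int kArgumentsAdaptor = 1;
  int frame_context = 1;       // what the cmp looked at
  int r2 = 100;                // current frame pointer
  int caller_fp_slot = 200;    // value the predicated ldr would read

  if (frame_context == kArgumentsAdaptor) r2 = caller_fp_slot;

  std::printf("%d\n", r2);     // 200
}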
@@ -3214,7 +3289,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
ASSERT(expr->arguments()->length() == 0);
- Label exit;
+
// Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -3222,13 +3297,11 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &exit);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset), eq);
- __ bind(&exit);
context()->Plug(r0);
}
@@ -3316,50 +3389,6 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r4, Operand(r0));
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in r0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ PrepareCallCFunction(1, r0);
- __ ldr(r0,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand::Zero());
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- __ mov(r0, r4);
-
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
@@ -3397,8 +3426,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
__ JumpIfSmi(r0, &done);
// If the object is not a value type, return the object.
__ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
- __ b(ne, &done);
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset), eq);
__ bind(&done);
context()->Plug(r0);
@@ -3452,31 +3480,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- __ SmiTst(index);
- __ Check(eq, kNonSmiIndex);
- __ SmiTst(value);
- __ Check(eq, kNonSmiValue);
-
- __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
- __ cmp(index, ip);
- __ Check(lt, kIndexIsTooLarge);
-
- __ cmp(index, Operand(Smi::FromInt(0)));
- __ Check(ge, kIndexIsNegative);
-
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
-
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
- __ cmp(ip, Operand(encoding_mask));
- __ Check(eq, kUnexpectedStringType);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
@@ -3487,13 +3490,18 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
if (FLAG_debug_code) {
+ __ SmiTst(value);
+ __ Check(eq, kNonSmiValue);
+ __ SmiTst(index);
+ __ Check(eq, kNonSmiIndex);
+ __ SmiUntag(index, index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ SmiTag(index, index);
}
__ SmiUntag(value, value);
@@ -3515,13 +3523,18 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
if (FLAG_debug_code) {
+ __ SmiTst(value);
+ __ Check(eq, kNonSmiValue);
+ __ SmiTst(index);
+ __ Check(eq, kNonSmiIndex);
+ __ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index, index);
}
__ SmiUntag(value, value);
@@ -3701,9 +3714,10 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
+ __ pop(r1);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -3721,50 +3735,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
+ // Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallStub(&stub);
+ __ CallRuntime(Runtime::kMath_log, 1);
context()->Plug(r0);
}
@@ -3798,8 +3774,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
// InvokeFunction requires the function in r1. Move it in there.
__ mov(r1, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(r1, count, CALL_FUNCTION, NullCallWrapper());
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
@@ -3818,7 +3793,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ pop(r1);
+ __ pop(r2);
__ CallStub(&stub);
context()->Plug(r0);
}
@@ -3851,7 +3828,6 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Label done, not_found;
- // tmp now holds finger offset as a smi.
__ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
// r2 now holds finger offset as a smi.
__ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -3874,47 +3850,6 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = r0;
- Register left = r1;
- Register tmp = r2;
- Register tmp2 = r3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmp(left, Operand(right));
- __ b(eq, &ok);
- // Fail if either is a non-HeapObject.
- __ and_(tmp, left, Operand(right));
- __ JumpIfSmi(tmp, &fail);
- __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
- __ b(ne, &fail);
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ cmp(tmp, Operand(tmp2));
- __ b(ne, &fail);
- __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ cmp(tmp, tmp2);
- __ b(eq, &ok);
- __ bind(&fail);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4194,34 +4129,49 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
+ // Push the builtins object as the receiver.
__ ldr(r0, GlobalObjectOperand());
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
__ push(r0);
- }
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
+ // Load the function from the receiver.
__ mov(r2, Operand(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ ldr(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ str(r0, MemOperand(sp, kPointerSize));
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, r0);
} else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+ context()->Plug(r0);
}
- context()->Plug(r0);
}
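Editor's note on the VisitCallRuntime hunk above: for the is_jsruntime() path the calling convention changed. The builtins object is pushed as the receiver, the target is loaded with a LoadIC, and then slipped underneath the receiver before the arguments go on the stack, so the frame looks like an ordinary CallFunctionStub call. The two-instruction shuffle (duplicate the top slot, overwrite the slot below it) can be pictured as follows; the sketch is illustrative only and models the operand stack as a vector whose back() is sp[0]:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> stack = {"<caller frame>", "builtins object"};  // receiver on top
  std::string r0 = "resolved runtime function";      // result of CallLoadIC

  stack.push_back(stack.back());    // __ ldr(ip, MemOperand(sp, 0)); __ push(ip);
  stack[stack.size() - 2] = r0;     // __ str(r0, MemOperand(sp, kPointerSize));

  for (const std::string& s : stack) std::printf("[%s] ", s.c_str());
  std::printf("\n");  // [<caller frame>] [resolved runtime function] [builtins object]
}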
@@ -4260,9 +4210,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ ASSERT(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
- __ push(r2);
+ __ Push(context_register(), r2);
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
context()->Plug(r0);
}
@@ -4393,14 +4343,44 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(r0, &no_conversion);
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(r0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r0);
+ break;
+ case NAMED_PROPERTY:
+ __ str(r0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
+ __ b(vc, &done);
+ // Call stub. Undo operation first.
+ __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
+ __ jmp(&stub_call);
+ __ bind(&slow);
}
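Editor's note: the count operation now open-codes the smi fast path before the ToNumber conversion. It adds the tagged count value with SetCC, branches to done on vc (no signed overflow), and otherwise undoes the add and jumps to the stub. A sketch of the same trick, assuming V8's 32-bit smi encoding (payload shifted left by one, tag bit zero), so that signed overflow of the tagged add is exactly overflow of the 31-bit payload:

#include <cstdint>
#include <cstdio>

// Sketch only: tagged smi = payload * 2. Adding tagged values adds payloads;
// signed 32-bit overflow of the tagged add (the V flag after "add ..., SetCC")
// means the result no longer fits in a smi and the stub must take over.
int32_t IncrementSmi(int32_t tagged, int count_value, bool* overflowed) {
  int32_t tagged_count = count_value * 2;          // Smi::FromInt(count_value)
  int32_t result;
  *overflowed = __builtin_add_overflow(tagged, tagged_count, &result);
  return *overflowed ? tagged : result;            // undo the add on overflow
}

int main() {
  bool ovf;
  int32_t r0 = 5 * 2;                              // tagged smi 5
  r0 = IncrementSmi(r0, +1, &ovf);
  std::printf("%d %d\n", r0 / 2, ovf);             // 6 0
  int32_t max_smi = (1 << 30) - 1;                 // largest 31-bit payload
  IncrementSmi(max_smi * 2, +1, &ovf);
  std::printf("%d\n", ovf);                        // 1 -> take the stub path
}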
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4423,32 +4403,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
- __ b(vs, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(r0, &done);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
- }
+ __ bind(&stub_call);
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4477,10 +4440,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4492,12 +4452,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(r1); // Key.
- __ pop(r2); // Receiver.
+ __ Pop(r2, r1); // r1 = key. r2 = receiver.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4517,16 +4476,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4688,7 +4647,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand::Zero());
@@ -4723,7 +4682,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ cmp(r0, Operand(0));
Split(ne, if_true, if_false, fall_through);
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index aded4c1dd8..1af6cf87b8 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -104,7 +104,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
}
-// Helper function used from LoadIC/CallIC GenerateNormal.
+// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
@@ -333,320 +333,6 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r1 : receiver
- // -- r2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(r1, &number);
- __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
- __ b(ne, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
- __ b(hs, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &boolean);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // r1: function
-
- // Check that the value isn't a smi.
- __ JumpIfSmi(r1, miss);
-
- // Check that the value is a JSFunction.
- __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- GenerateNameDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
-
- // r0: elements
- // Search the dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
-
- GenerateFunctionTailCall(masm, argc, &miss, r4);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4);
- }
-
- // Get the receiver of the function from the stack.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
-
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ JumpIfSmi(r2, &invoke);
- __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
- __ b(eq, &global);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(r2, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3);
-
- __ bind(&do_call);
- // receiver in r1 is not used after this point.
- // r2: key
- // r1: function
- GenerateFunctionTailCall(masm, argc, &slow_call, r0);
-
- __ bind(&check_number_dictionary);
- // r2: key
- // r3: elements map
- // r4: elements
- // Check whether the elements is a number dictionary.
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow_load);
- __ SmiUntag(r0, r2);
- // r0: untagged index
- __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- }
- __ mov(r1, r0);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, r2, r0, r3, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(r3, r2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(r2, &miss);
- __ IsObjectNameType(r2, r0, &miss);
-
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -655,9 +341,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -827,7 +511,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(r0, r2);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -856,38 +540,11 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Load receiver.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
- __ ldr(r1, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
- __ ldr(r1, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, r3);
- __ b(eq, &slow);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&slow);
- GenerateMiss(masm, argc);
+ GenerateMiss(masm);
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -900,9 +557,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ Push(r1, r0);
// Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1120,7 +776,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -1160,11 +816,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1175,10 +831,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
@@ -1268,6 +922,21 @@ static void KeyedStoreGenerateGenericHelper(
Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, fast_double);
}
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element
+ Label holecheck_passed1;
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(scratch_value,
+ MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
+ __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
+ __ b(ne, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&holecheck_passed1);
+
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(value, &non_smi_value);
@@ -1315,6 +984,20 @@ static void KeyedStoreGenerateGenericHelper(
__ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
__ b(ne, slow);
}
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so
+ // go to the runtime.
+ __ add(address, elements,
+ Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32))
+ - kHeapObjectTag));
+ __ ldr(scratch_value,
+ MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
+ __ cmp(scratch_value, Operand(kHoleNanUpper32));
+ __ b(ne, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value, key, elements, r3, d0,
&transition_double_elements);
@@ -1403,10 +1086,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
__ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ b(ne, &slow);
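Editor's note: the generic keyed store now rejects observed receivers on the same test that rejects access-checked ones. OR-ing the two mask bits into one tst means a single flag check covers "any of these bits set". A small sketch, with made-up bit positions rather than the real Map::kBitField layout:

#include <cstdint>
#include <cstdio>

// Placeholder bit positions, for illustration only.
constexpr uint8_t kIsAccessCheckNeeded = 1 << 1;
constexpr uint8_t kIsObserved = 1 << 2;

bool MustTakeSlowPath(uint8_t bit_field) {
  // One AND against the combined mask tests both conditions at once.
  return (bit_field & (kIsAccessCheckNeeded | kIsObserved)) != 0;
}

int main() {
  std::printf("%d %d %d\n",
              MustTakeSlowPath(0),
              MustTakeSlowPath(kIsObserved),
              MustTakeSlowPath(kIsAccessCheckNeeded));  // 0 1 1
}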
// Check if the object is a JS array or not.
__ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -1476,8 +1159,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1486,9 +1168,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, strict_mode,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
@@ -1615,12 +1295,10 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
return;
}
-#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
address, cmp_instruction_address, delta);
}
-#endif
Address patch_address =
cmp_instruction_address - delta * Instruction::kInstrSize;
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 615c835bea..fdf4ddfd80 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -256,7 +256,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -272,11 +272,23 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
- stream->Add(" + %d", offset());
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -301,28 +313,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[r2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -359,7 +349,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -562,8 +552,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -571,40 +560,35 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
+ LTemplateResultInstruction<1>* instr, int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
+ LTemplateResultInstruction<1>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -758,13 +742,10 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = UseFixedDouble(instr->right(), d2);
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = UseFixedDouble(instr->right(), d1);
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
} else {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
@@ -859,17 +840,18 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
@@ -928,90 +910,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
- }
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
- }
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1043,6 +941,9 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -1087,7 +988,7 @@ LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
}
@@ -1119,11 +1020,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* inner_object) {
- LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
- LInnerAllocatedObject* result =
- new(zone()) LInnerAllocatedObject(base_object);
- return DefineAsRegister(result);
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1145,33 +1046,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), r1);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+ return MarkAsCall(DefineFixed(result, r0), instr);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1189,9 +1095,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
@@ -1229,30 +1132,10 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr);
}
@@ -1269,43 +1152,16 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
+ LOperand* input = UseRegisterAtStart(instr->value());
LMathSqrt* result = new(zone()) LMathSqrt(input);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LOperand* temp = FixedTemp(d3);
- LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
- return DefineFixedDouble(result, d2);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(
- DefineFixed(new(zone()) LCallKeyed(context, key), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input);
+ return DefineAsRegister(result);
}
@@ -1328,8 +1184,8 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- return MarkAsCall(
- DefineFixed(new(zone()) LCallFunction(context, function), r0), instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, r0), instr);
}
@@ -1378,9 +1234,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
+ if (instr->RightIsPowerOf2()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
+ LOperand* value = UseRegister(instr->left());
LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
return AssignEnvironment(DefineAsRegister(div));
}
@@ -1424,43 +1280,25 @@ bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
}
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (CpuFeatures::IsSupported(SUDIV)) {
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
- }
-
- if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- int32_t int32_val = constant_val->Integer32Value();
- if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) ||
- CpuFeatures::IsSupported(SUDIV)) {
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- }
-
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ // LMathFloorOfDiv can only handle a subset of divisors, so fall
+ // back to a flooring division in all other cases.
HValue* right = instr->right();
+ if (!right->IsInteger32Constant() ||
+ (!CpuFeatures::IsSupported(SUDIV) &&
+ !HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()))) {
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(right);
+ LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+ LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
+ }
+
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
? UseRegister(right)
: UseOrConstant(right);
LOperand* remainder = TempRegister();
- ASSERT(CpuFeatures::IsSupported(SUDIV) ||
- (right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())));
return AssignEnvironment(DefineAsRegister(
new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
@@ -1472,19 +1310,15 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
+ if (instr->RightIsPowerOf2()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right));
+ UseConstant(right));
LInstruction* result = DefineAsRegister(mod);
return (left->CanBeNegative() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- return AssignEnvironment(DefineAsRegister(mod));
} else if (CpuFeatures::IsSupported(SUDIV)) {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right));
@@ -1669,6 +1503,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
result = AssignEnvironment(result);
}
return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ return result;
} else if (instr->representation().IsDouble()) {
if (instr->left()->IsMul()) {
return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
@@ -1711,30 +1554,17 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
// We need to use a fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* left = UseFixedDouble(instr->left(), d0);
LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), d2) :
+ UseFixedDouble(instr->right(), d1) :
UseFixed(instr->right(), r2);
LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d3),
+ return MarkAsCall(DefineFixedDouble(result, d2),
instr,
CAN_DEOPTIMIZE_EAGERLY);
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseTempRegister(instr->global_object());
- LOperand* scratch = TempRegister();
- LOperand* scratch2 = TempRegister();
- LOperand* scratch3 = TempRegister();
- LRandom* result = new(zone()) LRandom(
- global_object, scratch, scratch2, scratch3);
- return DefineFixedDouble(result, d7);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
@@ -1783,6 +1613,16 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1865,19 +1705,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
LDateField* result =
@@ -1886,11 +1713,21 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegisterOrConstant(instr->index());
- LOperand* value = UseRegister(instr->value());
- return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ return new(zone()) LSeqStringSetChar(context, string, index, value);
}
@@ -1915,13 +1752,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -2006,7 +1836,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegisterAtStart(val);
if (val->CheckFlag(HInstruction::kUint32)) {
LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
@@ -2017,8 +1847,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
- : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
+ : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -2153,16 +1983,6 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(), r1);
- LOperand* value = UseFixed(instr->value(), r0);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2213,20 +2033,13 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2238,20 +2051,19 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(backing_store, key);
}
DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt; make sure it
// has an environment.
bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS;
return can_deoptimize ? AssignEnvironment(result) : result;
}
@@ -2268,7 +2080,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2297,16 +2109,17 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
- (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
- ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
- (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
@@ -2406,8 +2219,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
return MarkAsCall(
DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
instr);
@@ -2478,7 +2291,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
- Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2528,15 +2341,8 @@ LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length;
- LOperand* index;
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- length = UseRegisterOrConstant(instr->length());
- index = UseOrConstant(instr->index());
- } else {
- length = UseTempRegister(instr->length());
- index = UseRegisterAtStart(instr->index());
- }
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2556,7 +2362,10 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ return new(zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
}
@@ -2607,8 +2416,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2666,5 +2474,4 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 5087fb33d2..982ac2c5a3 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -52,12 +52,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -72,6 +69,7 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
+ V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -93,13 +91,10 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
- V(ElementsKind) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -118,7 +113,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
@@ -130,7 +124,6 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
V(MathExp) \
V(MathFloor) \
V(MathFloorOfDiv) \
@@ -138,9 +131,7 @@ class LCodeGen;
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -150,13 +141,12 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
- V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -165,7 +155,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -178,7 +167,6 @@ class LCodeGen;
V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
@@ -187,7 +175,6 @@ class LCodeGen;
V(Uint32ToDouble) \
V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
@@ -305,10 +292,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -320,10 +305,20 @@ class LTemplateInstruction : public LInstruction {
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
private:
+ // Iterator support.
virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
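// A standalone, simplified sketch (not from this patch) of what the template
// split above buys: the result slot lives in a base class parameterized only
// on R, so fixed-shape instructions keep compile-time input/temp arrays while
// a variable-arity call can hold its inputs in a growable list. The type
// names below are illustrative stand-ins, not the real V8 classes.
#include <array>
#include <vector>

template <int R>
struct ResultInstructionSketch {             // cf. LTemplateResultInstruction<R>
  std::array<void*, R> results_;
};

template <int R, int I, int T>
struct FixedShapeInstructionSketch           // cf. LTemplateInstruction<R, I, T>
    : ResultInstructionSketch<R> {
  std::array<void*, I> inputs_;
  std::array<void*, T> temps_;
};

struct VariableArityCallSketch               // cf. LCallWithDescriptor
    : ResultInstructionSketch<1> {
  std::vector<void*> inputs_;                // sized from the call descriptor
};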
@@ -490,10 +485,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -558,6 +549,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
@@ -656,6 +648,8 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
+ bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
@@ -815,42 +809,6 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathTan(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
-};
-
-
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
@@ -885,15 +843,13 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathPowHalf(LOperand* value, LOperand* temp) {
+ explicit LMathPowHalf(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
@@ -927,6 +883,22 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -1321,34 +1293,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
@@ -1368,41 +1312,39 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
- inputs_[2] = value;
}
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
- private:
- String::Encoding encoding_;
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
- LThrow(LOperand* context, LOperand* value) {
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
inputs_[0] = context;
- inputs_[1] = value;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
};
@@ -1451,28 +1393,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
- LRandom(LOperand* global_object,
- LOperand* scratch,
- LOperand* scratch2,
- LOperand* scratch3) {
- inputs_[0] = global_object;
- temps_[0] = scratch;
- temps_[1] = scratch2;
- temps_[2] = scratch3;
- }
-
- LOperand* global_object() const { return inputs_[0]; }
- LOperand* scratch() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
- LOperand* scratch3() const { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1599,20 +1519,6 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
@@ -1628,6 +1534,12 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1693,28 +1605,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1793,19 +1683,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInnerAllocatedObject(LOperand* base_object) {
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
+ inputs_[1] = offset;
}
- LOperand* base_object() { return inputs_[0]; }
- int offset() { return hydrogen()->offset(); }
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
- DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -1823,18 +1713,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1848,95 +1726,73 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- LOperand* context() { return inputs_[0]; }
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
+ LOperand* function() { return inputs_[0]; }
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
+ LOperand* target() const { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
-
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNamed(LOperand* context) {
+ LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1958,35 +1814,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -2287,6 +2114,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2735,20 +2568,18 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
@@ -2767,15 +2598,11 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoRSub(HSub* instr);
static bool HasMagicNumberForDivisor(int32_t divisor);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
@@ -2791,7 +2618,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2841,7 +2667,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2850,22 +2676,16 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
@@ -2879,10 +2699,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
@@ -2895,14 +2711,11 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 647ce723e2..5ff3fa0764 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -84,9 +84,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ RegisterDependentCodeForEmbeddedMaps(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -145,18 +143,28 @@ bool LCodeGen::GeneratePrologue() {
// r1: Callee's JS function.
// cp: Callee's context.
+ // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- __ cmp(r5, Operand::Zero());
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset), ne);
+ // Classic mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->is_classic_mode() &&
+ !info_->is_native()) {
+ Label ok;
+ int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+ __ ldr(r2, MemOperand(sp, receiver_offset));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &ok);
+
+ __ ldr(r2, GlobalObjectOperand());
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+
+ __ str(r2, MemOperand(sp, receiver_offset));
+
+ __ bind(&ok);
}
}
@@ -165,6 +173,7 @@ bool LCodeGen::GeneratePrologue() {
__ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
+ __ LoadConstantPoolPointerRegister();
}
// Reserve space for the stack slots needed by the code.
@@ -198,17 +207,18 @@ bool LCodeGen::GeneratePrologue() {
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in r1.
- __ push(r1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
+ __ push(r1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved on the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ mov(cp, r0);
+ __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -267,7 +277,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -280,10 +291,10 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(!frame_is_built_);
ASSERT(info()->IsStub());
frame_is_built_ = true;
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ PushFixedFrame();
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -291,7 +302,7 @@ bool LCodeGen::GenerateDeferredCode() {
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
__ pop(ip);
- __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ PopFixedFrame();
frame_is_built_ = false;
}
__ jmp(code->exit());
@@ -342,14 +353,14 @@ bool LCodeGen::GenerateDeoptJumpTable() {
__ b(&needs_frame);
} else {
__ bind(&needs_frame);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ PushFixedFrame();
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, ip);
}
@@ -537,17 +548,36 @@ Operand LCodeGen::ToOperand(LOperand* op) {
}
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize;
+}
+
+
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()));
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
+ } else {
+ // No eager frame has been built, so retrieve the parameter relative to
+ // the stack pointer.
+ return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+ // No eager frame has been built, so retrieve the parameter relative to
+ // the stack pointer.
+ return MemOperand(
+ sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
}
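// A standalone sketch (not from this patch) of the frameless slot addressing
// introduced above, assuming 32-bit pointers. Parameter slot indices are
// negative (see the ASSERT in ArgumentsOffsetWithoutFrame), and
// -(index + 1) * kPointerSize maps index -1 to the word at sp, index -2 to
// the next word up, and so on.
#include <cassert>

int ArgumentsOffsetWithoutFrameSketch(int index) {
  const int kPointerSize = 4;  // 32-bit ARM
  assert(index < 0);
  return -(index + 1) * kPointerSize;
}

int main() {
  assert(ArgumentsOffsetWithoutFrameSketch(-1) == 0);  // MemOperand(sp, 0)
  assert(ArgumentsOffsetWithoutFrameSketch(-2) == 4);  // MemOperand(sp, 4)
  assert(ArgumentsOffsetWithoutFrameSketch(-3) == 8);
  return 0;
}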
@@ -692,7 +722,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
@@ -797,13 +826,39 @@ void LCodeGen::DeoptimizeIf(Condition condition,
return;
}
- ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
- if (FLAG_deopt_every_n_times == 1 &&
- !info()->IsStub() &&
- info()->opt_count() == id) {
- ASSERT(frame_is_built_);
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- return;
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Register scratch = scratch0();
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+
+ // Store the condition on the stack if necessary.
+ if (condition != al) {
+ __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
+ __ mov(scratch, Operand(1), LeaveCC, condition);
+ __ push(scratch);
+ }
+
+ __ push(r1);
+ __ mov(scratch, Operand(count));
+ __ ldr(r1, MemOperand(scratch));
+ __ sub(r1, r1, Operand(1), SetCC);
+ __ movw(r1, FLAG_deopt_every_n_times, eq);
+ __ str(r1, MemOperand(scratch));
+ __ pop(r1);
+
+ if (condition != al) {
+ // Clean up the stack before the deoptimizer call.
+ __ pop(scratch);
+ }
+
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
+
+ // 'Restore' the condition in a slightly hacky way. (It would be better
+ // to use 'msr' and 'mrs' instructions here, but they are not supported by
+ // our ARM simulator).
+ if (condition != al) {
+ condition = ne;
+ __ cmp(scratch, Operand::Zero());
+ }
}
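// A standalone sketch (not from this patch) of the --deopt-every-n-times
// counter emitted above: every eligible check decrements a shared counter,
// and when it hits zero the code deoptimizes and reloads the counter, so
// roughly every n-th check triggers a stress deopt. The helper name is
// illustrative only.
bool ShouldStressDeoptSketch(int* counter, int n) {
  if (--(*counter) == 0) {
    *counter = n;  // reload, like the movw into r1 above
    return true;   // take the deopt entry (the conditional Call above)
  }
  return false;
}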
if (info()->ShouldTrapOnDeopt()) {
@@ -842,36 +897,6 @@ void LCodeGen::DeoptimizeIf(Condition condition,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
@@ -882,6 +907,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -964,6 +990,10 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
+ if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
+ // Register pp always contains a pointer to the constant pool.
+ safepoint.DefinePointerRegister(pp, zone());
+ }
}
@@ -1052,11 +1082,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
RegExpExecStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1072,13 +1097,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::TranscendentalCache: {
- __ ldr(r0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -1094,7 +1112,7 @@ void LCodeGen::DoModI(LModI* instr) {
HMod* hmod = instr->hydrogen();
HValue* left = hmod->left();
HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
+ if (hmod->RightIsPowerOf2()) {
// TODO(svenpanne) We should really do the strength reduction on the
// Hydrogen level.
Register left_reg = ToRegister(instr->left());
@@ -1119,36 +1137,6 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&left_is_not_negative);
__ and_(result_reg, left_reg, Operand(divisor - 1));
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmp(right_reg, Operand(divisor));
- DeoptimizeIf(ne, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ cmp(left_reg, Operand::Zero());
- __ b(pl, &left_is_not_negative);
- __ rsb(result_reg, left_reg, Operand::Zero());
- __ and_(result_reg, result_reg, Operand(divisor - 1));
- __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
- }
- __ b(&done);
- }
-
- __ bind(&left_is_not_negative);
- __ and_(result_reg, left_reg, Operand(divisor - 1));
- __ bind(&done);
-
} else if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
@@ -1354,55 +1342,46 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
void LCodeGen::DoDivI(LDivI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- const Register dividend = ToRegister(instr->left());
- const Register result = ToRegister(instr->result());
- int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+ if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
+ Register dividend = ToRegister(instr->left());
+ HDiv* hdiv = instr->hydrogen();
+ int32_t divisor = hdiv->right()->GetInteger32Constant();
+ Register result = ToRegister(instr->result());
+ ASSERT(!result.is(dividend));
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- __ sub(result, dividend, Operand::Zero(), SetCC);
- __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- __ mov(result, Operand(result, ASR, power));
- if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
- return; // Don't fall through to "__ rsb" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ tst(dividend, Operand(test_value));
- DeoptimizeIf(ne, instr->environment());
- __ mov(result, Operand(dividend, ASR, power));
- if (divisor < 0) __ rsb(result, result, Operand(0));
- }
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
+ hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
+ hdiv->CheckFlag(HValue::kCanOverflow)) {
+ __ cmp(dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ Abs(divisor) != 1) {
+ __ tst(dividend, Operand(Abs(divisor) - 1));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ rsb(result, dividend, Operand(0));
+ return;
+ }
+ int32_t shift = WhichPowerOf2(Abs(divisor));
+ if (shift == 0) {
+ __ mov(result, dividend);
+ } else if (shift == 1) {
+ __ add(result, dividend, Operand(dividend, LSR, 31));
} else {
- if (divisor < 0) {
- __ rsb(result, dividend, Operand(0));
- } else {
- __ Move(result, dividend);
- }
+ __ mov(result, Operand(dividend, ASR, 31));
+ __ add(result, dividend, Operand(result, LSR, 32 - shift));
}
-
+ if (shift > 0) __ mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
return;
}
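// A standalone sketch (not from this patch) of the round-toward-zero
// power-of-two division emitted above: ASR 31 builds a sign mask, LSR
// (32 - shift) turns it into a bias of 2^shift - 1 for negative dividends,
// ASR shift divides, and a final negate handles negative divisors. Assumes a
// power-of-two divisor and arithmetic right shift of negative values (as on
// ARM); the kMinInt cases that the emitted code deopts on are ignored here.
#include <cassert>
#include <cstdint>

int32_t DivByPowerOfTwoSketch(int32_t dividend, int32_t divisor) {
  int32_t abs_divisor = divisor < 0 ? -divisor : divisor;
  int shift = 0;
  while ((1 << shift) != abs_divisor) ++shift;  // WhichPowerOf2(Abs(divisor))
  int32_t result;
  if (shift == 0) {
    result = dividend;
  } else {
    int32_t sign_mask = dividend >> 31;                          // 0 or -1
    uint32_t bias = static_cast<uint32_t>(sign_mask) >> (32 - shift);
    result = (dividend + static_cast<int32_t>(bias)) >> shift;   // ASR shift
  }
  return divisor < 0 ? -result : result;
}

int main() {
  assert(DivByPowerOfTwoSketch(-6, 4) == -1);  // truncates toward zero, not -2
  assert(DivByPowerOfTwoSketch(7, -2) == -3);
  assert(DivByPowerOfTwoSketch(13, 8) == 1);
  return 0;
}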
@@ -1411,15 +1390,15 @@ void LCodeGen::DoDivI(LDivI* instr) {
const Register result = ToRegister(instr->result());
// Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive;
- if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
// Do the test only if it hadn't been done above.
__ cmp(right, Operand::Zero());
}
@@ -1430,20 +1409,22 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
// Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow) &&
+ (!CpuFeatures::IsSupported(SUDIV) ||
+ !instr->hydrogen_value()->CheckFlag(
+ HValue::kAllUsesTruncatingToInt32))) {
+ // We don't need to check for overflow when truncating with sdiv
+ // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
+ __ cmp(right, Operand(-1), eq);
DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
}
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
__ sdiv(result, left, right);
- if (!instr->hydrogen()->CheckFlag(
+ if (!instr->hydrogen_value()->CheckFlag(
HInstruction::kAllUsesTruncatingToInt32)) {
// Compute remainder and deopt if it's not zero.
const Register remainder = scratch0();
@@ -1462,7 +1443,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ vcvt_s32_f64(double_scratch0().low(), vleft);
__ vmov(result, double_scratch0().low());
- if (!instr->hydrogen()->CheckFlag(
+ if (!instr->hydrogen_value()->CheckFlag(
HInstruction::kAllUsesTruncatingToInt32)) {
// Deopt if exact conversion to integer was not possible.
// Use vright as scratch register.
@@ -1535,12 +1516,9 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
__ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
+ __ cmp(right, Operand(-1), eq);
DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1879,43 +1857,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ SmiTst(input);
- __ Move(result, input, eq);
- __ b(eq, &done);
- }
-
- // If the object is not a value type, return the object.
- __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
- __ Move(result, input, ne);
- __ b(ne, &done);
- __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -1955,14 +1896,37 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- Register string = ToRegister(instr->string());
- LOperand* index_op = instr->index();
- Register value = ToRegister(instr->value());
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
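+  // For a non-constant index, form string + index (scaled by the character
+  // size) in a scratch register and let FieldMemOperand add the header offset.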
Register scratch = scratch0();
- String::Encoding encoding = instr->encoding();
+ ASSERT(!scratch.is(string));
+ ASSERT(!scratch.is(ToRegister(index)));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ add(scratch, string, Operand(ToRegister(index)));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
+ }
+ return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
if (FLAG_debug_code) {
+ Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -1975,36 +1939,35 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
__ Check(eq, kUnexpectedStringType);
}
- if (index_op->IsConstantOperand()) {
- int constant_index = ToInteger32(LConstantOperand::cast(index_op));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ strb(value,
- FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
- } else {
- __ strh(value,
- FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
- }
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ ldrb(result, operand);
} else {
- Register index = ToRegister(index_op);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ add(scratch, string, Operand(index));
- __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
- } else {
- __ add(scratch, string, Operand(index, LSL, 1));
- __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
- }
+ __ ldrh(result, operand);
}
}
-void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ push(input_reg);
- ASSERT(ToRegister(instr->context()).is(cp));
- CallRuntime(Runtime::kThrow, 1, instr);
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
if (FLAG_debug_code) {
- __ stop("Unreachable code.");
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ strb(value, operand);
+ } else {
+ __ strh(value, operand);
}
}
@@ -2115,19 +2078,13 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ vdiv(result, left, right);
break;
case Token::MOD: {
- // Save r0-r3 on the stack.
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
-
__ PrepareCallCFunction(0, 2, scratch0());
- __ SetCallCDoubleArguments(left, right);
+ __ MovToFloatParameters(left, right);
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
- __ GetCFunctionDoubleResult(result);
-
- // Restore r0-r3.
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
+ __ MovFromFloatResult(result);
break;
}
default:
@@ -2143,7 +2100,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
@@ -2439,6 +2396,33 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ __ VFPCompareAndSetFlags(value, 0.0);
+ EmitFalseBranch(instr, ne);
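+    // The value compared equal to +0.0, so only the sign bit can still
+    // distinguish -0.0: its upper word is 0x80000000.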
+ __ VmovHigh(scratch, value);
+ __ cmp(scratch, Operand(0x80000000));
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value,
+ scratch,
+ Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()),
+ DO_SMI_CHECK);
+ __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+ __ cmp(scratch, Operand(0x80000000));
+ __ cmp(ip, Operand(0x00000000), eq);
+ }
+ EmitBranch(instr, eq);
+}
+
+
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -2888,9 +2872,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
- __ mov(sp, fp);
- no_frame_start = masm_->pc_offset();
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
}
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
@@ -2931,10 +2913,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2963,19 +2944,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->global_object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3038,11 +3006,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
MemOperand operand = MemOperand(object, offset);
- if (access.representation().IsByte()) {
- __ ldrb(result, operand);
- } else {
- __ ldr(result, operand);
- }
+ __ Load(result, operand, access.representation());
return;
}
@@ -3058,11 +3022,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
object = result;
}
MemOperand operand = FieldMemOperand(object, offset);
- if (access.representation().IsByte()) {
- __ ldrb(result, operand);
- } else {
- __ ldr(result, operand);
- }
+ __ Load(result, operand, access.representation());
}
@@ -3073,7 +3033,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3128,32 +3088,38 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->object());
- __ ldr(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ // There are two words between the frame pointer and the last argument.
+  // Subtracting from length accounts for one of them; add one more.
+ if (instr->length()->IsConstantOperand()) {
int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- int index = (const_length - const_index) + 1;
- __ ldr(result, MemOperand(arguments, index * kPointerSize));
- } else {
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ ldr(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register index = ToRegister(instr->index());
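+      // rsb computes (const_length + 1) - index, i.e. the slot distance from
+      // the arguments pointer.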
+ __ rsb(result, index, Operand(const_length + 1));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+ }
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ sub(result, length, Operand(loc));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+ } else {
+ __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ }
+ } else {
Register length = ToRegister(instr->length());
Register index = ToRegister(instr->index());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ sub(length, length, index);
- __ add(length, length, Operand(1));
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ __ sub(result, length, index);
+ __ add(result, result, Operand(1));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
}
}
@@ -3175,20 +3141,28 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+ int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ int base_offset =
+ (instr->additional_index() << element_size_shift) + additional_offset;
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(double_scratch0().low(), scratch0(), additional_offset);
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ vldr(double_scratch0().low(), scratch0(), base_offset);
__ vcvt_f64_f32(result, double_scratch0().low());
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
+    } else { // Loading doubles, not floats.
+ __ vldr(result, scratch0(), base_offset);
}
} else {
Register result = ToRegister(instr->result());
@@ -3197,31 +3171,40 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
element_size_shift, shift_size,
instr->additional_index(), additional_offset);
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ ldrb(result, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ ldrh(result, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ ldr(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
DeoptimizeIf(cs, instr->environment());
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -3317,7 +3300,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3335,14 +3318,26 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
int shift_size,
int additional_index,
int additional_offset) {
- if (additional_index != 0 && !key_is_constant) {
- additional_index *= 1 << (element_size - shift_size);
- __ add(scratch0(), key, Operand(additional_index));
- }
-
+ int base_offset = (additional_index << element_size) + additional_offset;
if (key_is_constant) {
return MemOperand(base,
- (constant_key << element_size) + additional_offset);
+ base_offset + (constant_key << element_size));
+ }
+
+ if (additional_offset != 0) {
+ __ mov(scratch0(), Operand(base_offset));
+ if (shift_size >= 0) {
+ __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size));
+ } else {
+ ASSERT_EQ(-1, shift_size);
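+      // A shift size of -1 means the key is a tagged smi (twice the index)
+      // addressing byte-sized elements, so shift right by one instead.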
+ __ add(scratch0(), scratch0(), Operand(key, LSR, 1));
+ }
+ return MemOperand(base, scratch0());
+ }
+
+ if (additional_index != 0) {
+ additional_index *= 1 << (element_size - shift_size);
+ __ add(scratch0(), key, Operand(additional_index));
}
if (additional_index == 0) {
@@ -3419,26 +3414,29 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
+ Label global_object, result_in_receiver;
- // Do not transform the receiver to object for strict mode
- // functions.
- __ ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(scratch,
- Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
- __ b(ne, &receiver_ok);
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ __ tst(scratch, Operand(mask));
+ __ b(ne, &result_in_receiver);
- // Do not transform the receiver to object for builtins.
- __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &receiver_ok);
+ // Do not transform the receiver to object for builtins.
+ __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &result_in_receiver);
+ }
// Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
@@ -3453,14 +3451,24 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
DeoptimizeIf(lt, instr->environment());
- __ jmp(&receiver_ok);
+ __ b(&result_in_receiver);
__ bind(&global_object);
- __ ldr(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ ldr(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
+ __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ ldr(result,
+ ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+
+ if (result.is(receiver)) {
+ __ bind(&result_in_receiver);
+ } else {
+ Label result_ok;
+ __ b(&result_ok);
+ __ bind(&result_in_receiver);
+ __ mov(result, receiver);
+ __ bind(&result_ok);
+ }
}
@@ -3507,8 +3515,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3546,14 +3553,6 @@ void LCodeGen::DoContext(LContext* instr) {
}
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
__ push(cp); // The context is the first argument.
@@ -3565,25 +3564,10 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
}
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global_object());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
R1State r1_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3607,7 +3591,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ SetCallKind(r5, call_kind);
__ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ Call(ip);
@@ -3617,23 +3600,11 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- R1_UNINITIALIZED);
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
ASSERT(instr->context() != NULL);
ASSERT(ToRegister(instr->context()).is(cp));
@@ -3821,7 +3792,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
- DwVfpRegister temp = ToDoubleRegister(instr->temp());
+ DwVfpRegister temp = double_scratch0();
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
@@ -3844,11 +3815,11 @@ void LCodeGen::DoPower(LPower* instr) {
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
ASSERT(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(d2));
+ ToDoubleRegister(instr->right()).is(d1));
ASSERT(!instr->right()->IsRegister() ||
ToRegister(instr->right()).is(r2));
- ASSERT(ToDoubleRegister(instr->left()).is(d1));
- ASSERT(ToDoubleRegister(instr->result()).is(d3));
+ ASSERT(ToDoubleRegister(instr->left()).is(d0));
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
if (exponent_type.IsSmi()) {
MathPowStub stub(MathPowStub::TAGGED);
@@ -3874,68 +3845,6 @@ void LCodeGen::DoPower(LPower* instr) {
}
-void LCodeGen::DoRandom(LRandom* instr) {
- // Assert that the register size is indeed the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- // Load native context
- Register global_object = ToRegister(instr->global_object());
- Register native_context = global_object;
- __ ldr(native_context, FieldMemOperand(
- global_object, GlobalObject::kNativeContextOffset));
-
- // Load state (FixedArray of the native context's random seeds)
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- Register state = native_context;
- __ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));
-
- // Load state[0].
- Register state0 = ToRegister(instr->scratch());
- __ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
- // Load state[1].
- Register state1 = ToRegister(instr->scratch2());
- __ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- Register scratch3 = ToRegister(instr->scratch3());
- Register scratch4 = scratch0();
- __ and_(scratch3, state0, Operand(0xFFFF));
- __ mov(scratch4, Operand(18273));
- __ mul(scratch3, scratch3, scratch4);
- __ add(state0, scratch3, Operand(state0, LSR, 16));
- // Save state[0].
- __ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ and_(scratch3, state1, Operand(0xFFFF));
- __ mov(scratch4, Operand(36969));
- __ mul(scratch3, scratch3, scratch4);
- __ add(state1, scratch3, Operand(state1, LSR, 16));
- // Save state[1].
- __ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- Register random = scratch4;
- __ and_(random, state1, Operand(0x3FFFF));
- __ add(random, random, Operand(state0, LSL, 14));
-
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(scratch3, Operand(0x41000000));
- __ orr(scratch3, scratch3, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- DwVfpRegister result = ToDoubleRegister(instr->result());
- __ vmov(result, random, scratch3);
- // Move 0x4130000000000000 to VFP.
- __ mov(scratch4, Operand::Zero());
- DwVfpRegister scratch5 = double_scratch0();
- __ vmov(scratch5, scratch4, scratch3);
- __ vsub(result, result, scratch5);
-}
-
-
void LCodeGen::DoMathExp(LMathExp* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
@@ -3951,46 +3860,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, Operand::Zero());
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, Operand::Zero());
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, Operand::Zero());
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, Operand::Zero());
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
@@ -4004,74 +3878,69 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
R1_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ PlatformCallInterfaceDescriptor* call_descriptor =
+ instr->descriptor()->platform_specific_descriptor();
+ __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
+ call_descriptor->storage_mode());
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ mov(r0, Operand(instr->arity()));
+ }
+ // Change context.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
+ // Load the code entry address
+ __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- R1_UNINITIALIZED);
+ CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4095,16 +3964,15 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
- __ mov(r2, Operand(instr->hydrogen()->property_cell()));
+ __ mov(r2, Operand(factory()->undefined_value()));
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
+ ArrayNoArgumentConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
@@ -4117,18 +3985,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ b(eq, &packed_case);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
- override_mode);
+ ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
+ ArraySingleArgumentConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
+ ArrayNArgumentsConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -4151,7 +4018,13 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
- __ add(result, base, Operand(instr->offset()));
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ add(result, base, Operand(ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ add(result, base, offset);
+ }
}
@@ -4166,23 +4039,25 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (access.IsExternalMemory()) {
Register value = ToRegister(instr->value());
MemOperand operand = MemOperand(object, offset);
- if (representation.IsByte()) {
- __ strb(value, operand);
- } else {
- __ str(value, operand);
- }
+ __ Store(value, operand, representation);
return;
}
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value);
DeoptimizeIf(eq, instr->environment());
+
+      // We now know that value is not a smi, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -4210,17 +4085,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
- ASSERT(!object.is(value));
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
- if (representation.IsByte()) {
- __ strb(value, operand);
- } else {
- __ str(value, operand);
- }
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
@@ -4235,11 +4102,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
MemOperand operand = FieldMemOperand(scratch, offset);
- if (representation.IsByte()) {
- __ strb(value, operand);
- } else {
- __ str(value, operand);
- }
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
@@ -4263,9 +4126,8 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(),
+ instr->strict_mode_flag());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4319,10 +4181,16 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
+
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ int base_offset =
+ (instr->additional_index() << element_size_shift) + additional_offset;
Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4335,11 +4203,12 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else {
__ add(address, external_pointer, Operand(key, LSL, shift_size));
}
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), address, additional_offset);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, address, additional_offset);
+ __ vstr(double_scratch0().low(), address, base_offset);
+ } else { // Storing doubles, not floats.
+ __ vstr(value, address, base_offset);
}
} else {
Register value(ToRegister(instr->value()));
@@ -4348,21 +4217,30 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
element_size_shift, shift_size,
instr->additional_index(), additional_offset);
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
__ strb(value, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ strh(value, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ str(value, mem_operand);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4470,7 +4348,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4541,9 +4419,10 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(instr->hydrogen()->flags());
+ ASSERT(ToRegister(instr->left()).is(r1));
+ ASSERT(ToRegister(instr->right()).is(r0));
+ StringAddStub stub(instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4674,10 +4553,13 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
+ ASSERT(output->IsRegister());
if (!instr->hydrogen()->value()->HasRange() ||
!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ SmiTag(ToRegister(output), ToRegister(input));
}
}
@@ -4744,14 +4626,13 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
LNumberTagU* instr_;
};
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmp(reg, Operand(Smi::kMaxValue));
+ __ cmp(input, Operand(Smi::kMaxValue));
__ b(hi, deferred->entry());
- __ SmiTag(reg, reg);
+ __ SmiTag(result, input);
__ bind(deferred->exit());
}
@@ -5215,7 +5096,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(object);
__ mov(cp, Operand::Zero());
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(r0, scratch0());
@@ -5417,19 +5298,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(size));
}
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
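+  // The target space is folded into the same flags word below; the combined
+  // value is passed to Runtime::kAllocateInTargetSpace as a single smi.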
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
- instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
- instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
- instr->context());
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -5533,22 +5417,21 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Register scratch = scratch0();
if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(input, Operand(ip));
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
final_branch_condition = eq;
} else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
__ b(ge, false_label);
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
} else if (type_name->Equals(heap()->symbol_string())) {
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
+ __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
final_branch_condition = eq;
} else if (type_name->Equals(heap()->boolean_string())) {
@@ -5566,33 +5449,35 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ b(eq, true_label);
__ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = ne;
} else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ Register type_reg = scratch;
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
+ __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
__ b(eq, true_label);
- __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
final_branch_condition = eq;
} else if (type_name->Equals(heap()->object_string())) {
+ Register map = scratch;
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
}
- __ CompareObjectType(input, input, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(lt, false_label);
- __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, false_label);
+ __ CheckObjectTypeRange(input,
+ map,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ false_label);
// Check for undetectable objects => false.
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
} else {
@@ -5617,40 +5502,38 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
__ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
__ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
__ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);
// Check the marker in the calling frame.
- __ bind(&check_frame_marker);
__ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
__ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block literal pool emission for duration of padding.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block literal pool emission for duration of padding.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
}
}
+ last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5725,7 +5608,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
RelocInfo::CODE_TARGET,
instr);
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5738,7 +5620,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index ef03e60015..5251b85fa9 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -246,7 +246,6 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
R1State r1_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -267,7 +266,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -276,6 +274,10 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister(int index) const;
DwVfpRegister ToDoubleRegister(int index) const;
+ MemOperand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
@@ -296,6 +298,8 @@ class LCodeGen: public LCodeGenBase {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition condition);
template<class InstrType>
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index d8771cb702..c377274efb 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -44,7 +44,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -60,8 +59,8 @@ void MacroAssembler::Jump(Register target, Condition cond) {
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
- mov(ip, Operand(target, rmode));
- bx(ip, cond);
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
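+  // Writing the target directly into pc performs the jump and leaves ip
+  // untouched.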
+ mov(pc, Operand(target, rmode), LeaveCC, cond);
}
@@ -384,6 +383,38 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
}
+void MacroAssembler::Load(Register dst,
+ const MemOperand& src,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ ldrsb(dst, src);
+ } else if (r.IsUInteger8()) {
+ ldrb(dst, src);
+ } else if (r.IsInteger16()) {
+ ldrsh(dst, src);
+ } else if (r.IsUInteger16()) {
+ ldrh(dst, src);
+ } else {
+ ldr(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src,
+ const MemOperand& dst,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ strb(src, dst);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ strh(src, dst);
+ } else {
+ str(src, dst);
+ }
+}
+
+
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
@@ -478,12 +509,19 @@ void MacroAssembler::RecordWrite(Register object,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ ASSERT(!object.is(value));
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
@@ -564,6 +602,26 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
+void MacroAssembler::PushFixedFrame(Register marker_reg) {
+ ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
+ stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
+ cp.bit() |
+ (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+ fp.bit() |
+ lr.bit());
+}
+
+
+void MacroAssembler::PopFixedFrame(Register marker_reg) {
+ ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
+ ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
+ cp.bit() |
+ (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+ fp.bit() |
+ lr.bit());
+}
+
+
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0:
@@ -588,22 +646,26 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
- kDoubleSize));
- for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
- vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+ // Only save allocatable registers.
+ ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
+ ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ vstm(db_w, sp, d16, d31);
}
+ vstm(db_w, sp, d0, d13);
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
- for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
- vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+  // Only restore allocatable registers.
+ ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
+ ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
+ vldm(ia_w, sp, d0, d13);
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ vldm(ia_w, sp, d16, d31);
}
- add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
- kDoubleSize));
PopSafepointRegisters();
}
@@ -774,11 +836,11 @@ void MacroAssembler::Vmov(const DwVfpRegister dst,
const Register scratch) {
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
+ DoubleRepresentation value_rep(imm);
// Handle special values first.
- if (value.bits == zero.bits) {
+ if (value_rep == zero) {
vmov(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits) {
+ } else if (value_rep == minus_zero) {
vneg(dst, kDoubleRegZero);
} else {
vmov(dst, imm, scratch);
@@ -826,102 +888,12 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
-void MacroAssembler::LoadNumber(Register object,
- LowDwVfpRegister dst,
- Register heap_number_map,
- Register scratch,
- Label* not_number) {
- Label is_smi, done;
-
- UntagAndJumpIfSmi(scratch, object, &is_smi);
- JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
-
- vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- b(&done);
-
- // Handle loading a double from a smi.
- bind(&is_smi);
- vmov(dst.high(), scratch);
- vcvt_f64_s32(dst, dst.high());
-
- bind(&done);
-}
-
-
-void MacroAssembler::LoadNumberAsInt32Double(Register object,
- DwVfpRegister double_dst,
- Register heap_number_map,
- Register scratch,
- LowDwVfpRegister double_scratch,
- Label* not_int32) {
- ASSERT(!scratch.is(object));
- ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
-
- Label done, obj_is_not_smi;
-
- UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi);
- vmov(double_scratch.low(), scratch);
- vcvt_f64_s32(double_dst, double_scratch.low());
- b(&done);
-
- bind(&obj_is_not_smi);
- JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32);
-
- // Load the number.
- // Load the double value.
- vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- TestDoubleIsInt32(double_dst, double_scratch);
- // Jump to not_int32 if the operation did not succeed.
- b(ne, not_int32);
-
- bind(&done);
-}
-
-
-void MacroAssembler::LoadNumberAsInt32(Register object,
- Register dst,
- Register heap_number_map,
- Register scratch,
- DwVfpRegister double_scratch0,
- LowDwVfpRegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch.is(object));
-
- Label done, maybe_undefined;
-
- UntagAndJumpIfSmi(dst, object, &done);
-
- JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- // Load the double value.
- vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
- // Jump to not_int32 if the operation did not succeed.
- b(ne, not_int32);
- b(&done);
-
- bind(&maybe_undefined);
- CompareRoot(object, Heap::kUndefinedValueRootIndex);
- b(ne, not_int32);
- // |undefined| is truncated to 0.
- mov(dst, Operand(Smi::FromInt(0)));
- // Fall through.
-
- bind(&done);
-}
-
-
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
- stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ PushFixedFrame();
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
- add(fp, sp, Operand(2 * kPointerSize));
+ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
} else {
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
@@ -932,37 +904,59 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
add(r0, pc, Operand(-8));
ldr(pc, MemOperand(pc, -4));
- dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ emit_code_stub_address(stub);
} else {
- stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ PushFixedFrame(r1);
nop(ip.code());
// Adjust FP to point to saved FP.
- add(fp, sp, Operand(2 * kPointerSize));
+ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
}
+void MacroAssembler::LoadConstantPoolPointerRegister() {
+ if (FLAG_enable_ool_constant_pool) {
+ int constant_pool_offset =
+ Code::kConstantPoolOffset - Code::kHeaderSize - pc_offset() - 8;
+ ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
+ ldr(pp, MemOperand(pc, constant_pool_offset));
+ }
+}
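// Why the trailing "- 8" in the offset above (illustrative derivation; the
// constants come from objects.h): on ARM a read of pc yields the address of
// the current instruction plus 8, and generated code starts at
// code_object + Code::kHeaderSize, so
//
//   pc_value   = code_start + pc_offset() + 8
//   pool_slot  = code_start - Code::kHeaderSize + Code::kConstantPoolOffset
//   ldr offset = pool_slot - pc_value
//              = Code::kConstantPoolOffset - Code::kHeaderSize - pc_offset() - 8
//
// which is exactly the expression checked against the AddrMode2 range above.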
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
- stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ PushFixedFrame();
mov(ip, Operand(Smi::FromInt(type)));
push(ip);
mov(ip, Operand(CodeObject()));
push(ip);
- add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
+ // Adjust FP to point to saved FP.
+ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+int MacroAssembler::LeaveFrame(StackFrame::Type type) {
// r0: preserved
// r1: preserved
// r2: preserved
// Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer and return address.
- mov(sp, fp);
- ldm(ia_w, sp, fp.bit() | lr.bit());
+ // the caller frame pointer, return address and constant pool pointer
+ // (if FLAG_enable_ool_constant_pool).
+ int frame_ends;
+ if (FLAG_enable_ool_constant_pool) {
+ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ frame_ends = pc_offset();
+ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+ } else {
+ mov(sp, fp);
+ frame_ends = pc_offset();
+ ldm(ia_w, sp, fp.bit() | lr.bit());
+ }
+ return frame_ends;
}
@@ -974,11 +968,14 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
Push(lr, fp);
mov(fp, Operand(sp)); // Set up new frame pointer.
// Reserve room for saved entry sp and code object.
- sub(sp, sp, Operand(2 * kPointerSize));
+ sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
if (emit_debug_code()) {
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
+ if (FLAG_enable_ool_constant_pool) {
+ str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ }
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -992,8 +989,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
if (save_doubles) {
SaveFPRegs(sp, ip);
// Note that d0 will be accessible at
- // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
+ // fp - ExitFrameConstants::kFrameSize -
+ // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
+ // since the sp slot, code slot and constant pool slot (if
+ // FLAG_enable_ool_constant_pool) were pushed after the fp.
}
// Reserve place for the return address and stack space and align the frame
@@ -1049,7 +1048,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
- const int offset = 2 * kPointerSize;
+ const int offset = ExitFrameConstants::kFrameSize;
sub(r3, fp,
Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
RestoreFPRegs(r3, ip);
@@ -1072,6 +1071,9 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
#endif
// Tear down the exit frame, pop the arguments, and return.
+ if (FLAG_enable_ool_constant_pool) {
+ ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ }
mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
if (argument_count.is_valid()) {
@@ -1080,7 +1082,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
-void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@@ -1089,17 +1091,9 @@ void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
}
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be r5 to
- // follow the calling convention which requires the call type to be
- // in r5.
- ASSERT(dst.is(r5));
- if (call_kind == CALL_AS_FUNCTION) {
- mov(dst, Operand(Smi::FromInt(1)));
- } else {
- mov(dst, Operand(Smi::FromInt(0)));
- }
+// On ARM this is just a synonym to make the purpose clear.
+void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
+ MovFromFloatResult(dst);
}
@@ -1110,8 +1104,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label regular_invoke;
@@ -1121,7 +1114,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// r0: actual arguments count
// r1: function (passed through to callee)
// r2: expected arguments count
- // r3: callee code entry
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
@@ -1169,14 +1161,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(r5, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
b(done);
}
} else {
- SetCallKind(r5, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
@@ -1188,8 +1178,7 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -1197,16 +1186,14 @@ void MacroAssembler::InvokeCode(Register code,
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(r5, call_kind);
Call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, call_kind);
Jump(code);
}
@@ -1217,41 +1204,10 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(r5, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(r5, call_kind);
- Jump(code, rmode);
- }
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -1271,28 +1227,39 @@ void MacroAssembler::InvokeFunction(Register fun,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
+ // Contract with called JS functions requires that function is passed in r1.
+ ASSERT(function.is(r1));
+
// Get the function and setup the context.
- Move(r1, function);
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(r3, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ Move(r1, function);
+ InvokeFunction(r1, expected, actual, flag, call_wrapper);
}
@@ -1558,6 +1525,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and with KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
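// For reference, a sketch of the C++ hash this sequence mirrors (the
// authoritative version is ComputeIntegerHash in utils.h; the helper name
// below is illustrative only, and the generated code additionally masks the
// result with 0x3fffffff to keep it in Smi range):
static inline uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash;
}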
@@ -1624,8 +1594,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
sub(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use t2 for index calculations and keep the hash intact in t0.
mov(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -1642,7 +1611,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
cmp(key, Operand(ip));
- if (i != kProbes - 1) {
+ if (i != kNumberDictionaryProbes - 1) {
b(eq, &done);
} else {
b(ne, miss);
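// Sketch of the probe sequence the unrolled loop above encodes (illustrative
// only; the entry layout constants live in SeededNumberDictionary):
//
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     uint32_t index = (hash + i + i * i) & mask;  // Quadratic probing.
//     if (dictionary[index].key == key) break;     // Hit: fall through to load.
//   }                                              // Final miss jumps to miss.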
@@ -1671,7 +1640,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2066,14 +2035,36 @@ void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
+ const Register temp = type_reg.is(no_reg) ? ip : type_reg;
+
+ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(map, temp, type);
+}
+
+
+void MacroAssembler::CheckObjectTypeRange(Register object,
+ Register map,
+ InstanceType min_type,
+ InstanceType max_type,
+ Label* false_label) {
+ STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+ STATIC_ASSERT(LAST_TYPE < 256);
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(map, type_reg, type);
+ ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ sub(ip, ip, Operand(min_type));
+ cmp(ip, Operand(max_type - min_type));
+ b(hi, false_label);
}
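// The sub/cmp/b(hi) sequence above is the usual single-branch unsigned range
// check; in C++ terms (sketch, mirroring the 32-bit arithmetic used above):
static inline bool InstanceTypeInRange(uint32_t instance_type,
                                       uint32_t min_type,
                                       uint32_t max_type) {
  // Values below min_type wrap around to large unsigned numbers, so one
  // unsigned comparison covers both bounds.
  return (instance_type - min_type) <= (max_type - min_type);
}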
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
+ // Registers map and type_reg can be ip. These two lines assert
+ // that ip can be used with the two instructions (the constants
+ // will never need ip).
+ STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+ STATIC_ASSERT(LAST_TYPE < 256);
ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
}
@@ -2298,8 +2289,6 @@ void MacroAssembler::CallStub(CodeStub* stub,
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
}
@@ -2310,10 +2299,8 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(
- ExternalReference function,
- Address function_address,
+ Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand) {
@@ -2327,7 +2314,25 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference::handle_scope_level_address(isolate()),
next_address);
- ASSERT(!thunk_last_arg.is(r3));
+ ASSERT(function_address.is(r1) || function_address.is(r2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ mov(r9, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
+ ldrb(r9, MemOperand(r9, 0));
+ cmp(r9, Operand(0));
+ b(eq, &profiler_disabled);
+
+ // Additional parameter is the address of the actual callback.
+ mov(r3, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ Move(r3, function_address);
+ bind(&end_profiler_check);
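// Roughly, the block above selects the call target in r3 (sketch):
//   r3 = *isolate->cpu_profiler()->is_profiling_address()
//            ? thunk_ref          // profiler on: go through the profiling thunk
//            : function_address;  // profiler off: call the callback directly
// When the thunk is used, the real callback address is already available in
// r1/r2 (function_address, see the ASSERT above) as the thunk's extra argument.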
// Allocate HandleScope in callee-save registers.
mov(r9, Operand(next_address));
@@ -2346,25 +2351,6 @@ void MacroAssembler::CallApiFunctionAndReturn(
PopSafepointRegisters();
}
- Label profiler_disabled;
- Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- mov(r3, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
- ldrb(r3, MemOperand(r3, 0));
- cmp(r3, Operand(0));
- b(eq, &profiler_disabled);
-
- // Additional parameter is the address of the actual callback.
- mov(thunk_last_arg, Operand(reinterpret_cast<int32_t>(function_address)));
- mov(r3, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- mov(r3, Operand(function));
- bind(&end_profiler_check);
-
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
@@ -2444,8 +2430,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2718,12 +2703,10 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
- SetCallKind(r5, CALL_AS_METHOD);
Call(r2);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, CALL_AS_METHOD);
Jump(r2);
}
}
@@ -2823,16 +2806,8 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -2844,25 +2819,24 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- mov(r0, Operand(p0));
- push(r0);
- mov(r0, Operand(Smi::FromInt(p1 - p0)));
+ mov(r0, Operand(Smi::FromInt(reason)));
push(r0);
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
- static const int kExpectedAbortInstructions = 10;
+ static const int kExpectedAbortInstructions = 7;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@@ -3443,6 +3417,42 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ SmiTst(string);
+ Check(ne, kNonObject);
+
+ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ cmp(ip, Operand(encoding_mask));
+ Check(eq, kUnexpectedStringType);
+
+ // The index comes in untagged. Tag it here to compare against the string
+ // length without using a temp register; it is restored at the end of this
+ // function.
+ Label index_tag_ok, index_tag_bad;
+ TrySmiTag(index, index, &index_tag_bad);
+ b(&index_tag_ok);
+ bind(&index_tag_bad);
+ Abort(kIndexIsTooLarge);
+ bind(&index_tag_ok);
+
+ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
+ cmp(index, ip);
+ Check(lt, kIndexIsTooLarge);
+
+ cmp(index, Operand(Smi::FromInt(0)));
+ Check(ge, kIndexIsNegative);
+
+ SmiUntag(index, index);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
@@ -3469,41 +3479,27 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
}
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
- if (use_eabi_hardfloat()) {
- Move(d0, dreg);
- } else {
- vmov(r0, r1, dreg);
+void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
+ ASSERT(src.is(d0));
+ if (!use_eabi_hardfloat()) {
+ vmov(r0, r1, src);
}
}
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
- DwVfpRegister dreg2) {
- if (use_eabi_hardfloat()) {
- if (dreg2.is(d0)) {
- ASSERT(!dreg1.is(d1));
- Move(d1, dreg2);
- Move(d0, dreg1);
- } else {
- Move(d0, dreg1);
- Move(d1, dreg2);
- }
- } else {
- vmov(r0, r1, dreg1);
- vmov(r2, r3, dreg2);
- }
+// On ARM this is just a synonym to make the purpose clear.
+void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
+ MovToFloatParameter(src);
}
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
- Register reg) {
- if (use_eabi_hardfloat()) {
- Move(d0, dreg);
- Move(r0, reg);
- } else {
- Move(r2, reg);
- vmov(r0, r1, dreg);
+void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
+ DwVfpRegister src2) {
+ ASSERT(src1.is(d0));
+ ASSERT(src2.is(d1));
+ if (!use_eabi_hardfloat()) {
+ vmov(r0, r1, src1);
+ vmov(r2, r3, src2);
}
}
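// ABI recap for these helpers (sketch): with the hard-float EABI variant,
// double parameters and results already live in d0, d1, ... so only the
// ASSERTs apply; with soft-float, each double is split across a core register
// pair, d0 -> r0:r1 and d1 -> r2:r3, via vmov before the C call. For example,
// preparing a call to double pow(double, double) places the arguments in
// d0/d1 or r0..r3 accordingly.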
@@ -3826,6 +3822,52 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
}
+void MacroAssembler::Throw(BailoutReason reason) {
+ Label throw_start;
+ bind(&throw_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ mov(r0, Operand(Smi::FromInt(reason)));
+ push(r0);
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // will not return here
+ if (is_const_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the ThrowMessage macro constant.
+ static const int kExpectedThrowMessageInstructions = 10;
+ int throw_instructions = InstructionsGeneratedSince(&throw_start);
+ ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
+ while (throw_instructions++ < kExpectedThrowMessageInstructions) {
+ nop();
+ }
+ }
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ b(NegateCondition(cc), &L);
+ Throw(reason);
+ // will not return here
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
@@ -3856,7 +3898,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
EnumLength(r3, r1);
- cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
+ cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
b(eq, call_runtime);
jmp(&start);
@@ -3873,10 +3915,16 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
// Check that there are no elements. Register r2 contains the current JS
// object we've reached through the prototype chain.
+ Label no_elements;
ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
cmp(r2, empty_fixed_array_value);
+ b(eq, &no_elements);
+
+ // Second chance: the object may be using the empty slow element dictionary.
+ CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
b(ne, call_runtime);
+ bind(&no_elements);
ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
cmp(r2, null_value);
b(ne, &next);
@@ -3929,6 +3977,32 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
}
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // Start the walk from the object itself; |current| (scratch0) tracks it.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
+ b(eq, found);
+ ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ cmp(current, Operand(factory->null_value()));
+ b(ne, &loop_again);
+}
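// C++-level sketch of the walk generated above (illustrative only; the
// generated code branches to |found| instead of returning a bool):
static bool HasDictionaryElementsInChain(Object* object) {
  while (!object->IsNull()) {
    Map* map = HeapObject::cast(object)->map();
    if (map->elements_kind() == DICTIONARY_ELEMENTS) return true;
    object = map->prototype();
  }
  return false;
}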
+
+
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 282a8052e4..7861d42aab 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -45,8 +45,8 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Give alias names to registers
-const Register pp = { kRegister_r7_Code }; // Constant pool pointer.
-const Register cp = { kRegister_r8_Code }; // JavaScript context pointer.
+const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
+const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
// Flags used for AllocateHeapNumber
@@ -161,6 +161,9 @@ class MacroAssembler: public Assembler {
void Move(Register dst, Register src, Condition cond = al);
void Move(DwVfpRegister dst, DwVfpRegister src);
+ void Load(Register dst, const MemOperand& src, Representation r);
+ void Store(Register src, const MemOperand& dst, Representation r);
+
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index,
@@ -386,7 +389,7 @@ class MacroAssembler: public Assembler {
}
} else {
Pop(src2, src3, cond);
- str(src1, MemOperand(sp, 4, PostIndex), cond);
+ ldr(src1, MemOperand(sp, 4, PostIndex), cond);
}
}
@@ -423,6 +426,12 @@ class MacroAssembler: public Assembler {
}
}
+ // Push a fixed frame, consisting of lr, fp, constant pool (if
+ // FLAG_enable_ool_constant_pool), context, and the JS function / marker id
+ // when marker_reg is a valid register.
+ void PushFixedFrame(Register marker_reg = no_reg);
+ void PopFixedFrame(Register marker_reg = no_reg);
+
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -531,6 +540,9 @@ class MacroAssembler: public Assembler {
// Generates function and stub prologue code.
void Prologue(PrologueFrameMode frame_mode);
+ // Loads the constant pool pointer (pp) register.
+ void LoadConstantPoolPointerRegister();
+
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -582,40 +594,31 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Set up call kind marking in ecx. The method takes ecx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void IsObjectJSObjectType(Register heap_object,
Register map,
@@ -658,6 +661,12 @@ class MacroAssembler: public Assembler {
// handler chain.
void ThrowUncatchable(Register value);
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -836,11 +845,21 @@ class MacroAssembler: public Assembler {
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
// other registers.
+ // If type_reg is no_reg, ip is used as the scratch register instead.
void CompareObjectType(Register heap_object,
Register map,
Register type_reg,
InstanceType type);
+ // Compare object type for heap object. Branch to false_label if type
+ // is lower than min_type or greater than max_type.
+ // Loads the heap object's map into the |map| register.
+ void CheckObjectTypeRange(Register heap_object,
+ Register map,
+ InstanceType min_type,
+ InstanceType max_type,
+ Label* false_label);
+
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -1090,9 +1109,9 @@ class MacroAssembler: public Assembler {
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DwVfpRegister dreg);
- void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
- void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
+ void MovToFloatParameter(DwVfpRegister src);
+ void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
+ void MovToFloatResult(DwVfpRegister src);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1108,16 +1127,15 @@ class MacroAssembler: public Assembler {
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DwVfpRegister dst);
+ void MovFromFloatParameter(DwVfpRegister dst);
+ void MovFromFloatResult(DwVfpRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
+ void CallApiFunctionAndReturn(Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand);
@@ -1172,8 +1190,6 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
@@ -1340,6 +1356,11 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
// ---------------------------------------------------------------------------
// Patching helpers.
@@ -1370,7 +1391,8 @@ class MacroAssembler: public Assembler {
// Activation support.
void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
+ // Returns the pc offset at which the frame ends.
+ int LeaveFrame(StackFrame::Type type);
// Expects object in r0 and returns map with validated enum cache
// in r0. Assumes that any other register can be used as a scratch.
@@ -1396,6 +1418,10 @@ class MacroAssembler: public Assembler {
bind(&no_memento_found);
}
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1411,8 +1437,7 @@ class MacroAssembler: public Assembler {
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InitializeNewString(Register string,
Register length,
@@ -1443,7 +1468,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 461d032b99..ac36687fca 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -25,9 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <stdarg.h>
#include <stdlib.h>
#include <cmath>
-#include <cstdarg>
+
#include "v8.h"
#if V8_TARGET_ARCH_ARM
@@ -845,6 +846,12 @@ class Redirection {
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
+ static void* ReverseRedirection(int32_t reg) {
+ Redirection* redirection = FromSwiInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
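  // Sketch of the round trip this helper provides: generated code sees the
  // address of the swi_instruction_ embedded in a Redirection record as the
  // "external function address"; given that address back in a register, the
  // profiling-call paths below recover the real callback via
  // FromSwiInstruction(...)->external_function() before invoking the thunk.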
private:
void* external_function_;
uint32_t swi_instruction_;
@@ -1688,12 +1695,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
typedef void (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, int32_t arg2);
+ int32_t arg0, int32_t arg1, void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1832,7 +1839,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
CHECK(stack_aligned);
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- target(arg0, arg1);
+ target(arg0, Redirection::ReverseRedirection(arg1));
} else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1861,7 +1868,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
external);
- target(arg0, arg1, arg2);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@@ -2733,7 +2740,11 @@ void Simulator::DecodeType3(Instruction* instr) {
int32_t rs_val = get_register(rs);
int32_t ret_val = 0;
ASSERT(rs_val != 0);
- ret_val = rm_val/rs_val;
+ if ((rm_val == kMinInt) && (rs_val == -1)) {
+ ret_val = kMinInt;
+ } else {
+ ret_val = rm_val / rs_val;
+ }
set_register(rn, ret_val);
return;
}
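// Why the extra branch: hardware SDIV defines kMinInt / -1 as kMinInt (the
// true quotient is not representable), while the same expression is undefined
// behaviour in C++, so the host-side simulation must special-case it (sketch,
// assuming <cstdint>):
//
//   int32_t SimulatedSdiv(int32_t num, int32_t den) {
//     if (num == INT32_MIN && den == -1) return INT32_MIN;
//     return num / den;  // den != 0 is asserted by the caller.
//   }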
@@ -2905,7 +2916,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
double dm_value = get_double_from_d_register(vm);
- double dd_value = fabs(dm_value);
+ double dd_value = std::fabs(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
@@ -2934,7 +2945,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
- double dd_value = sqrt(dm_value);
+ double dd_value = std::sqrt(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if (instr->Opc3Value() == 0x0) {
@@ -3270,8 +3281,8 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
double abs_diff =
- unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
- : fabs(val - temp);
+ unsigned_integer ? std::fabs(val - static_cast<uint32_t>(temp))
+ : std::fabs(val - temp);
inexact_vfp_flag_ = (abs_diff != 0);
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index e392c5cb36..24d7fe58c4 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -207,6 +207,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
@@ -269,7 +273,7 @@ class Simulator {
// Checks if the current instruction should be executed based on its
// condition bits.
- bool ConditionallyExecute(Instruction* instr);
+ inline bool ConditionallyExecute(Instruction* instr);
// Helper functions to set the conditional flags in the architecture state.
void SetNZFlags(int32_t val);
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 923011fe08..3bc9554594 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -295,15 +295,20 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ ldr(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ Move(ip, isolate->global_object());
- __ cmp(prototype, ip);
- __ b(ne, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ ldr(scratch, MemOperand(cp, offset));
+ __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+ __ Move(ip, function);
+ __ cmp(ip, scratch);
+ __ b(ne, miss);
+
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -742,36 +747,6 @@ void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
}
-static void GenerateCallFunction(MacroAssembler* masm,
- Handle<Object> object,
- const ParameterCount& arguments,
- Label* miss,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r0: receiver
- // -- r1: function to call
- // -----------------------------------
-
- // Check that the function really is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
@@ -798,377 +773,90 @@ static void CompileCallLoadPropertyWithInterceptor(
Register receiver,
Register holder,
Register name,
- Handle<JSObject> holder_obj) {
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ mov(r0, Operand(StubCache::kInterceptorArgsLength));
- __ mov(r1, Operand(ref));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
}
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- __ mov(scratch, Operand(Smi::FromInt(0)));
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(scratch);
+// Generate call to api function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!receiver.is(scratch_in));
+ __ push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ push(arg);
}
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
+ ASSERT(optimization.is_simple_api_call());
+ // Abi for CallApiFunctionStub.
+ Register callee = r0;
+ Register call_data = r4;
+ Register holder = r2;
+ Register api_function_address = r1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ Move(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
- // : holder (set by CheckPrototypes)
- // -- sp[28] : last JS argument
- // -- ...
- // -- sp[(argc + 6) * 4] : first JS argument
- // -- sp[(argc + 7) * 4] : receiver
- // -----------------------------------
- typedef FunctionCallbackArguments FCA;
- // Save calling context.
- __ str(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
- // Get the function and setup the context.
+ Isolate* isolate = masm->isolate();
Handle<JSFunction> function = optimization.constant_function();
- __ Move(r5, function);
- __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
- __ str(r5, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
-
- // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(r0, api_call_info);
- __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ Move(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ Move(call_data, api_call_info);
+ __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
} else {
- __ Move(r6, call_data);
+ __ Move(call_data, call_data_obj);
}
- // Store call data.
- __ str(r6, MemOperand(sp, FCA::kDataIndex * kPointerSize));
- // Store isolate.
- __ mov(r5, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ str(r5, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
- // Store ReturnValue default and ReturnValue.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ str(r5, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
- __ str(r5, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
-
- // Prepare arguments.
- __ mov(r2, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // r0 = FunctionCallbackInfo&
- // Arguments is after the return address.
- __ add(r0, sp, Operand(1 * kPointerSize));
- // FunctionCallbackInfo::implicit_args_
- __ str(r2, MemOperand(r0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ add(ip, r2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ mov(ip, Operand(argc));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ mov(ip, Operand::Zero());
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+
+ // Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref = ExternalReference(&fun,
type,
masm->isolate());
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- masm->isolate());
-
- AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(fp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
-
- __ CallApiFunctionAndReturn(ref,
- function_address,
- thunk_ref,
- r1,
- kStackUnwindSpace,
- return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
-}
-
-
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- typedef FunctionCallbackArguments FCA;
- const int stack_space = kFastApiCallArguments + argc + 1;
- // Assign stack space for the call arguments.
- __ sub(sp, sp, Operand(stack_space * kPointerSize));
- // Write holder to stack frame.
- __ str(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- // Write receiver to stack frame.
- int index = stack_space - 1;
- __ str(receiver, MemOperand(sp, index * kPointerSize));
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ str(receiver, MemOperand(sp, index-- * kPointerSize));
- }
-
- GenerateFastApiDirectCall(masm, optimization, argc, true);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
- Counters* counters = masm->isolate()->counters();
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(
- masm, optimization, arguments_.immediate(), false);
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- Handle<JSFunction> function = optimization.constant_function();
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
+ __ mov(api_function_address, Operand(ref));
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ b(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- // Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- StubCache::kInterceptorArgsLength);
- // Restore the name_ register.
- __ pop(name_);
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- }
- // If interceptor returns no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch);
- __ b(ne, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_ic_state_;
-};
-
-
-void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<JSGlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
+ // Jump to stub.
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
}
@@ -1181,21 +869,20 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, Operand(Handle<Map>(object->map())));
+ __ mov(scratch1, Operand(receiver_map));
- Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1205,30 +892,31 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
- typedef FunctionCallbackArguments FCA;
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1239,8 +927,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Register map_reg = scratch1;
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- Handle<Map> current_map(current->map());
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
// CheckMap implicitly loads the map of |reg| into |map_reg|.
__ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
} else {
@@ -1250,9 +937,14 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
- if (current->IsJSGlobalProxy()) {
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
}
+
reg = holder_reg; // From now on the object will be in holder_reg.
if (heap()->InNewSpace(*prototype)) {
@@ -1265,71 +957,62 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
}
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
// Go to the next object in the prototype chain.
current = prototype;
+ current_map = handle(current->map());
}
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
- DONT_DO_SMI_CHECK);
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
// Return the register containing the holder.
return reg;
}
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ b(success);
+ Label success;
+ __ b(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ b(success);
+ Label success;
+ __ b(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -1362,7 +1045,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
__ b(ne, &miss);
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
return reg;
}
@@ -1394,13 +1077,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1432,43 +1108,24 @@ void LoadStubCompiler::GenerateLoadCallback(
__ Push(scratch4(), reg);
__ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
__ push(name());
- __ mov(r0, sp); // r0 = Handle<Name>
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
+ // Abi for CallApiGetter
+ Register getter_address_reg = r2;
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object** args_) as the data.
- __ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
- __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
-
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ mov(getter_address_reg, Operand(ref));
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
- ExternalReference::Type thunk_type =
- ExternalReference::PROFILING_GETTER_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- isolate());
- __ CallApiFunctionAndReturn(ref,
- getter_address,
- thunk_ref,
- r2,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiGetterStub stub;
+ __ TailCallStub(&stub);
}
void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
@@ -1517,11 +1174,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
@@ -1555,1228 +1211,16 @@ void LoadStubCompiler::GenerateLoadInterceptor(
}
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(r2, Operand(name));
- __ b(ne, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r0, miss);
- CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ mov(r3, Operand(cell));
- __ ldr(r1, FieldMemOperand(r3, Cell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ cmp(r4, r3);
- } else {
- __ cmp(r1, Operand(function));
- }
- __ b(ne, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
-
- // Get the receiver of the function from the stack into r0.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r0, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
- GenerateFastPropertyLoad(masm(), r1, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
- // Check that function is still array
- const int argc = arguments().immediate();
- GenerateNameCheck(name, &miss);
- Register receiver = r1;
-
- if (cell.is_null()) {
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0,
- r4, name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- __ mov(r0, Operand(argc));
- __ mov(r2, Operand(site_feedback_cell));
- __ mov(r1, Operand(function));
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- Register receiver = r1;
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
- name, &miss);
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- Register elements = r6;
- Register end_elements = r5;
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &check_double,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(r0, r4);
- __ b(gt, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(r4, &with_write_barrier);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(r0, r4);
- __ b(gt, &call_builtin);
-
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(r4, r0, elements, r5, d0,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&with_write_barrier);
-
- __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(r3, r9, &not_fast_object);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(r3, r9, &call_builtin);
-
- __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r9, ip);
- __ b(eq, &call_builtin);
- // edx: receiver
- // r3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r3,
- r9,
- &try_holey_map);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- r3,
- r9,
- &call_builtin);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(r3, r3, &call_builtin);
- }
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- __ RecordWrite(elements,
- end_elements,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&attempt_to_grow_elements);
- // r0: array's length + 1.
-
- if (!FLAG_inline_new) {
- __ b(&call_builtin);
- }
-
- __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r9, r9, &call_builtin);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top and check if it is the end of elements.
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
- __ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r4, Operand(new_space_allocation_top));
- __ ldr(r3, MemOperand(r4));
- __ cmp(end_elements, r3);
- __ b(ne, &call_builtin);
-
- __ mov(r9, Operand(new_space_allocation_limit));
- __ ldr(r9, MemOperand(r9));
- __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
- __ cmp(r3, r9);
- __ b(hi, &call_builtin);
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ str(r3, MemOperand(r4));
- // Push the argument.
- __ str(r2, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ str(r3, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
- __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Ret();
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
- }
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss, return_undefined, call_builtin;
- Register receiver = r1;
- Register elements = r3;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
- r4, r0, name, &miss);
-
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r4 and calculate new length.
- __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC);
- __ b(lt, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4));
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ cmp(r0, r6);
- __ b(eq, &call_builtin);
-
- // Set the array's length.
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&return_undefined);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r1, r3, r4, name, &miss);
-
- Register receiver = r1;
- Register index = r4;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r1, r3, r4, name, &miss);
-
- Register receiver = r0;
- Register index = r4;
- Register scratch = r3;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- __ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = r1;
- __ ldr(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, code, Operand(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, r0);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss, slow;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- __ SmiTst(r0);
- __ Drop(argc + 1, eq);
- __ Ret(eq);
-
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
-
- Label smi_check, just_return;
-
- // Load the HeapNumber value.
- // We will need access to the value in the core registers, so we load it
- // with ldrd and move it to the fpu. It also spares a sub instruction for
- // updating the HeapNumber value address, as vldr expects a multiple
- // of 4 offset.
- __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(d1, r4, r5);
-
- // Check for NaN, Infinities and -0.
- // They are invariant through a Math.Floor call, so just
- // return the original argument.
- __ Sbfx(r3, r5, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- __ cmp(r3, Operand(-1));
- __ b(eq, &just_return);
- __ eor(r3, r5, Operand(0x80000000u));
- __ orr(r3, r3, r4, SetCC);
- __ b(eq, &just_return);
- // Test for values that can be exactly represented as a
- // signed 32-bit integer.
- __ TryDoubleToInt32Exact(r0, d1, d2);
- // If exact, check smi
- __ b(eq, &smi_check);
- __ cmp(r5, Operand(0));
-
- // If input is in ]+0, +inf[, the cmp has cleared overflow and negative
- // (V=0 and N=0), the two following instructions won't execute and
- // we fall through smi_check to check if the result can fit into a smi.
-
- // If input is in ]-inf, -0[, sub one and, go to slow if we have
- // an overflow. Else we fall through smi check.
- // Hint: if x is a negative, non integer number,
- // floor(x) <=> round_to_zero(x) - 1.
- __ sub(r0, r0, Operand(1), SetCC, mi);
- __ b(vs, &slow);
-
- __ bind(&smi_check);
- // Check if the result can fit into an smi. If we had an overflow,
- // the result is either 0x80000000 or 0x7FFFFFFF and won't fit into an smi.
- // If result doesn't fit into an smi, branch to slow.
- __ SmiTag(r0, SetCC);
- __ b(vs, &slow);
-
- __ bind(&just_return);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&slow);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- __ JumpIfNotSmi(r0, &not_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ b(mi, &slow);
-
- // Smi case done.
- __ Drop(argc + 1);
- __ Ret();
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(&not_smi);
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ tst(r1, Operand(HeapNumber::kSignMask));
- __ b(ne, &negative_sign);
- __ Drop(argc + 1);
- __ Ret();
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ eor(r1, r1, Operand(HeapNumber::kSignMask));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
- __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ Drop(argc + 1);
- __ Ret();
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r1, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, r0, r3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
-
- ReserveSpaceForFastApiCall(masm(), r0);
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
- depth, &miss);
-
- GenerateFastApiDirectCall(masm(), optimization, argc, false);
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(r1, &miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1, r0, r3);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE);
- __ b(ne, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(r1, &fast);
- __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
- }
- }
-
- __ b(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<Cell>::null(),
- function, Handle<String>::cast(name),
- Code::CONSTANT);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
+ // Check that the object is a boolean.
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(object, ip);
+ __ b(eq, &success);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(object, ip);
+ __ b(ne, miss);
__ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
- &miss);
-
- // Move returned value, the function to call, to r1.
- __ mov(r1, r0);
- // Restore receiver.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Set up the context (function already in r1).
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
}
@@ -2785,14 +1229,14 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
// Stub never generated for non-global objects that require access checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(receiver()); // receiver
+ __ push(holder_reg);
__ mov(ip, Operand(callback)); // callback info
__ push(ip);
__ mov(ip, Operand(name));
@@ -2801,28 +1245,10 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2832,6 +1258,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- r0 : value
@@ -2841,17 +1268,25 @@ void StoreStubCompiler::GenerateStoreViaSetter(
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ Register receiver = r1;
+ Register value = r0;
// Save value register, so we can restore it later.
- __ push(r0);
+ __ push(value);
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ Push(r1, r0);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver, value);
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2875,57 +1310,30 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
- __ mov(scratch1(), Operand(Smi::FromInt(strict_mode())));
- __ push(scratch1()); // strict mode
-
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
- __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ Ret();
// Return the generated code.
- return GetCode(kind(), Code::NONEXISTENT, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2957,27 +1365,12 @@ Register* KeyedStoreStubCompiler::registers() {
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Operand(name));
- __ b(ne, miss);
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Operand(name));
- __ b(ne, miss);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
@@ -2990,11 +1383,17 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -3013,17 +1412,13 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
- Label success, miss;
-
- __ CheckMap(
- receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
+ Label miss;
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(cell));
@@ -3036,43 +1431,52 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ b(eq, &miss);
}
- HandlerFrontendFooter(name, &success, &miss);
- __ bind(&success);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
__ mov(r0, r4);
__ Ret();
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::NORMAL, name);
}
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
+ TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ cmp(this->name(), Operand(name));
+ __ b(ne, &miss);
}
- __ JumpIfSmi(receiver(), &miss);
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
Register map_reg = scratch1();
- int receiver_count = receiver_maps->length();
+ int receiver_count = types->length();
int number_of_handled_maps = 0;
__ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = receiver_maps->at(current);
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
- __ mov(ip, Operand(receiver_maps->at(current)));
+ __ mov(ip, Operand(map));
__ cmp(map_reg, ip);
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
}
}
@@ -3131,12 +1535,12 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Label slow, miss_force_generic;
+ Label slow, miss;
Register key = r0;
Register receiver = r1;
- __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic);
+ __ UntagAndJumpIfNotSmi(r2, key, &miss);
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
__ Ret();
@@ -3154,14 +1558,14 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
// Miss case, call the runtime.
- __ bind(&miss_force_generic);
+ __ bind(&miss);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
index e734986840..a8c5e001c4 100644
--- a/deps/v8/src/array-iterator.js
+++ b/deps/v8/src/array-iterator.js
@@ -36,9 +36,9 @@ var ARRAY_ITERATOR_KIND_VALUES = 2;
var ARRAY_ITERATOR_KIND_ENTRIES = 3;
// The spec draft also has "sparse" but it is never used.
-var iteratorObjectSymbol = %CreateSymbol(UNDEFINED);
-var arrayIteratorNextIndexSymbol = %CreateSymbol(UNDEFINED);
-var arrayIterationKindSymbol = %CreateSymbol(UNDEFINED);
+var iteratorObjectSymbol = NEW_PRIVATE("iterator_object");
+var arrayIteratorNextIndexSymbol = NEW_PRIVATE("iterator_next");
+var arrayIterationKindSymbol = NEW_PRIVATE("iterator_kind");
function ArrayIterator() {}
@@ -46,9 +46,9 @@ function ArrayIterator() {}
function CreateArrayIterator(array, kind) {
var object = ToObject(array);
var iterator = new ArrayIterator;
- iterator[iteratorObjectSymbol] = object;
- iterator[arrayIteratorNextIndexSymbol] = 0;
- iterator[arrayIterationKindSymbol] = kind;
+ SET_PRIVATE(iterator, iteratorObjectSymbol, object);
+ SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
+ SET_PRIVATE(iterator, arrayIterationKindSymbol, kind);
return iterator;
}
@@ -60,24 +60,24 @@ function CreateIteratorResultObject(value, done) {
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
var iterator = ToObject(this);
- var array = iterator[iteratorObjectSymbol];
+ var array = GET_PRIVATE(iterator, iteratorObjectSymbol);
if (!array) {
throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']);
}
- var index = iterator[arrayIteratorNextIndexSymbol];
- var itemKind = iterator[arrayIterationKindSymbol];
+ var index = GET_PRIVATE(iterator, arrayIteratorNextIndexSymbol);
+ var itemKind = GET_PRIVATE(iterator, arrayIterationKindSymbol);
var length = TO_UINT32(array.length);
// "sparse" is never used.
if (index >= length) {
- iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity
+ SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, INFINITY);
return CreateIteratorResultObject(UNDEFINED, true);
}
- iterator[arrayIteratorNextIndexSymbol] = index + 1;
+ SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, index + 1);
if (itemKind == ARRAY_ITERATOR_KIND_VALUES)
return CreateIteratorResultObject(array[index], false);
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index e98d7f5b53..372b7ece63 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -376,10 +376,7 @@ function ArrayToLocaleString() {
function ArrayJoin(separator) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.join"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
var length = TO_UINT32(this.length);
if (IS_UNDEFINED(separator)) {
@@ -414,10 +411,7 @@ function ObservedArrayPop(n) {
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.pop"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop");
var n = TO_UINT32(this.length);
if (n == 0) {
@@ -425,6 +419,11 @@ function ArrayPop() {
return;
}
+ if ($Object.isSealed(this)) {
+ throw MakeTypeError("array_functions_change_sealed",
+ ["Array.prototype.pop"]);
+ }
+
if (%IsObserved(this))
return ObservedArrayPop.call(this, n);
@@ -457,16 +456,18 @@ function ObservedArrayPush() {
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.push");
+
+ var n = TO_UINT32(this.length);
+ var m = %_ArgumentsLength();
+ if (m > 0 && $Object.isSealed(this)) {
+ throw MakeTypeError("array_functions_change_sealed",
["Array.prototype.push"]);
}
if (%IsObserved(this))
return ObservedArrayPush.apply(this, arguments);
- var n = TO_UINT32(this.length);
- var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
}
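The new $Object.isSealed guards make pop and push throw up front on sealed arrays, where the element and length updates could otherwise only partially succeed. A rough sketch of the intended behaviour (not taken from the patch's tests); shift further down gets the identical guard:

    var a = Object.seal([1, 2, 3]);
    try { a.push(4); } catch (e) { }  // TypeError: array_functions_change_sealed
    try { a.pop();  } catch (e) { }   // TypeError: array_functions_change_sealed
    a.length;                         // still 3 - nothing was moved or truncated
    Object.seal([]).pop();            // undefined - the empty-array early return
                                      // still runs before the sealed check
    Object.seal([1]).push();          // pushing zero arguments remains allowed (m == 0)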
@@ -479,10 +480,7 @@ function ArrayPush() {
// by the array elements of each argument in order. See ECMA-262,
// section 15.4.4.7.
function ArrayConcat(arg1) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.concat"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.concat");
var array = ToObject(this);
var arg_count = %_ArgumentsLength();
@@ -541,10 +539,7 @@ function SparseReverse(array, len) {
function ArrayReverse() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.reverse"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
var j = TO_UINT32(this.length) - 1;
@@ -592,10 +587,7 @@ function ObservedArrayShift(len) {
}
function ArrayShift() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.shift"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
var len = TO_UINT32(this.length);
@@ -604,6 +596,11 @@ function ArrayShift() {
return;
}
+ if ($Object.isSealed(this)) {
+ throw MakeTypeError("array_functions_change_sealed",
+ ["Array.prototype.shift"]);
+ }
+
if (%IsObserved(this))
return ObservedArrayShift.call(this, len);
@@ -640,20 +637,34 @@ function ObservedArrayUnshift() {
}
function ArrayUnshift(arg1) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.unshift");
+
+ var len = TO_UINT32(this.length);
+ var num_arguments = %_ArgumentsLength();
+ var is_sealed = $Object.isSealed(this);
+
+ if (num_arguments > 0 && is_sealed) {
+ throw MakeTypeError("array_functions_change_sealed",
["Array.prototype.unshift"]);
}
if (%IsObserved(this))
return ObservedArrayUnshift.apply(this, arguments);
- var len = TO_UINT32(this.length);
- var num_arguments = %_ArgumentsLength();
-
- if (IS_ARRAY(this)) {
+ if (IS_ARRAY(this) && !is_sealed) {
SmartMove(this, 0, 0, len, num_arguments);
} else {
+ if (num_arguments == 0 && $Object.isFrozen(this)) {
+ // In the zero argument case, values from the prototype come into the
+ // object. This can't be allowed on frozen arrays.
+ for (var i = 0; i < len; i++) {
+ if (!this.hasOwnProperty(i) && !IS_UNDEFINED(this[i])) {
+ throw MakeTypeError("array_functions_on_frozen",
+ ["Array.prototype.shift"]);
+ }
+ }
+ }
+
SimpleMove(this, 0, 0, len, num_arguments);
}
@@ -663,15 +674,12 @@ function ArrayUnshift(arg1) { // length == 1
this.length = len + num_arguments;
- return len + num_arguments;
+ return this.length;
}
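The unshift rewrite covers three situations: growing a sealed array throws, a zero-argument unshift on a frozen array throws only when it would copy values in from the prototype chain, and the return value is now re-read from this.length so the caller sees the length the array actually ended up with (for instance when the length property turns out not to be writable). A hedged sketch, with made-up values:

    var s = Object.seal([1, 2]);
    try { s.unshift(0); } catch (e) { }  // TypeError - a sealed array cannot grow

    var f = Object.freeze([, 2]);        // hole at index 0
    Array.prototype[0] = 'from proto';   // zero-arg unshift would pull this into the hole
    try { f.unshift(); } catch (e) { }   // TypeError: array_functions_on_frozen
    delete Array.prototype[0];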
function ArraySlice(start, end) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.slice"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice");
var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
@@ -785,10 +793,7 @@ function ObservedArraySplice(start, delete_count) {
function ArraySplice(start, delete_count) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.splice"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.splice");
if (%IsObserved(this))
return ObservedArraySplice.apply(this, arguments);
@@ -802,6 +807,14 @@ function ArraySplice(start, delete_count) {
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
+ if (del_count != num_elements_to_add && $Object.isSealed(this)) {
+ throw MakeTypeError("array_functions_change_sealed",
+ ["Array.prototype.splice"]);
+ } else if (del_count > 0 && $Object.isFrozen(this)) {
+ throw MakeTypeError("array_functions_on_frozen",
+ ["Array.prototype.splice"]);
+ }
+
var use_simple_splice = true;
if (IS_ARRAY(this) &&
num_elements_to_add !== del_count) {
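The splice guard distinguishes the two states: a sealed array still accepts a splice that inserts exactly as many elements as it deletes (the length is unchanged and only writable element slots are touched), while a frozen array rejects any splice that deletes elements at all. Roughly:

    var s = Object.seal([1, 2, 3]);
    s.splice(1, 1, 'x');                       // ok: one out, one in, length stays 3
    try { s.splice(1, 1); } catch (e) { }      // TypeError - the length would have to shrink

    var f = Object.freeze([1, 2, 3]);
    try { f.splice(1, 1, 'x'); } catch (e) { } // TypeError - frozen and del_count > 0,
                                               // even though the length would not change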
@@ -838,10 +851,7 @@ function ArraySplice(start, delete_count) {
function ArraySort(comparefn) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.sort"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
// In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency.
@@ -1131,10 +1141,7 @@ function ArraySort(comparefn) {
// preserving the semantics, since the calls to the receiver function can add
// or delete elements from the array.
function ArrayFilter(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.filter"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1182,10 +1189,7 @@ function ArrayFilter(f, receiver) {
function ArrayForEach(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.forEach"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1226,10 +1230,7 @@ function ArrayForEach(f, receiver) {
// Executes the function once for each element present in the
// array until it finds one where callback returns true.
function ArraySome(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.some"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.some");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1269,10 +1270,7 @@ function ArraySome(f, receiver) {
function ArrayEvery(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.every"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.every");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1311,10 +1309,7 @@ function ArrayEvery(f, receiver) {
}
function ArrayMap(f, receiver) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.map"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1357,10 +1352,7 @@ function ArrayMap(f, receiver) {
function ArrayIndexOf(element, index) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.indexOf"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
var length = TO_UINT32(this.length);
if (length == 0) return -1;
@@ -1416,10 +1408,7 @@ function ArrayIndexOf(element, index) {
function ArrayLastIndexOf(element, index) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.lastIndexOf"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
var length = TO_UINT32(this.length);
if (length == 0) return -1;
@@ -1471,10 +1460,7 @@ function ArrayLastIndexOf(element, index) {
function ArrayReduce(callback, current) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.reduce"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduce");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
@@ -1524,10 +1510,7 @@ function ArrayReduce(callback, current) {
}
function ArrayReduceRight(callback, current) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.reduceRight"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduceRight");
// Pull out the length so that side effects are visible before the
// callback function is checked.
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
index c5c98dbe4b..6125f0f61c 100644
--- a/deps/v8/src/arraybuffer.js
+++ b/deps/v8/src/arraybuffer.js
@@ -107,5 +107,3 @@ function SetUpArrayBuffer() {
}
SetUpArrayBuffer();
-
-
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 9ed43601c5..4b4c3d4daf 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -59,6 +59,8 @@
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/assembler-a64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
@@ -73,6 +75,8 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/regexp-macro-assembler-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -122,7 +126,6 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
-
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -306,7 +309,9 @@ int Label::pos() const {
// dropped, and last non-zero chunk tagged with 1.)
+#ifdef DEBUG
const int kMaxStandardNonCompactModes = 14;
+#endif
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
@@ -764,8 +769,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "embedded object";
case RelocInfo::CONSTRUCT_CALL:
return "code target (js construct call)";
- case RelocInfo::CODE_TARGET_CONTEXT:
- return "code target (context)";
case RelocInfo::DEBUG_BREAK:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
@@ -819,14 +822,14 @@ void RelocInfo::Print(Isolate* isolate, FILE* out) {
} else if (rmode_ == EXTERNAL_REFERENCE) {
ExternalReferenceEncoder ref_encoder(isolate);
PrintF(out, " (%s) (%p)",
- ref_encoder.NameOfAddress(*target_reference_address()),
- *target_reference_address());
+ ref_encoder.NameOfAddress(target_reference()),
+ target_reference());
} else if (IsCodeTarget(rmode_)) {
Code* code = Code::GetCodeFromTargetAddress(target_address());
PrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
target_address());
if (rmode_ == CODE_TARGET_WITH_ID) {
- PrintF(" (id=%d)", static_cast<int>(data_));
+ PrintF(out, " (id=%d)", static_cast<int>(data_));
}
} else if (IsPosition(rmode_)) {
PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
@@ -860,7 +863,6 @@ void RelocInfo::Verify() {
break;
#endif
case CONSTRUCT_CALL:
- case CODE_TARGET_CONTEXT:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
@@ -936,7 +938,7 @@ void ExternalReference::InitializeMathExpData() {
// The rest is black magic. Do not attempt to understand it. It is
// loosely based on the "expd" function published at:
// http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
- const double constant3 = (1 << kTableSizeBits) / log(2.0);
+ const double constant3 = (1 << kTableSizeBits) / std::log(2.0);
math_exp_constants_array[3] = constant3;
math_exp_constants_array[4] =
static_cast<double>(static_cast<int64_t>(3) << 51);
@@ -947,7 +949,7 @@ void ExternalReference::InitializeMathExpData() {
math_exp_log_table_array = new double[kTableSize];
for (int i = 0; i < kTableSize; i++) {
- double value = pow(2, i / kTableSizeDouble);
+ double value = std::pow(2, i / kTableSizeDouble);
uint64_t bits = BitCast<uint64_t, double>(value);
bits &= (static_cast<uint64_t>(1) << 52) - 1;
double mantissa = BitCast<double, uint64_t>(bits);
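For reference, the constants set up here can be read as a table-driven exponential. With k = kTableSizeBits:

    e^x = 2^{x/\ln 2} = 2^{y/2^k},  where  y = x \cdot (2^k/\ln 2) = x \cdot \texttt{constant3}.

Writing y ≈ q · 2^k + r with 0 ≤ r < 2^k, the result is roughly 2^q · 2^{r/2^k}, and math_exp_log_table_array[r] stores exactly the 52 mantissa bits of 2^{r/2^k}. The generated code that consumes these constants (not shown in this hunk) is presumably responsible for assembling the final exponent and handling rounding.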
@@ -1053,14 +1055,6 @@ ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
}
-ExternalReference ExternalReference::fill_heap_number_with_random_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
-}
-
-
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
@@ -1069,12 +1063,6 @@ ExternalReference ExternalReference::delete_handle_scope_extensions(
}
-ExternalReference ExternalReference::random_uint32_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(V8::Random)));
-}
-
-
ExternalReference ExternalReference::get_date_field_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
@@ -1105,13 +1093,6 @@ ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
}
-ExternalReference ExternalReference::transcendental_cache_array_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->transcendental_cache()->cache_array_address());
-}
-
-
ExternalReference ExternalReference::new_deoptimizer_function(
Isolate* isolate) {
return ExternalReference(
@@ -1343,14 +1324,6 @@ ExternalReference ExternalReference::address_of_the_hole_nan() {
}
-ExternalReference ExternalReference::record_object_allocation_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate,
- FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm)));
-}
-
-
ExternalReference ExternalReference::address_of_uint32_bias() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.uint32_bias));
@@ -1366,6 +1339,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#elif V8_TARGET_ARCH_A64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerA64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
@@ -1415,79 +1390,11 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
#endif // V8_INTERPRETED_REGEXP
-static double add_two_doubles(double x, double y) {
- return x + y;
-}
-
-
-static double sub_two_doubles(double x, double y) {
- return x - y;
-}
-
-
-static double mul_two_doubles(double x, double y) {
- return x * y;
-}
-
-
-static double div_two_doubles(double x, double y) {
- return x / y;
-}
-
-
-static double mod_two_doubles(double x, double y) {
- return modulo(x, y);
-}
-
-
-static double math_sin_double(double x) {
- return sin(x);
-}
-
-
-static double math_cos_double(double x) {
- return cos(x);
-}
-
-
-static double math_tan_double(double x) {
- return tan(x);
-}
-
-
-static double math_log_double(double x) {
- return log(x);
-}
-
-
-ExternalReference ExternalReference::math_sin_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_sin_double),
- BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_cos_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_cos_double),
- BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_tan_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_tan_double),
- BUILTIN_FP_CALL));
-}
-
-
ExternalReference ExternalReference::math_log_double_function(
Isolate* isolate) {
+ typedef double (*d2d)(double x);
return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(math_log_double),
+ FUNCTION_ADDR(static_cast<d2d>(std::log)),
BUILTIN_FP_CALL));
}
@@ -1558,12 +1465,16 @@ double power_double_double(double x, double y) {
// special cases that are different.
if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
double f;
- if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
+ if (std::modf(y, &f) != 0.0) {
+ return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
+ }
}
if (x == 2.0) {
int y_int = static_cast<int>(y);
- if (y == y_int) return ldexp(1.0, y_int);
+ if (y == y_int) {
+ return std::ldexp(1.0, y_int);
+ }
}
#endif
@@ -1572,7 +1483,7 @@ double power_double_double(double x, double y) {
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
return OS::nan_value();
}
- return pow(x, y);
+ return std::pow(x, y);
}
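Spelled out, the cases this function pins down by hand (inside the platform-specific #if block and after it) are, approximately:

    x is +/-0 or +/-Infinity, y finite and non-integral : x^y = Infinity if exactly one of (x == 0), (y > 0) holds, otherwise 0
    x = 2, y integral                                   : x^y = ldexp(1, y), an exact power-of-two scaling
    y = NaN, or x = +/-1 with y = +/-Infinity           : x^y = NaN (ECMAScript requires NaN here, where C's pow returns 1)

Everything else falls through to std::pow.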
@@ -1592,12 +1503,6 @@ ExternalReference ExternalReference::power_double_int_function(
}
-static int native_compare_doubles(double y, double x) {
- if (x == y) return EQUAL;
- return x < y ? LESS : GREATER;
-}
-
-
bool EvalComparison(Token::Value op, double op1, double op2) {
ASSERT(Token::IsCompareOp(op));
switch (op) {
@@ -1615,42 +1520,14 @@ bool EvalComparison(Token::Value op, double op1, double op2) {
}
-ExternalReference ExternalReference::double_fp_operation(
- Token::Value operation, Isolate* isolate) {
- typedef double BinaryFPOperation(double x, double y);
- BinaryFPOperation* function = NULL;
- switch (operation) {
- case Token::ADD:
- function = &add_two_doubles;
- break;
- case Token::SUB:
- function = &sub_two_doubles;
- break;
- case Token::MUL:
- function = &mul_two_doubles;
- break;
- case Token::DIV:
- function = &div_two_doubles;
- break;
- case Token::MOD:
- function = &mod_two_doubles;
- break;
- default:
- UNREACHABLE();
- }
+ExternalReference ExternalReference::mod_two_doubles_operation(
+ Isolate* isolate) {
return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(function),
+ FUNCTION_ADDR(modulo),
BUILTIN_FP_FP_CALL));
}
-ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(native_compare_doubles),
- BUILTIN_COMPARE_CALL));
-}
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index f0b7fed909..89b0e5a622 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -262,7 +262,6 @@ class RelocInfo BASE_EMBEDDED {
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
- CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
DEBUG_BREAK, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
@@ -372,6 +371,9 @@ class RelocInfo BASE_EMBEDDED {
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
double data64() const { return data64_; }
+ uint64_t raw_data64() {
+ return BitCast<uint64_t>(data64_);
+ }
Code* host() const { return host_; }
// Apply a relocation by delta bytes
@@ -390,7 +392,6 @@ class RelocInfo BASE_EMBEDDED {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Object* target_object());
INLINE(Handle<Object> target_object_handle(Assembler* origin));
- INLINE(Object** target_object_address());
INLINE(void set_target_object(Object* target,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Address target_runtime_entry(Assembler* origin));
@@ -425,7 +426,7 @@ class RelocInfo BASE_EMBEDDED {
// Read/modify the reference in the instruction this relocation
// applies to; can only be called if rmode_ is external_reference
- INLINE(Address* target_reference_address());
+ INLINE(Address target_reference());
// Read/modify the address of a call instruction. This is used to relocate
// the break points where straight-line code is patched with a call
@@ -436,6 +437,10 @@ class RelocInfo BASE_EMBEDDED {
INLINE(void set_call_object(Object* target));
INLINE(Object** call_object_address());
+ // Wipe out a relocation to a fixed value, used for making snapshots
+ // reproducible.
+ INLINE(void WipeOut());
+
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(Isolate* isolate, ObjectVisitor* v);
@@ -486,12 +491,6 @@ class RelocInfo BASE_EMBEDDED {
double data64_;
};
Code* host_;
- // Code and Embedded Object pointers on some platforms are stored split
- // across two consecutive 32-bit instructions. Heap management
- // routines expect to access these pointers indirectly. The following
- // location provides a place for these pointers to exist naturally
- // when accessed via the Iterator.
- Object* reconstructed_obj_ptr_;
// External-reference pointers are also split across instruction-pairs
// on some platforms, but are accessed via indirect pointers. This location
// provides a place for that pointer to exist naturally. Its address
@@ -718,10 +717,6 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
- static ExternalReference fill_heap_number_with_random_function(
- Isolate* isolate);
- static ExternalReference random_uint32_function(Isolate* isolate);
- static ExternalReference transcendental_cache_array_address(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
static ExternalReference get_date_field_function(Isolate* isolate);
@@ -730,9 +725,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_make_code_young_function(Isolate* isolate);
static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);
- // New heap objects tracking support.
- static ExternalReference record_object_allocation_function(Isolate* isolate);
-
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
@@ -790,9 +782,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_high_promotion_mode_active_address(
Isolate* isolate);
- static ExternalReference double_fp_operation(Token::Value operation,
- Isolate* isolate);
- static ExternalReference compare_doubles(Isolate* isolate);
+ static ExternalReference mod_two_doubles_operation(Isolate* isolate);
static ExternalReference power_double_double_function(Isolate* isolate);
static ExternalReference power_double_int_function(Isolate* isolate);
@@ -817,9 +807,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_the_hole_nan();
static ExternalReference address_of_uint32_bias();
- static ExternalReference math_sin_double_function(Isolate* isolate);
- static ExternalReference math_cos_double_function(Isolate* isolate);
- static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
static ExternalReference math_exp_constants(int constant_index);
@@ -1015,32 +1002,6 @@ class PreservePositionScope BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Utility functions
-inline bool is_intn(int x, int n) {
- return -(1 << (n-1)) <= x && x < (1 << (n-1));
-}
-
-inline bool is_int8(int x) { return is_intn(x, 8); }
-inline bool is_int16(int x) { return is_intn(x, 16); }
-inline bool is_int18(int x) { return is_intn(x, 18); }
-inline bool is_int24(int x) { return is_intn(x, 24); }
-
-inline bool is_uintn(int x, int n) {
- return (x & -(1 << n)) == 0;
-}
-
-inline bool is_uint2(int x) { return is_uintn(x, 2); }
-inline bool is_uint3(int x) { return is_uintn(x, 3); }
-inline bool is_uint4(int x) { return is_uintn(x, 4); }
-inline bool is_uint5(int x) { return is_uintn(x, 5); }
-inline bool is_uint6(int x) { return is_uintn(x, 6); }
-inline bool is_uint8(int x) { return is_uintn(x, 8); }
-inline bool is_uint10(int x) { return is_uintn(x, 10); }
-inline bool is_uint12(int x) { return is_uintn(x, 12); }
-inline bool is_uint16(int x) { return is_uintn(x, 16); }
-inline bool is_uint24(int x) { return is_uintn(x, 24); }
-inline bool is_uint26(int x) { return is_uintn(x, 26); }
-inline bool is_uint28(int x) { return is_uintn(x, 28); }
-
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index b489314329..6b2f48f017 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -82,8 +82,8 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) {
}
-VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position)
- : Expression(isolate, position),
+VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
+ : Expression(zone, position),
name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
@@ -94,12 +94,12 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position)
}
-VariableProxy::VariableProxy(Isolate* isolate,
+VariableProxy::VariableProxy(Zone* zone,
Handle<String> name,
bool is_this,
Interface* interface,
int position)
- : Expression(isolate, position),
+ : Expression(zone, position),
name_(name),
var_(NULL),
is_this_(is_this),
@@ -126,20 +126,18 @@ void VariableProxy::BindTo(Variable* var) {
}
-Assignment::Assignment(Isolate* isolate,
+Assignment::Assignment(Zone* zone,
Token::Value op,
Expression* target,
Expression* value,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
target_(target),
value_(value),
binary_operation_(NULL),
- assignment_id_(GetNextId(isolate)),
- is_monomorphic_(false),
+ assignment_id_(GetNextId(zone)),
is_uninitialized_(false),
- is_pre_monomorphic_(false),
store_mode_(STANDARD_STORE) { }
@@ -187,15 +185,31 @@ LanguageMode FunctionLiteral::language_mode() const {
}
-ObjectLiteralProperty::ObjectLiteralProperty(Literal* key,
- Expression* value,
- Isolate* isolate) {
+void FunctionLiteral::InitializeSharedInfo(
+ Handle<Code> unoptimized_code) {
+ for (RelocIterator it(*unoptimized_code); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+ Object* obj = rinfo->target_object();
+ if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ if (shared->start_position() == start_position()) {
+ shared_info_ = Handle<SharedFunctionInfo>(shared);
+ break;
+ }
+ }
+ }
+}
+
+
+ObjectLiteralProperty::ObjectLiteralProperty(
+ Zone* zone, Literal* key, Expression* value) {
emit_store_ = true;
key_ = key;
value_ = value;
Object* k = *key->value();
if (k->IsInternalizedString() &&
- isolate->heap()->proto_string()->Equals(String::cast(k))) {
+ zone->isolate()->heap()->proto_string()->Equals(String::cast(k))) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
@@ -207,8 +221,8 @@ ObjectLiteralProperty::ObjectLiteralProperty(Literal* key,
}
-ObjectLiteralProperty::ObjectLiteralProperty(bool is_getter,
- FunctionLiteral* value) {
+ObjectLiteralProperty::ObjectLiteralProperty(
+ Zone* zone, bool is_getter, FunctionLiteral* value) {
emit_store_ = true;
value_ = value;
kind_ = is_getter ? GETTER : SETTER;
@@ -256,6 +270,170 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
}
+bool ObjectLiteral::IsBoilerplateProperty(ObjectLiteral::Property* property) {
+ return property != NULL &&
+ property->kind() != ObjectLiteral::Property::PROTOTYPE;
+}
+
+
+void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
+ if (!constant_properties_.is_null()) return;
+
+ // Allocate a fixed array to hold all the constant properties.
+ Handle<FixedArray> constant_properties = isolate->factory()->NewFixedArray(
+ boilerplate_properties_ * 2, TENURED);
+
+ int position = 0;
+ // Accumulate the value in local variables and store it at the end.
+ bool is_simple = true;
+ int depth_acc = 1;
+ uint32_t max_element_index = 0;
+ uint32_t elements = 0;
+ for (int i = 0; i < properties()->length(); i++) {
+ ObjectLiteral::Property* property = properties()->at(i);
+ if (!IsBoilerplateProperty(property)) {
+ is_simple = false;
+ continue;
+ }
+ MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
+ if (m_literal != NULL) {
+ m_literal->BuildConstants(isolate);
+ if (m_literal->depth() >= depth_acc) depth_acc = m_literal->depth() + 1;
+ }
+
+ // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
+ // value for COMPUTED properties, the real value is filled in at
+ // runtime. The enumeration order is maintained.
+ Handle<Object> key = property->key()->value();
+ Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
+
+ // Ensure objects that may, at any point in time, contain fields with double
+ // representation are always treated as nested objects. This is true for
+ // computed fields (value is undefined), and smi and double literals
+ // (value->IsNumber()).
+ // TODO(verwaest): Remove once we can store them inline.
+ if (FLAG_track_double_fields &&
+ (value->IsNumber() || value->IsUninitialized())) {
+ may_store_doubles_ = true;
+ }
+
+ is_simple = is_simple && !value->IsUninitialized();
+
+ // Keep track of the number of elements in the object literal and
+ // the largest element index. If the largest element index is
+ // much larger than the number of elements, creating an object
+ // literal with fast elements will be a waste of space.
+ uint32_t element_index = 0;
+ if (key->IsString()
+ && Handle<String>::cast(key)->AsArrayIndex(&element_index)
+ && element_index > max_element_index) {
+ max_element_index = element_index;
+ elements++;
+ } else if (key->IsSmi()) {
+ int key_value = Smi::cast(*key)->value();
+ if (key_value > 0
+ && static_cast<uint32_t>(key_value) > max_element_index) {
+ max_element_index = key_value;
+ }
+ elements++;
+ }
+
+ // Add name, value pair to the fixed array.
+ constant_properties->set(position++, *key);
+ constant_properties->set(position++, *value);
+ }
+
+ constant_properties_ = constant_properties;
+ fast_elements_ =
+ (max_element_index <= 32) || ((2 * elements) >= max_element_index);
+ set_is_simple(is_simple);
+ set_depth(depth_acc);
+}
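The fast_elements_ heuristic at the end is easiest to read against two concrete literals (purely illustrative):

    // max_element_index = 2, elements = 3: 2 <= 32, so fast_elements_ is true.
    var dense  = { 0: 'a', 1: 'b', 2: 'c' };

    // max_element_index = 100000, elements = 2: 100000 > 32 and 2 * 2 < 100000,
    // so fast_elements_ is false - an array-style backing store would be mostly holes.
    var sparse = { 0: 'a', 100000: 'b' };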
+
+
+void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
+ if (!constant_elements_.is_null()) return;
+
+ // Allocate a fixed array to hold all the object literals.
+ Handle<JSArray> array =
+ isolate->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
+ isolate->factory()->SetElementsCapacityAndLength(
+ array, values()->length(), values()->length());
+
+ // Fill in the literals.
+ bool is_simple = true;
+ int depth_acc = 1;
+ bool is_holey = false;
+ for (int i = 0, n = values()->length(); i < n; i++) {
+ Expression* element = values()->at(i);
+ MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
+ if (m_literal != NULL) {
+ m_literal->BuildConstants(isolate);
+ if (m_literal->depth() + 1 > depth_acc) {
+ depth_acc = m_literal->depth() + 1;
+ }
+ }
+ Handle<Object> boilerplate_value = GetBoilerplateValue(element, isolate);
+ if (boilerplate_value->IsTheHole()) {
+ is_holey = true;
+ } else if (boilerplate_value->IsUninitialized()) {
+ is_simple = false;
+ JSObject::SetOwnElement(
+ array, i, handle(Smi::FromInt(0), isolate), kNonStrictMode);
+ } else {
+ JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
+ }
+ }
+
+ Handle<FixedArrayBase> element_values(array->elements());
+
+  // Simple and shallow arrays can be lazily copied, so we transform the
+ // elements array to a copy-on-write array.
+ if (is_simple && depth_acc == 1 && values()->length() > 0 &&
+ array->HasFastSmiOrObjectElements()) {
+ element_values->set_map(isolate->heap()->fixed_cow_array_map());
+ }
+
+ // Remember both the literal's constant values as well as the ElementsKind
+ // in a 2-element FixedArray.
+ Handle<FixedArray> literals = isolate->factory()->NewFixedArray(2, TENURED);
+
+ ElementsKind kind = array->GetElementsKind();
+ kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
+
+ literals->set(0, Smi::FromInt(kind));
+ literals->set(1, *element_values);
+
+ constant_elements_ = literals;
+ set_is_simple(is_simple);
+ set_depth(depth_acc);
+}
+
+
+Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
+ Isolate* isolate) {
+ if (expression->AsLiteral() != NULL) {
+ return expression->AsLiteral()->value();
+ }
+ if (CompileTimeValue::IsCompileTimeValue(expression)) {
+ return CompileTimeValue::GetValue(isolate, expression);
+ }
+ return isolate->factory()->uninitialized_value();
+}
+
+
+void MaterializedLiteral::BuildConstants(Isolate* isolate) {
+ if (IsArrayLiteral()) {
+ return AsArrayLiteral()->BuildConstantElements(isolate);
+ }
+ if (IsObjectLiteral()) {
+ return AsObjectLiteral()->BuildConstantProperties(isolate);
+ }
+ ASSERT(IsRegExpLiteral());
+ ASSERT(depth() >= 1); // Depth should be initialized.
+}
+
+
void TargetCollector::AddTarget(Label* target, Zone* zone) {
// Add the label to the collector, but discard duplicates.
int length = targets_.length();
@@ -410,153 +588,36 @@ bool FunctionDeclaration::IsInlineable() const {
// TODO(rossberg): all RecordTypeFeedback functions should disappear
// once we use the common type field in the AST consistently.
-
-void ForInStatement::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- for_in_type_ = static_cast<ForInType>(oracle->ForInType(this));
-}
-
-
void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
to_boolean_types_ = oracle->ToBooleanTypes(test_id());
}
-void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- Zone* zone) {
- // Record type feedback from the oracle in the AST.
- is_uninitialized_ = oracle->LoadIsUninitialized(this);
- if (is_uninitialized_) return;
-
- is_pre_monomorphic_ = oracle->LoadIsPreMonomorphic(this);
- is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
- ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
- receiver_types_.Clear();
- if (key()->IsPropertyName()) {
- FunctionPrototypeStub proto_stub(Code::LOAD_IC);
- if (oracle->LoadIsStub(this, &proto_stub)) {
- is_function_prototype_ = true;
- } else {
- Literal* lit_key = key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->value()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->value());
- oracle->LoadReceiverTypes(this, name, &receiver_types_);
- }
- } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
- is_string_access_ = true;
- } else if (is_monomorphic_) {
- receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this), zone);
- } else if (oracle->LoadIsPolymorphic(this)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
+int Call::ComputeFeedbackSlotCount(Isolate* isolate) {
+ CallType call_type = GetCallType(isolate);
+ if (call_type == LOOKUP_SLOT_CALL || call_type == OTHER_CALL) {
+ // Call only uses a slot in some cases.
+ return 1;
}
-}
-
-void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- Zone* zone) {
- Property* prop = target()->AsProperty();
- ASSERT(prop != NULL);
- TypeFeedbackId id = AssignmentFeedbackId();
- is_uninitialized_ = oracle->StoreIsUninitialized(id);
- if (is_uninitialized_) return;
-
- is_pre_monomorphic_ = oracle->StoreIsPreMonomorphic(id);
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
- ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
- receiver_types_.Clear();
- if (prop->key()->IsPropertyName()) {
- Literal* lit_key = prop->key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->value()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->value());
- oracle->StoreReceiverTypes(this, name, &receiver_types_);
- } else if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
- store_mode_ = oracle->GetStoreMode(id);
- } else if (oracle->StoreIsKeyedPolymorphic(id)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
- store_mode_ = oracle->GetStoreMode(id);
- }
-}
-
-
-void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- Zone* zone) {
- TypeFeedbackId id = CountStoreFeedbackId();
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
- receiver_types_.Clear();
- if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(
- oracle->StoreMonomorphicReceiverType(id), zone);
- } else if (oracle->StoreIsKeyedPolymorphic(id)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
- } else {
- oracle->CollectPolymorphicStoreReceiverTypes(id, &receiver_types_);
- }
- store_mode_ = oracle->GetStoreMode(id);
- type_ = oracle->IncrementType(this);
+ return 0;
}
-void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- compare_type_ = oracle->ClauseType(CompareId());
-}
-
-
-bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
- // If there is an interceptor, we can't compute the target for a direct call.
- if (type->has_named_interceptor()) return false;
-
- if (check_type_ == RECEIVER_MAP_CHECK) {
- // For primitive checks the holder is set up to point to the corresponding
- // prototype object, i.e. one step of the algorithm below has been already
- // performed. For non-primitive checks we clear it to allow computing
- // targets for polymorphic calls.
- holder_ = Handle<JSObject>::null();
- }
- LookupResult lookup(type->GetIsolate());
- while (true) {
- // If a dictionary map is found in the prototype chain before the actual
- // target, a new target can always appear. In that case, bail out.
- // TODO(verwaest): Alternatively a runtime negative lookup on the normal
- // receiver or prototype could be added.
- if (type->is_dictionary_map()) return false;
- type->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) {
- switch (lookup.type()) {
- case CONSTANT: {
- // We surely know the target for a constant function.
- Handle<Object> constant(lookup.GetConstantFromMap(*type),
- type->GetIsolate());
- if (constant->IsJSFunction()) {
- target_ = Handle<JSFunction>::cast(constant);
- return true;
- }
- // Fall through.
- }
- case NORMAL:
- case FIELD:
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- // We don't know the target.
- return false;
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
+Call::CallType Call::GetCallType(Isolate* isolate) const {
+ VariableProxy* proxy = expression()->AsVariableProxy();
+ if (proxy != NULL) {
+ if (proxy->var()->is_possibly_eval(isolate)) {
+ return POSSIBLY_EVAL_CALL;
+ } else if (proxy->var()->IsUnallocated()) {
+ return GLOBAL_CALL;
+ } else if (proxy->var()->IsLookupSlot()) {
+ return LOOKUP_SLOT_CALL;
}
- // If we reach the end of the prototype chain, we don't know the target.
- if (!type->prototype()->IsJSObject()) return false;
- // Go up the prototype chain, recording where we are currently.
- holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
- JSObject::TryMigrateInstance(holder_);
- type = Handle<Map>(holder()->map());
}
+
+ Property* property = expression()->AsProperty();
+ return property != NULL ? PROPERTY_CALL : OTHER_CALL;
}
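GetCallType classifies a call site purely by the syntactic shape of the callee, and together with ComputeFeedbackSlotCount above it means only lookup-slot and "other" calls reserve a type-feedback slot. Illustrative JavaScript, one line per bucket (identifiers are made up):

    eval('1');               // POSSIBLY_EVAL_CALL - the callee variable may resolve to eval
    globalFn();              // GLOBAL_CALL        - unallocated (global) variable
    with (o) { lookedUp(); } // LOOKUP_SLOT_CALL   - must be resolved dynamically at runtime
    obj.method();            // PROPERTY_CALL      - callee is a property load
    (function () {})();      // OTHER_CALL         - anything else, e.g. an IIFE or a local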
@@ -581,89 +642,25 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
}
-Handle<JSObject> Call::GetPrototypeForPrimitiveCheck(
- CheckType check, Isolate* isolate) {
- v8::internal::Context* native_context = isolate->context()->native_context();
- JSFunction* function = NULL;
- switch (check) {
- case RECEIVER_MAP_CHECK:
- UNREACHABLE();
- break;
- case STRING_CHECK:
- function = native_context->string_function();
- break;
- case SYMBOL_CHECK:
- function = native_context->symbol_function();
- break;
- case NUMBER_CHECK:
- function = native_context->number_function();
- break;
- case BOOLEAN_CHECK:
- function = native_context->boolean_function();
- break;
- }
- ASSERT(function != NULL);
- return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
-}
-
-
-void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- CallKind call_kind) {
- is_monomorphic_ = oracle->CallIsMonomorphic(this);
- Property* property = expression()->AsProperty();
- if (property == NULL) {
- // Function call. Specialize for monomorphic calls.
- if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
- } else {
- // Method call. Specialize for the receiver types seen at runtime.
- Literal* key = property->key()->AsLiteral();
- ASSERT(key != NULL && key->value()->IsString());
- Handle<String> name = Handle<String>::cast(key->value());
- check_type_ = oracle->GetCallCheckType(this);
- receiver_types_.Clear();
- if (check_type_ == RECEIVER_MAP_CHECK) {
- oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
- is_monomorphic_ = is_monomorphic_ && receiver_types_.length() > 0;
- } else {
- holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
- receiver_types_.Add(handle(holder_->map()), oracle->zone());
- }
-#ifdef ENABLE_SLOW_ASSERTS
- if (FLAG_enable_slow_asserts) {
- int length = receiver_types_.length();
- for (int i = 0; i < length; i++) {
- Handle<Map> map = receiver_types_.at(i);
- ASSERT(!map.is_null() && *map != NULL);
- }
- }
-#endif
- if (is_monomorphic_) {
- Handle<Map> map = receiver_types_.first();
- is_monomorphic_ = ComputeTarget(map, name);
- }
- }
-}
-
-
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- allocation_info_cell_ = oracle->GetCallNewAllocationInfoCell(this);
- is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
+ allocation_site_ =
+ oracle->GetCallNewAllocationSite(CallNewFeedbackSlot());
+ is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
if (is_monomorphic_) {
- target_ = oracle->GetCallNewTarget(this);
- Object* value = allocation_info_cell_->value();
- ASSERT(!value->IsTheHole());
- if (value->IsAllocationSite()) {
- AllocationSite* site = AllocationSite::cast(value);
- elements_kind_ = site->GetElementsKind();
+ target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
+ if (!allocation_site_.is_null()) {
+ elements_kind_ = allocation_site_->GetElementsKind();
}
}
}
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
- ? oracle->GetObjectLiteralStoreMap(this)
- : Handle<Map>::null();
+ TypeFeedbackId id = key()->LiteralFeedbackId();
+ SmallMapList maps;
+ oracle->CollectReceiverTypes(id, &maps);
+ receiver_type_ = maps.length() == 1 ? maps.at(0)
+ : Handle<Map>::null();
}
@@ -1036,16 +1033,16 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
}
-CaseClause::CaseClause(Isolate* isolate,
+CaseClause::CaseClause(Zone* zone,
Expression* label,
ZoneList<Statement*>* statements,
int pos)
- : AstNode(pos),
+ : Expression(zone, pos),
label_(label),
statements_(statements),
- compare_type_(Type::None(), isolate),
- compare_id_(AstNode::GetNextId(isolate)),
- entry_id_(AstNode::GetNextId(isolate)) {
+ compare_type_(Type::None(zone)),
+ compare_id_(AstNode::GetNextId(zone)),
+ entry_id_(AstNode::GetNextId(zone)) {
}
@@ -1053,6 +1050,11 @@ CaseClause::CaseClause(Isolate* isolate,
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
}
+#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_slot_node(node); \
+ }
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1065,6 +1067,12 @@ CaseClause::CaseClause(Isolate* isolate,
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
+#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_slot_node(node); \
+ add_flag(kDontSelfOptimize); \
+ }
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1099,8 +1107,8 @@ REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction)
-REGULAR_NODE(Call)
-REGULAR_NODE(CallNew)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew)
// In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
// But node->var() is usually not bound yet at VariableProxy creation time, and
@@ -1125,11 +1133,12 @@ DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
-DONT_SELFOPTIMIZE_NODE(ForInStatement)
+DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
+
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
if (node->is_jsruntime()) {
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index b4f7348eee..aacc5e4fc8 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -32,6 +32,7 @@
#include "assembler.h"
#include "factory.h"
+#include "feedback-slots.h"
#include "isolate.h"
#include "jsregexp.h"
#include "list-inl.h"
@@ -39,7 +40,6 @@
#include "small-pointer-list.h"
#include "smart-pointers.h"
#include "token.h"
-#include "type-info.h" // TODO(rossberg): this should eventually be removed
#include "types.h"
#include "utils.h"
#include "variables.h"
@@ -115,17 +115,14 @@ namespace internal {
V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
- V(ThisFunction)
-
-#define AUXILIARY_NODE_LIST(V) \
+ V(ThisFunction) \
V(CaseClause)
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
MODULE_NODE_LIST(V) \
STATEMENT_NODE_LIST(V) \
- EXPRESSION_NODE_LIST(V) \
- AUXILIARY_NODE_LIST(V)
+ EXPRESSION_NODE_LIST(V)
// Forward declarations
class AstConstructionVisitor;
@@ -185,7 +182,7 @@ class AstProperties V8_FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
- AstProperties() : node_count_(0) { }
+ AstProperties() : node_count_(0) {}
Flags* flags() { return &flags_; }
int node_count() { return node_count_; }
@@ -230,13 +227,13 @@ class AstNode: public ZoneObject {
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
protected:
- static int GetNextId(Isolate* isolate) {
- return ReserveIdRange(isolate, 1);
+ static int GetNextId(Zone* zone) {
+ return ReserveIdRange(zone, 1);
}
- static int ReserveIdRange(Isolate* isolate, int n) {
- int tmp = isolate->ast_node_id();
- isolate->set_ast_node_id(tmp + n);
+ static int ReserveIdRange(Zone* zone, int n) {
+ int tmp = zone->isolate()->ast_node_id();
+ zone->isolate()->set_ast_node_id(tmp + n);
return tmp;
}
@@ -259,7 +256,7 @@ class AstNode: public ZoneObject {
class Statement : public AstNode {
public:
- explicit Statement(int position) : AstNode(position) {}
+ explicit Statement(Zone* zone, int position) : AstNode(position) {}
bool IsEmpty() { return AsEmptyStatement() != NULL; }
virtual bool IsJump() const { return false; }
@@ -279,9 +276,8 @@ class SmallMapList V8_FINAL {
int length() const { return list_.length(); }
void AddMapIfMissing(Handle<Map> map, Zone* zone) {
- Map* updated = map->CurrentMapForDeprecated();
- if (updated == NULL) return;
- map = Handle<Map>(updated);
+ map = Map::CurrentMapForDeprecated(map);
+ if (map.is_null()) return;
for (int i = 0; i < length(); ++i) {
if (at(i).is_identical_to(map)) return;
}
@@ -382,11 +378,11 @@ class Expression : public AstNode {
TypeFeedbackId test_id() const { return test_id_; }
protected:
- Expression(Isolate* isolate, int pos)
+ Expression(Zone* zone, int pos)
: AstNode(pos),
- bounds_(Bounds::Unbounded(isolate)),
- id_(GetNextId(isolate)),
- test_id_(GetNextId(isolate)) {}
+ bounds_(Bounds::Unbounded(zone)),
+ id_(GetNextId(zone)),
+ test_id_(GetNextId(zone)) {}
void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
private:
@@ -427,13 +423,13 @@ class BreakableStatement : public Statement {
protected:
BreakableStatement(
- Isolate* isolate, ZoneStringList* labels,
+ Zone* zone, ZoneStringList* labels,
BreakableType breakable_type, int position)
- : Statement(position),
+ : Statement(zone, position),
labels_(labels),
breakable_type_(breakable_type),
- entry_id_(GetNextId(isolate)),
- exit_id_(GetNextId(isolate)) {
+ entry_id_(GetNextId(zone)),
+ exit_id_(GetNextId(zone)) {
ASSERT(labels == NULL || labels->length() > 0);
}
@@ -467,13 +463,12 @@ class Block V8_FINAL : public BreakableStatement {
void set_scope(Scope* scope) { scope_ = scope; }
protected:
- Block(Isolate* isolate,
+ Block(Zone* zone,
ZoneStringList* labels,
int capacity,
bool is_initializer_block,
- int pos,
- Zone* zone)
- : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY, pos),
+ int pos)
+ : BreakableStatement(zone, labels, TARGET_FOR_NAMED_ONLY, pos),
statements_(capacity, zone),
is_initializer_block_(is_initializer_block),
scope_(NULL) {
@@ -495,7 +490,8 @@ class Declaration : public AstNode {
virtual bool IsInlineable() const;
protected:
- Declaration(VariableProxy* proxy,
+ Declaration(Zone* zone,
+ VariableProxy* proxy,
VariableMode mode,
Scope* scope,
int pos)
@@ -524,11 +520,12 @@ class VariableDeclaration V8_FINAL : public Declaration {
}
protected:
- VariableDeclaration(VariableProxy* proxy,
+ VariableDeclaration(Zone* zone,
+ VariableProxy* proxy,
VariableMode mode,
Scope* scope,
int pos)
- : Declaration(proxy, mode, scope, pos) {
+ : Declaration(zone, proxy, mode, scope, pos) {
}
};
@@ -544,12 +541,13 @@ class FunctionDeclaration V8_FINAL : public Declaration {
virtual bool IsInlineable() const V8_OVERRIDE;
protected:
- FunctionDeclaration(VariableProxy* proxy,
+ FunctionDeclaration(Zone* zone,
+ VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
Scope* scope,
int pos)
- : Declaration(proxy, mode, scope, pos),
+ : Declaration(zone, proxy, mode, scope, pos),
fun_(fun) {
// At the moment there are no "const functions" in JavaScript...
ASSERT(mode == VAR || mode == LET);
@@ -571,11 +569,12 @@ class ModuleDeclaration V8_FINAL : public Declaration {
}
protected:
- ModuleDeclaration(VariableProxy* proxy,
+ ModuleDeclaration(Zone* zone,
+ VariableProxy* proxy,
Module* module,
Scope* scope,
int pos)
- : Declaration(proxy, MODULE, scope, pos),
+ : Declaration(zone, proxy, MODULE, scope, pos),
module_(module) {
}
@@ -594,11 +593,12 @@ class ImportDeclaration V8_FINAL : public Declaration {
}
protected:
- ImportDeclaration(VariableProxy* proxy,
+ ImportDeclaration(Zone* zone,
+ VariableProxy* proxy,
Module* module,
Scope* scope,
int pos)
- : Declaration(proxy, LET, scope, pos),
+ : Declaration(zone, proxy, LET, scope, pos),
module_(module) {
}
@@ -616,8 +616,8 @@ class ExportDeclaration V8_FINAL : public Declaration {
}
protected:
- ExportDeclaration(VariableProxy* proxy, Scope* scope, int pos)
- : Declaration(proxy, LET, scope, pos) {}
+ ExportDeclaration(Zone* zone, VariableProxy* proxy, Scope* scope, int pos)
+ : Declaration(zone, proxy, LET, scope, pos) {}
};
@@ -631,7 +631,7 @@ class Module : public AstNode {
: AstNode(pos),
interface_(Interface::NewModule(zone)),
body_(NULL) {}
- Module(Interface* interface, int pos, Block* body = NULL)
+ Module(Zone* zone, Interface* interface, int pos, Block* body = NULL)
: AstNode(pos),
interface_(interface),
body_(body) {}
@@ -647,8 +647,8 @@ class ModuleLiteral V8_FINAL : public Module {
DECLARE_NODE_TYPE(ModuleLiteral)
protected:
- ModuleLiteral(Block* body, Interface* interface, int pos)
- : Module(interface, pos, body) {}
+ ModuleLiteral(Zone* zone, Block* body, Interface* interface, int pos)
+ : Module(zone, interface, pos, body) {}
};
@@ -659,7 +659,7 @@ class ModuleVariable V8_FINAL : public Module {
VariableProxy* proxy() const { return proxy_; }
protected:
- inline ModuleVariable(VariableProxy* proxy, int pos);
+ inline ModuleVariable(Zone* zone, VariableProxy* proxy, int pos);
private:
VariableProxy* proxy_;
@@ -674,7 +674,7 @@ class ModulePath V8_FINAL : public Module {
Handle<String> name() const { return name_; }
protected:
- ModulePath(Module* module, Handle<String> name, Zone* zone, int pos)
+ ModulePath(Zone* zone, Module* module, Handle<String> name, int pos)
: Module(zone, pos),
module_(module),
name_(name) {
@@ -693,7 +693,7 @@ class ModuleUrl V8_FINAL : public Module {
Handle<String> url() const { return url_; }
protected:
- ModuleUrl(Handle<String> url, Zone* zone, int pos)
+ ModuleUrl(Zone* zone, Handle<String> url, int pos)
: Module(zone, pos), url_(url) {
}
@@ -710,8 +710,8 @@ class ModuleStatement V8_FINAL : public Statement {
Block* body() const { return body_; }
protected:
- ModuleStatement(VariableProxy* proxy, Block* body, int pos)
- : Statement(pos),
+ ModuleStatement(Zone* zone, VariableProxy* proxy, Block* body, int pos)
+ : Statement(zone, pos),
proxy_(proxy),
body_(body) {
}
@@ -739,10 +739,10 @@ class IterationStatement : public BreakableStatement {
Label* continue_target() { return &continue_target_; }
protected:
- IterationStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
+ IterationStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
body_(NULL),
- osr_entry_id_(GetNextId(isolate)) {
+ osr_entry_id_(GetNextId(zone)) {
}
void Initialize(Statement* body) {
@@ -773,11 +773,11 @@ class DoWhileStatement V8_FINAL : public IterationStatement {
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- DoWhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : IterationStatement(isolate, labels, pos),
+ DoWhileStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : IterationStatement(zone, labels, pos),
cond_(NULL),
- continue_id_(GetNextId(isolate)),
- back_edge_id_(GetNextId(isolate)) {
+ continue_id_(GetNextId(zone)),
+ back_edge_id_(GetNextId(zone)) {
}
private:
@@ -810,11 +810,11 @@ class WhileStatement V8_FINAL : public IterationStatement {
BailoutId BodyId() const { return body_id_; }
protected:
- WhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : IterationStatement(isolate, labels, pos),
+ WhileStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : IterationStatement(zone, labels, pos),
cond_(NULL),
may_have_function_literal_(true),
- body_id_(GetNextId(isolate)) {
+ body_id_(GetNextId(zone)) {
}
private:
@@ -861,15 +861,15 @@ class ForStatement V8_FINAL : public IterationStatement {
void set_loop_variable(Variable* var) { loop_variable_ = var; }
protected:
- ForStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : IterationStatement(isolate, labels, pos),
+ ForStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : IterationStatement(zone, labels, pos),
init_(NULL),
cond_(NULL),
next_(NULL),
may_have_function_literal_(true),
loop_variable_(NULL),
- continue_id_(GetNextId(isolate)),
- body_id_(GetNextId(isolate)) {
+ continue_id_(GetNextId(zone)),
+ body_id_(GetNextId(zone)) {
}
private:
@@ -903,8 +903,8 @@ class ForEachStatement : public IterationStatement {
Expression* subject() const { return subject_; }
protected:
- ForEachStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : IterationStatement(isolate, labels, pos),
+ ForEachStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : IterationStatement(zone, labels, pos),
each_(NULL),
subject_(NULL) {
}
@@ -915,7 +915,8 @@ class ForEachStatement : public IterationStatement {
};
-class ForInStatement V8_FINAL : public ForEachStatement {
+class ForInStatement V8_FINAL : public ForEachStatement,
+ public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(ForInStatement)
@@ -923,10 +924,19 @@ class ForInStatement V8_FINAL : public ForEachStatement {
return subject();
}
- TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ // Type feedback information.
+ virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; }
+ virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; }
+
+ int ForInFeedbackSlot() {
+ ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot);
+ return for_in_feedback_slot_;
+ }
+
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; }
+ void set_for_in_type(ForInType type) { for_in_type_ = type; }
BailoutId BodyId() const { return body_id_; }
BailoutId PrepareId() const { return prepare_id_; }
@@ -934,14 +944,16 @@ class ForInStatement V8_FINAL : public ForEachStatement {
virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
protected:
- ForInStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : ForEachStatement(isolate, labels, pos),
+ ForInStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : ForEachStatement(zone, labels, pos),
for_in_type_(SLOW_FOR_IN),
- body_id_(GetNextId(isolate)),
- prepare_id_(GetNextId(isolate)) {
+ for_in_feedback_slot_(kInvalidFeedbackSlot),
+ body_id_(GetNextId(zone)),
+ prepare_id_(GetNextId(zone)) {
}
ForInType for_in_type_;
+ int for_in_feedback_slot_;
const BailoutId body_id_;
const BailoutId prepare_id_;
};
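
ForInStatement is one of the nodes that now implements FeedbackSlotInterface: during parsing each such node reports how many type-feedback slots it needs and is later handed the index of its first slot. A minimal sketch of that handshake, assuming a hypothetical SlotAllocator (the real bookkeeping is done by the DeferredFeedbackSlotProcessor introduced elsewhere in this patch):

// Illustrative sketch only; SlotAllocator is hypothetical, the interface
// methods are the ones declared on FeedbackSlotInterface above.
class SlotAllocator {
 public:
  SlotAllocator() : next_slot_(0) {}
  void Process(FeedbackSlotInterface* node, Isolate* isolate) {
    int count = node->ComputeFeedbackSlotCount(isolate);
    if (count > 0) {
      node->SetFirstFeedbackSlot(next_slot_);  // e.g. for_in_feedback_slot_
      next_slot_ += count;
    }
  }
  int slot_count() const { return next_slot_; }
 private:
  int next_slot_;
};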
@@ -995,13 +1007,13 @@ class ForOfStatement V8_FINAL : public ForEachStatement {
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- ForOfStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : ForEachStatement(isolate, labels, pos),
+ ForOfStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : ForEachStatement(zone, labels, pos),
assign_iterator_(NULL),
next_result_(NULL),
result_done_(NULL),
assign_each_(NULL),
- back_edge_id_(GetNextId(isolate)) {
+ back_edge_id_(GetNextId(zone)) {
}
Expression* assign_iterator_;
@@ -1021,8 +1033,8 @@ class ExpressionStatement V8_FINAL : public Statement {
virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); }
protected:
- ExpressionStatement(Expression* expression, int pos)
- : Statement(pos), expression_(expression) { }
+ ExpressionStatement(Zone* zone, Expression* expression, int pos)
+ : Statement(zone, pos), expression_(expression) { }
private:
Expression* expression_;
@@ -1034,7 +1046,7 @@ class JumpStatement : public Statement {
virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return true; }
protected:
- explicit JumpStatement(int pos) : Statement(pos) {}
+ explicit JumpStatement(Zone* zone, int pos) : Statement(zone, pos) {}
};
@@ -1045,8 +1057,8 @@ class ContinueStatement V8_FINAL : public JumpStatement {
IterationStatement* target() const { return target_; }
protected:
- explicit ContinueStatement(IterationStatement* target, int pos)
- : JumpStatement(pos), target_(target) { }
+ explicit ContinueStatement(Zone* zone, IterationStatement* target, int pos)
+ : JumpStatement(zone, pos), target_(target) { }
private:
IterationStatement* target_;
@@ -1060,8 +1072,8 @@ class BreakStatement V8_FINAL : public JumpStatement {
BreakableStatement* target() const { return target_; }
protected:
- explicit BreakStatement(BreakableStatement* target, int pos)
- : JumpStatement(pos), target_(target) { }
+ explicit BreakStatement(Zone* zone, BreakableStatement* target, int pos)
+ : JumpStatement(zone, pos), target_(target) { }
private:
BreakableStatement* target_;
@@ -1075,8 +1087,8 @@ class ReturnStatement V8_FINAL : public JumpStatement {
Expression* expression() const { return expression_; }
protected:
- explicit ReturnStatement(Expression* expression, int pos)
- : JumpStatement(pos), expression_(expression) { }
+ explicit ReturnStatement(Zone* zone, Expression* expression, int pos)
+ : JumpStatement(zone, pos), expression_(expression) { }
private:
Expression* expression_;
@@ -1093,8 +1105,9 @@ class WithStatement V8_FINAL : public Statement {
protected:
WithStatement(
- Scope* scope, Expression* expression, Statement* statement, int pos)
- : Statement(pos),
+ Zone* zone, Scope* scope,
+ Expression* expression, Statement* statement, int pos)
+ : Statement(zone, pos),
scope_(scope),
expression_(expression),
statement_(statement) { }
@@ -1106,7 +1119,7 @@ class WithStatement V8_FINAL : public Statement {
};
-class CaseClause V8_FINAL : public AstNode {
+class CaseClause V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CaseClause)
@@ -1122,11 +1135,11 @@ class CaseClause V8_FINAL : public AstNode {
// Type feedback information.
TypeFeedbackId CompareId() { return compare_id_; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- Handle<Type> compare_type() { return compare_type_; }
+ Type* compare_type() { return compare_type_; }
+ void set_compare_type(Type* type) { compare_type_ = type; }
private:
- CaseClause(Isolate* isolate,
+ CaseClause(Zone* zone,
Expression* label,
ZoneList<Statement*>* statements,
int pos);
@@ -1134,7 +1147,7 @@ class CaseClause V8_FINAL : public AstNode {
Expression* label_;
Label body_target_;
ZoneList<Statement*>* statements_;
- Handle<Type> compare_type_;
+ Type* compare_type_;
const TypeFeedbackId compare_id_;
const BailoutId entry_id_;
@@ -1148,26 +1161,20 @@ class SwitchStatement V8_FINAL : public BreakableStatement {
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;
cases_ = cases;
- switch_type_ = UNKNOWN_SWITCH;
}
Expression* tag() const { return tag_; }
ZoneList<CaseClause*>* cases() const { return cases_; }
- enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH, GENERIC_SWITCH };
- SwitchType switch_type() const { return switch_type_; }
- void set_switch_type(SwitchType switch_type) { switch_type_ = switch_type; }
-
protected:
- SwitchStatement(Isolate* isolate, ZoneStringList* labels, int pos)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
+ SwitchStatement(Zone* zone, ZoneStringList* labels, int pos)
+ : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
tag_(NULL),
cases_(NULL) { }
private:
Expression* tag_;
ZoneList<CaseClause*>* cases_;
- SwitchType switch_type_;
};
@@ -1197,18 +1204,18 @@ class IfStatement V8_FINAL : public Statement {
BailoutId ElseId() const { return else_id_; }
protected:
- IfStatement(Isolate* isolate,
+ IfStatement(Zone* zone,
Expression* condition,
Statement* then_statement,
Statement* else_statement,
int pos)
- : Statement(pos),
+ : Statement(zone, pos),
condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement),
- if_id_(GetNextId(isolate)),
- then_id_(GetNextId(isolate)),
- else_id_(GetNextId(isolate)) {
+ if_id_(GetNextId(zone)),
+ then_id_(GetNextId(zone)),
+ else_id_(GetNextId(zone)) {
}
private:
@@ -1256,8 +1263,8 @@ class TryStatement : public Statement {
ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
protected:
- TryStatement(int index, Block* try_block, int pos)
- : Statement(pos),
+ TryStatement(Zone* zone, int index, Block* try_block, int pos)
+ : Statement(zone, pos),
index_(index),
try_block_(try_block),
escaping_targets_(NULL) { }
@@ -1280,13 +1287,14 @@ class TryCatchStatement V8_FINAL : public TryStatement {
Block* catch_block() const { return catch_block_; }
protected:
- TryCatchStatement(int index,
+ TryCatchStatement(Zone* zone,
+ int index,
Block* try_block,
Scope* scope,
Variable* variable,
Block* catch_block,
int pos)
- : TryStatement(index, try_block, pos),
+ : TryStatement(zone, index, try_block, pos),
scope_(scope),
variable_(variable),
catch_block_(catch_block) {
@@ -1307,8 +1315,8 @@ class TryFinallyStatement V8_FINAL : public TryStatement {
protected:
TryFinallyStatement(
- int index, Block* try_block, Block* finally_block, int pos)
- : TryStatement(index, try_block, pos),
+ Zone* zone, int index, Block* try_block, Block* finally_block, int pos)
+ : TryStatement(zone, index, try_block, pos),
finally_block_(finally_block) { }
private:
@@ -1321,7 +1329,7 @@ class DebuggerStatement V8_FINAL : public Statement {
DECLARE_NODE_TYPE(DebuggerStatement)
protected:
- explicit DebuggerStatement(int pos): Statement(pos) {}
+ explicit DebuggerStatement(Zone* zone, int pos): Statement(zone, pos) {}
};
@@ -1330,7 +1338,7 @@ class EmptyStatement V8_FINAL : public Statement {
DECLARE_NODE_TYPE(EmptyStatement)
protected:
- explicit EmptyStatement(int pos): Statement(pos) {}
+ explicit EmptyStatement(Zone* zone, int pos): Statement(zone, pos) {}
};
@@ -1387,11 +1395,10 @@ class Literal V8_FINAL : public Expression {
TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
protected:
- Literal(
- Isolate* isolate, Handle<Object> value, int position)
- : Expression(isolate, position),
+ Literal(Zone* zone, Handle<Object> value, int position)
+ : Expression(zone, position),
value_(value),
- isolate_(isolate) { }
+ isolate_(zone->isolate()) { }
private:
Handle<String> ToString();
@@ -1409,22 +1416,43 @@ class MaterializedLiteral : public Expression {
int literal_index() { return literal_index_; }
- // A materialized literal is simple if the values consist of only
- // constants and simple object and array literals.
- bool is_simple() const { return is_simple_; }
-
- int depth() const { return depth_; }
+ int depth() const {
+ // only callable after initialization.
+ ASSERT(depth_ >= 1);
+ return depth_;
+ }
protected:
- MaterializedLiteral(Isolate* isolate,
+ MaterializedLiteral(Zone* zone,
int literal_index,
- bool is_simple,
- int depth,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
literal_index_(literal_index),
- is_simple_(is_simple),
- depth_(depth) {}
+ is_simple_(false),
+ depth_(0) {}
+
+ // A materialized literal is simple if the values consist of only
+ // constants and simple object and array literals.
+ bool is_simple() const { return is_simple_; }
+ void set_is_simple(bool is_simple) { is_simple_ = is_simple; }
+ friend class CompileTimeValue;
+
+ void set_depth(int depth) {
+ ASSERT(depth >= 1);
+ depth_ = depth;
+ }
+
+ // Populate the constant properties/elements fixed array.
+ void BuildConstants(Isolate* isolate);
+ friend class ArrayLiteral;
+ friend class ObjectLiteral;
+
+ // If the expression is a literal, return the literal value;
+  // if the expression is a materialized literal and is simple, return a
+  // compile-time value as encoded by CompileTimeValue::GetValue().
+  // Otherwise, return the undefined literal as the placeholder
+ // in the object literal boilerplate.
+ Handle<Object> GetBoilerplateValue(Expression* expression, Isolate* isolate);
private:
int literal_index_;
@@ -1446,7 +1474,7 @@ class ObjectLiteralProperty V8_FINAL : public ZoneObject {
PROTOTYPE // Property is __proto__.
};
- ObjectLiteralProperty(Literal* key, Expression* value, Isolate* isolate);
+ ObjectLiteralProperty(Zone* zone, Literal* key, Expression* value);
Literal* key() { return key_; }
Expression* value() { return value_; }
@@ -1465,7 +1493,7 @@ class ObjectLiteralProperty V8_FINAL : public ZoneObject {
protected:
template<class> friend class AstNodeFactory;
- ObjectLiteralProperty(bool is_getter, FunctionLiteral* value);
+ ObjectLiteralProperty(Zone* zone, bool is_getter, FunctionLiteral* value);
void set_key(Literal* key) { key_ = key; }
private:
@@ -1493,6 +1521,12 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
bool may_store_doubles() const { return may_store_doubles_; }
bool has_function() const { return has_function_; }
+ // Decide if a property should be in the object boilerplate.
+ static bool IsBoilerplateProperty(Property* property);
+
+ // Populate the constant properties fixed array.
+ void BuildConstantProperties(Isolate* isolate);
+
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
@@ -1511,26 +1545,23 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
};
protected:
- ObjectLiteral(Isolate* isolate,
- Handle<FixedArray> constant_properties,
+ ObjectLiteral(Zone* zone,
ZoneList<Property*>* properties,
int literal_index,
- bool is_simple,
- bool fast_elements,
- int depth,
- bool may_store_doubles,
+ int boilerplate_properties,
bool has_function,
int pos)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth, pos),
- constant_properties_(constant_properties),
+ : MaterializedLiteral(zone, literal_index, pos),
properties_(properties),
- fast_elements_(fast_elements),
- may_store_doubles_(may_store_doubles),
+ boilerplate_properties_(boilerplate_properties),
+ fast_elements_(false),
+ may_store_doubles_(false),
has_function_(has_function) {}
private:
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
+ int boilerplate_properties_;
bool fast_elements_;
bool may_store_doubles_;
bool has_function_;
@@ -1546,14 +1577,16 @@ class RegExpLiteral V8_FINAL : public MaterializedLiteral {
Handle<String> flags() const { return flags_; }
protected:
- RegExpLiteral(Isolate* isolate,
+ RegExpLiteral(Zone* zone,
Handle<String> pattern,
Handle<String> flags,
int literal_index,
int pos)
- : MaterializedLiteral(isolate, literal_index, false, 1, pos),
+ : MaterializedLiteral(zone, literal_index, pos),
pattern_(pattern),
- flags_(flags) {}
+ flags_(flags) {
+ set_depth(1);
+ }
private:
Handle<String> pattern_;
@@ -1575,18 +1608,23 @@ class ArrayLiteral V8_FINAL : public MaterializedLiteral {
return BailoutId(first_element_id_.ToInt() + i);
}
+ // Populate the constant elements fixed array.
+ void BuildConstantElements(Isolate* isolate);
+
+ enum Flags {
+ kNoFlags = 0,
+ kShallowElements = 1,
+ kDisableMementos = 1 << 1
+ };
+
protected:
- ArrayLiteral(Isolate* isolate,
- Handle<FixedArray> constant_elements,
+ ArrayLiteral(Zone* zone,
ZoneList<Expression*>* values,
int literal_index,
- bool is_simple,
- int depth,
int pos)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth, pos),
- constant_elements_(constant_elements),
+ : MaterializedLiteral(zone, literal_index, pos),
values_(values),
- first_element_id_(ReserveIdRange(isolate, values->length())) {}
+ first_element_id_(ReserveIdRange(zone, values->length())) {}
private:
Handle<FixedArray> constant_elements_;
@@ -1626,9 +1664,9 @@ class VariableProxy V8_FINAL : public Expression {
void BindTo(Variable* var);
protected:
- VariableProxy(Isolate* isolate, Variable* var, int position);
+ VariableProxy(Zone* zone, Variable* var, int position);
- VariableProxy(Isolate* isolate,
+ VariableProxy(Zone* zone,
Handle<String> name,
bool is_this,
Interface* interface,
@@ -1660,32 +1698,37 @@ class Property V8_FINAL : public Expression {
bool IsFunctionPrototype() const { return is_function_prototype_; }
// Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE {
+ return receiver_types_.length() == 1;
+ }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
return &receiver_types_;
}
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return STANDARD_STORE;
}
- bool IsUninitialized() { return is_uninitialized_; }
- bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+ bool IsUninitialized() { return !is_for_call_ && is_uninitialized_; }
bool HasNoTypeInformation() {
- return is_uninitialized_ || is_pre_monomorphic_;
+ return is_uninitialized_;
}
+ void set_is_uninitialized(bool b) { is_uninitialized_ = b; }
+ void set_is_string_access(bool b) { is_string_access_ = b; }
+ void set_is_function_prototype(bool b) { is_function_prototype_ = b; }
+ void mark_for_call() { is_for_call_ = true; }
+ bool IsForCall() { return is_for_call_; }
+
TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
protected:
- Property(Isolate* isolate,
+ Property(Zone* zone,
Expression* obj,
Expression* key,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
obj_(obj),
key_(key),
- load_id_(GetNextId(isolate)),
- is_monomorphic_(false),
- is_pre_monomorphic_(false),
+ load_id_(GetNextId(zone)),
+ is_for_call_(false),
is_uninitialized_(false),
is_string_access_(false),
is_function_prototype_(false) { }
@@ -1696,15 +1739,14 @@ class Property V8_FINAL : public Expression {
const BailoutId load_id_;
SmallMapList receiver_types_;
- bool is_monomorphic_ : 1;
- bool is_pre_monomorphic_ : 1;
+ bool is_for_call_ : 1;
bool is_uninitialized_ : 1;
bool is_string_access_ : 1;
bool is_function_prototype_ : 1;
};
-class Call V8_FINAL : public Expression {
+class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(Call)
@@ -1712,47 +1754,50 @@ class Call V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind);
- virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
- return &receiver_types_;
+ virtual ComputablePhase GetComputablePhase() { return AFTER_SCOPING; }
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate);
+ virtual void SetFirstFeedbackSlot(int slot) {
+ call_feedback_slot_ = slot;
}
- virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
- CheckType check_type() const { return check_type_; }
- void set_string_check(Handle<JSObject> holder) {
- holder_ = holder;
- check_type_ = STRING_CHECK;
+ bool HasCallFeedbackSlot() const {
+ return call_feedback_slot_ != kInvalidFeedbackSlot;
}
+ int CallFeedbackSlot() const { return call_feedback_slot_; }
- void set_number_check(Handle<JSObject> holder) {
- holder_ = holder;
- check_type_ = NUMBER_CHECK;
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ if (expression()->IsProperty()) {
+ return expression()->AsProperty()->GetReceiverTypes();
+ }
+ return NULL;
}
- void set_map_check() {
- holder_ = Handle<JSObject>::null();
- check_type_ = RECEIVER_MAP_CHECK;
+ virtual bool IsMonomorphic() V8_OVERRIDE {
+ if (expression()->IsProperty()) {
+ return expression()->AsProperty()->IsMonomorphic();
+ }
+ return !target_.is_null();
}
Handle<JSFunction> target() { return target_; }
- // A cache for the holder, set as a side effect of computing the target of the
- // call. Note that it contains the null handle when the receiver is the same
- // as the holder!
- Handle<JSObject> holder() { return holder_; }
-
Handle<Cell> cell() { return cell_; }
- bool ComputeTarget(Handle<Map> type, Handle<String> name);
+ void set_target(Handle<JSFunction> target) { target_ = target; }
bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
BailoutId ReturnId() const { return return_id_; }
- // TODO(rossberg): this should really move somewhere else (and be merged with
- // various similar methods in objets.cc), but for now...
- static Handle<JSObject> GetPrototypeForPrimitiveCheck(
- CheckType check, Isolate* isolate);
+ enum CallType {
+ POSSIBLY_EVAL_CALL,
+ GLOBAL_CALL,
+ LOOKUP_SLOT_CALL,
+ PROPERTY_CALL,
+ OTHER_CALL
+ };
+
+ // Helpers to determine how to handle the call.
+ CallType GetCallType(Isolate* isolate) const;
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
@@ -1760,33 +1805,33 @@ class Call V8_FINAL : public Expression {
#endif
protected:
- Call(Isolate* isolate,
+ Call(Zone* zone,
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
expression_(expression),
arguments_(arguments),
- is_monomorphic_(false),
- check_type_(RECEIVER_MAP_CHECK),
- return_id_(GetNextId(isolate)) { }
+ call_feedback_slot_(kInvalidFeedbackSlot),
+ return_id_(GetNextId(zone)) {
+ if (expression->IsProperty()) {
+ expression->AsProperty()->mark_for_call();
+ }
+ }
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
- bool is_monomorphic_;
- CheckType check_type_;
- SmallMapList receiver_types_;
Handle<JSFunction> target_;
- Handle<JSObject> holder_;
Handle<Cell> cell_;
+ int call_feedback_slot_;
const BailoutId return_id_;
};
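
With the holder/CheckType machinery removed, consumers now classify a call through GetCallType(). The sketch below shows the shape of such a classification using the enum declared above; it is an illustration only (the eval case is omitted, the actual implementation lives in ast.cc), and the Variable predicates it relies on are assumptions about the surrounding code:

// Illustrative only: SketchCallType is a hypothetical helper, not the
// implementation of Call::GetCallType().
static Call::CallType SketchCallType(Call* call) {
  VariableProxy* proxy = call->expression()->AsVariableProxy();
  if (proxy != NULL && proxy->var() != NULL) {
    if (proxy->var()->IsUnallocated()) return Call::GLOBAL_CALL;
    if (proxy->var()->IsLookupSlot()) return Call::LOOKUP_SLOT_CALL;
  }
  if (call->expression()->IsProperty()) return Call::PROPERTY_CALL;
  return Call::OTHER_CALL;
}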
-class CallNew V8_FINAL : public Expression {
+class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(CallNew)
@@ -1794,28 +1839,42 @@ class CallNew V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
+ virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; }
+ virtual void SetFirstFeedbackSlot(int slot) {
+ callnew_feedback_slot_ = slot;
+ }
+
+ int CallNewFeedbackSlot() {
+ ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
+ return callnew_feedback_slot_;
+ }
+
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
ElementsKind elements_kind() const { return elements_kind_; }
- Handle<Cell> allocation_info_cell() const {
- return allocation_info_cell_;
+ Handle<AllocationSite> allocation_site() const {
+ return allocation_site_;
}
+ static int feedback_slots() { return 1; }
+
BailoutId ReturnId() const { return return_id_; }
protected:
- CallNew(Isolate* isolate,
+ CallNew(Zone* zone,
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
expression_(expression),
arguments_(arguments),
is_monomorphic_(false),
elements_kind_(GetInitialFastElementsKind()),
- return_id_(GetNextId(isolate)) { }
+ callnew_feedback_slot_(kInvalidFeedbackSlot),
+ return_id_(GetNextId(zone)) { }
private:
Expression* expression_;
@@ -1824,7 +1883,8 @@ class CallNew V8_FINAL : public Expression {
bool is_monomorphic_;
Handle<JSFunction> target_;
ElementsKind elements_kind_;
- Handle<Cell> allocation_info_cell_;
+ Handle<AllocationSite> allocation_site_;
+ int callnew_feedback_slot_;
const BailoutId return_id_;
};
@@ -1846,12 +1906,12 @@ class CallRuntime V8_FINAL : public Expression {
TypeFeedbackId CallRuntimeFeedbackId() const { return reuse(id()); }
protected:
- CallRuntime(Isolate* isolate,
+ CallRuntime(Zone* zone,
Handle<String> name,
const Runtime::Function* function,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
name_(name),
function_(function),
arguments_(arguments) { }
@@ -1877,15 +1937,15 @@ class UnaryOperation V8_FINAL : public Expression {
TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
- UnaryOperation(Isolate* isolate,
+ UnaryOperation(Zone* zone,
Token::Value op,
Expression* expression,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
expression_(expression),
- materialize_true_id_(GetNextId(isolate)),
- materialize_false_id_(GetNextId(isolate)) {
+ materialize_true_id_(GetNextId(zone)),
+ materialize_false_id_(GetNextId(zone)) {
ASSERT(Token::IsUnaryOp(op));
}
@@ -1909,6 +1969,10 @@ class BinaryOperation V8_FINAL : public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
+ Handle<AllocationSite> allocation_site() const { return allocation_site_; }
+ void set_allocation_site(Handle<AllocationSite> allocation_site) {
+ allocation_site_ = allocation_site;
+ }
BailoutId RightId() const { return right_id_; }
@@ -1920,16 +1984,16 @@ class BinaryOperation V8_FINAL : public Expression {
TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
- BinaryOperation(Isolate* isolate,
+ BinaryOperation(Zone* zone,
Token::Value op,
Expression* left,
Expression* right,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
left_(left),
right_(right),
- right_id_(GetNextId(isolate)) {
+ right_id_(GetNextId(zone)) {
ASSERT(Token::IsBinaryOp(op));
}
@@ -1937,6 +2001,7 @@ class BinaryOperation V8_FINAL : public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
+ Handle<AllocationSite> allocation_site_;
// TODO(rossberg): the fixed arg should probably be represented as a Constant
// type for the RHS.
@@ -1962,15 +2027,18 @@ class CountOperation V8_FINAL : public Expression {
Expression* expression() const { return expression_; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE {
+ return receiver_types_.length() == 1;
+ }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
return &receiver_types_;
}
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
- Handle<Type> type() const { return type_; }
+ Type* type() const { return type_; }
+ void set_store_mode(KeyedAccessStoreMode mode) { store_mode_ = mode; }
+ void set_type(Type* type) { type_ = type; }
BailoutId AssignmentId() const { return assignment_id_; }
@@ -1978,27 +2046,25 @@ class CountOperation V8_FINAL : public Expression {
TypeFeedbackId CountStoreFeedbackId() const { return reuse(id()); }
protected:
- CountOperation(Isolate* isolate,
+ CountOperation(Zone* zone,
Token::Value op,
bool is_prefix,
Expression* expr,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
is_prefix_(is_prefix),
- is_monomorphic_(false),
store_mode_(STANDARD_STORE),
expression_(expr),
- assignment_id_(GetNextId(isolate)),
- count_id_(GetNextId(isolate)) {}
+ assignment_id_(GetNextId(zone)),
+ count_id_(GetNextId(zone)) {}
private:
Token::Value op_;
bool is_prefix_ : 1;
- bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
- Handle<Type> type_;
+ Type* type_;
Expression* expression_;
const BailoutId assignment_id_;
@@ -2017,8 +2083,8 @@ class CompareOperation V8_FINAL : public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
- Handle<Type> combined_type() const { return combined_type_; }
- void set_combined_type(Handle<Type> type) { combined_type_ = type; }
+ Type* combined_type() const { return combined_type_; }
+ void set_combined_type(Type* type) { combined_type_ = type; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -2026,16 +2092,16 @@ class CompareOperation V8_FINAL : public Expression {
bool IsLiteralCompareNull(Expression** expr);
protected:
- CompareOperation(Isolate* isolate,
+ CompareOperation(Zone* zone,
Token::Value op,
Expression* left,
Expression* right,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
op_(op),
left_(left),
right_(right),
- combined_type_(Type::None(), isolate) {
+ combined_type_(Type::None(zone)) {
ASSERT(Token::IsCompareOp(op));
}
@@ -2044,7 +2110,7 @@ class CompareOperation V8_FINAL : public Expression {
Expression* left_;
Expression* right_;
- Handle<Type> combined_type_;
+ Type* combined_type_;
};
@@ -2060,17 +2126,17 @@ class Conditional V8_FINAL : public Expression {
BailoutId ElseId() const { return else_id_; }
protected:
- Conditional(Isolate* isolate,
+ Conditional(Zone* zone,
Expression* condition,
Expression* then_expression,
Expression* else_expression,
int position)
- : Expression(isolate, position),
+ : Expression(zone, position),
condition_(condition),
then_expression_(then_expression),
else_expression_(else_expression),
- then_id_(GetNextId(isolate)),
- else_id_(GetNextId(isolate)) { }
+ then_id_(GetNextId(zone)),
+ else_id_(GetNextId(zone)) { }
private:
Expression* condition_;
@@ -2101,12 +2167,12 @@ class Assignment V8_FINAL : public Expression {
// Type feedback information.
TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE {
+ return receiver_types_.length() == 1;
+ }
bool IsUninitialized() { return is_uninitialized_; }
- bool IsPreMonomorphic() { return is_pre_monomorphic_; }
bool HasNoTypeInformation() {
- return is_uninitialized_ || is_pre_monomorphic_;
+ return is_uninitialized_;
}
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
return &receiver_types_;
@@ -2114,16 +2180,18 @@ class Assignment V8_FINAL : public Expression {
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
+ void set_is_uninitialized(bool b) { is_uninitialized_ = b; }
+ void set_store_mode(KeyedAccessStoreMode mode) { store_mode_ = mode; }
protected:
- Assignment(Isolate* isolate,
+ Assignment(Zone* zone,
Token::Value op,
Expression* target,
Expression* value,
int pos);
template<class Visitor>
- void Init(Isolate* isolate, AstNodeFactory<Visitor>* factory) {
+ void Init(Zone* zone, AstNodeFactory<Visitor>* factory) {
ASSERT(Token::IsAssignmentOp(op_));
if (is_compound()) {
binary_operation_ = factory->NewBinaryOperation(
@@ -2138,9 +2206,7 @@ class Assignment V8_FINAL : public Expression {
BinaryOperation* binary_operation_;
const BailoutId assignment_id_;
- bool is_monomorphic_ : 1;
bool is_uninitialized_ : 1;
- bool is_pre_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
SmallMapList receiver_types_;
@@ -2175,12 +2241,12 @@ class Yield V8_FINAL : public Expression {
}
protected:
- Yield(Isolate* isolate,
+ Yield(Zone* zone,
Expression* generator_object,
Expression* expression,
Kind yield_kind,
int pos)
- : Expression(isolate, pos),
+ : Expression(zone, pos),
generator_object_(generator_object),
expression_(expression),
yield_kind_(yield_kind),
@@ -2201,8 +2267,8 @@ class Throw V8_FINAL : public Expression {
Expression* exception() const { return exception_; }
protected:
- Throw(Isolate* isolate, Expression* exception, int pos)
- : Expression(isolate, pos), exception_(exception) {}
+ Throw(Zone* zone, Expression* exception, int pos)
+ : Expression(zone, pos), exception_(exception) {}
private:
Expression* exception_;
@@ -2260,6 +2326,8 @@ class FunctionLiteral V8_FINAL : public Expression {
bool AllowsLazyCompilation();
bool AllowsLazyCompilationWithoutContext();
+ void InitializeSharedInfo(Handle<Code> code);
+
Handle<String> debug_name() const {
if (name_->length() > 0) return name_;
return inferred_name();
@@ -2270,6 +2338,9 @@ class FunctionLiteral V8_FINAL : public Expression {
inferred_name_ = inferred_name;
}
+ // shared_info may be null if it's not cached in full code.
+ Handle<SharedFunctionInfo> shared_info() { return shared_info_; }
+
bool pretenure() { return Pretenure::decode(bitfield_); }
void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
@@ -2300,7 +2371,15 @@ class FunctionLiteral V8_FINAL : public Expression {
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
-
+ void set_slot_processor(DeferredFeedbackSlotProcessor* slot_processor) {
+ slot_processor_ = *slot_processor;
+ }
+ void ProcessFeedbackSlots(Isolate* isolate) {
+ slot_processor_.ProcessFeedbackSlots(isolate);
+ }
+ int slot_count() {
+ return slot_processor_.slot_count();
+ }
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
void set_dont_optimize_reason(BailoutReason reason) {
@@ -2308,7 +2387,7 @@ class FunctionLiteral V8_FINAL : public Expression {
}
protected:
- FunctionLiteral(Isolate* isolate,
+ FunctionLiteral(Zone* zone,
Handle<String> name,
Scope* scope,
ZoneList<Statement*>* body,
@@ -2322,11 +2401,11 @@ class FunctionLiteral V8_FINAL : public Expression {
IsParenthesizedFlag is_parenthesized,
IsGeneratorFlag is_generator,
int position)
- : Expression(isolate, position),
+ : Expression(zone, position),
name_(name),
scope_(scope),
body_(body),
- inferred_name_(isolate->factory()->empty_string()),
+ inferred_name_(zone->isolate()->factory()->empty_string()),
dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
@@ -2345,10 +2424,12 @@ class FunctionLiteral V8_FINAL : public Expression {
private:
Handle<String> name_;
+ Handle<SharedFunctionInfo> shared_info_;
Scope* scope_;
ZoneList<Statement*>* body_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
+ DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
int materialized_literal_count_;
@@ -2377,8 +2458,8 @@ class NativeFunctionLiteral V8_FINAL : public Expression {
protected:
NativeFunctionLiteral(
- Isolate* isolate, Handle<String> name, v8::Extension* extension, int pos)
- : Expression(isolate, pos), name_(name), extension_(extension) {}
+ Zone* zone, Handle<String> name, v8::Extension* extension, int pos)
+ : Expression(zone, pos), name_(name), extension_(extension) {}
private:
Handle<String> name_;
@@ -2391,7 +2472,7 @@ class ThisFunction V8_FINAL : public Expression {
DECLARE_NODE_TYPE(ThisFunction)
protected:
- explicit ThisFunction(Isolate* isolate, int pos): Expression(isolate, pos) {}
+ explicit ThisFunction(Zone* zone, int pos): Expression(zone, pos) {}
};
#undef DECLARE_NODE_TYPE
@@ -2758,8 +2839,8 @@ class RegExpEmpty V8_FINAL : public RegExpTree {
// ----------------------------------------------------------------------------
// Out-of-line inline constructors (to side-step cyclic dependencies).
-inline ModuleVariable::ModuleVariable(VariableProxy* proxy, int pos)
- : Module(proxy->interface(), pos),
+inline ModuleVariable::ModuleVariable(Zone* zone, VariableProxy* proxy, int pos)
+ : Module(zone, proxy->interface(), pos),
proxy_(proxy) {
}
@@ -2791,7 +2872,7 @@ class AstVisitor BASE_EMBEDDED {
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
- virtual void Visit(AstNode* node) V8_FINAL V8_OVERRIDE { \
+ virtual void Visit(AstNode* node) V8_FINAL V8_OVERRIDE { \
if (!CheckStackOverflow()) node->Accept(this); \
} \
\
@@ -2801,19 +2882,20 @@ public: \
\
bool CheckStackOverflow() { \
if (stack_overflow_) return true; \
- StackLimitCheck check(isolate_); \
+ StackLimitCheck check(zone_->isolate()); \
if (!check.HasOverflowed()) return false; \
return (stack_overflow_ = true); \
} \
\
private: \
- void InitializeAstVisitor(Isolate* isolate) { \
- isolate_ = isolate; \
+ void InitializeAstVisitor(Zone* zone) { \
+ zone_ = zone; \
stack_overflow_ = false; \
} \
- Isolate* isolate() { return isolate_; } \
+ Zone* zone() { return zone_; } \
+ Isolate* isolate() { return zone_->isolate(); } \
\
- Isolate* isolate_; \
+ Zone* zone_; \
bool stack_overflow_
@@ -2822,10 +2904,13 @@ private: \
class AstConstructionVisitor BASE_EMBEDDED {
public:
- AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
+ explicit AstConstructionVisitor(Zone* zone)
+ : dont_optimize_reason_(kNoReason),
+ zone_(zone) { }
AstProperties* ast_properties() { return &properties_; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ DeferredFeedbackSlotProcessor* slot_processor() { return &slot_processor_; }
private:
template<class> friend class AstNodeFactory;
@@ -2842,13 +2927,21 @@ class AstConstructionVisitor BASE_EMBEDDED {
dont_optimize_reason_ = reason;
}
+ void add_slot_node(FeedbackSlotInterface* slot_node) {
+ slot_processor_.add_slot_node(zone_, slot_node);
+ }
+
AstProperties properties_;
+ DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
+ Zone* zone_;
};
class AstNullVisitor BASE_EMBEDDED {
public:
+ explicit AstNullVisitor(Zone* zone) {}
+
// Node visitors.
#define DEF_VISIT(type) \
void Visit##type(type* node) {}
@@ -2864,9 +2957,9 @@ class AstNullVisitor BASE_EMBEDDED {
template<class Visitor>
class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public:
- AstNodeFactory(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone) { }
+ explicit AstNodeFactory(Zone* zone)
+ : zone_(zone),
+ visitor_(zone) { }
Visitor* visitor() { return &visitor_; }
@@ -2879,7 +2972,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
VariableDeclaration* decl =
- new(zone_) VariableDeclaration(proxy, mode, scope, pos);
+ new(zone_) VariableDeclaration(zone_, proxy, mode, scope, pos);
VISIT_AND_RETURN(VariableDeclaration, decl)
}
@@ -2889,7 +2982,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
FunctionDeclaration* decl =
- new(zone_) FunctionDeclaration(proxy, mode, fun, scope, pos);
+ new(zone_) FunctionDeclaration(zone_, proxy, mode, fun, scope, pos);
VISIT_AND_RETURN(FunctionDeclaration, decl)
}
@@ -2898,7 +2991,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
ModuleDeclaration* decl =
- new(zone_) ModuleDeclaration(proxy, module, scope, pos);
+ new(zone_) ModuleDeclaration(zone_, proxy, module, scope, pos);
VISIT_AND_RETURN(ModuleDeclaration, decl)
}
@@ -2907,7 +3000,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
ImportDeclaration* decl =
- new(zone_) ImportDeclaration(proxy, module, scope, pos);
+ new(zone_) ImportDeclaration(zone_, proxy, module, scope, pos);
VISIT_AND_RETURN(ImportDeclaration, decl)
}
@@ -2915,27 +3008,28 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Scope* scope,
int pos) {
ExportDeclaration* decl =
- new(zone_) ExportDeclaration(proxy, scope, pos);
+ new(zone_) ExportDeclaration(zone_, proxy, scope, pos);
VISIT_AND_RETURN(ExportDeclaration, decl)
}
ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface, int pos) {
- ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface, pos);
+ ModuleLiteral* module =
+ new(zone_) ModuleLiteral(zone_, body, interface, pos);
VISIT_AND_RETURN(ModuleLiteral, module)
}
ModuleVariable* NewModuleVariable(VariableProxy* proxy, int pos) {
- ModuleVariable* module = new(zone_) ModuleVariable(proxy, pos);
+ ModuleVariable* module = new(zone_) ModuleVariable(zone_, proxy, pos);
VISIT_AND_RETURN(ModuleVariable, module)
}
ModulePath* NewModulePath(Module* origin, Handle<String> name, int pos) {
- ModulePath* module = new(zone_) ModulePath(origin, name, zone_, pos);
+ ModulePath* module = new(zone_) ModulePath(zone_, origin, name, pos);
VISIT_AND_RETURN(ModulePath, module)
}
ModuleUrl* NewModuleUrl(Handle<String> url, int pos) {
- ModuleUrl* module = new(zone_) ModuleUrl(url, zone_, pos);
+ ModuleUrl* module = new(zone_) ModuleUrl(zone_, url, pos);
VISIT_AND_RETURN(ModuleUrl, module)
}
@@ -2944,13 +3038,13 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
bool is_initializer_block,
int pos) {
Block* block = new(zone_) Block(
- isolate_, labels, capacity, is_initializer_block, pos, zone_);
+ zone_, labels, capacity, is_initializer_block, pos);
VISIT_AND_RETURN(Block, block)
}
#define STATEMENT_WITH_LABELS(NodeType) \
NodeType* New##NodeType(ZoneStringList* labels, int pos) { \
- NodeType* stmt = new(zone_) NodeType(isolate_, labels, pos); \
+ NodeType* stmt = new(zone_) NodeType(zone_, labels, pos); \
VISIT_AND_RETURN(NodeType, stmt); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
@@ -2964,11 +3058,11 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
- ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels, pos);
+ ForInStatement* stmt = new(zone_) ForInStatement(zone_, labels, pos);
VISIT_AND_RETURN(ForInStatement, stmt);
}
case ForEachStatement::ITERATE: {
- ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels, pos);
+ ForOfStatement* stmt = new(zone_) ForOfStatement(zone_, labels, pos);
VISIT_AND_RETURN(ForOfStatement, stmt);
}
}
@@ -2978,27 +3072,28 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
ModuleStatement* NewModuleStatement(
VariableProxy* proxy, Block* body, int pos) {
- ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body, pos);
+ ModuleStatement* stmt = new(zone_) ModuleStatement(zone_, proxy, body, pos);
VISIT_AND_RETURN(ModuleStatement, stmt)
}
ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
- ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression, pos);
+ ExpressionStatement* stmt =
+ new(zone_) ExpressionStatement(zone_, expression, pos);
VISIT_AND_RETURN(ExpressionStatement, stmt)
}
ContinueStatement* NewContinueStatement(IterationStatement* target, int pos) {
- ContinueStatement* stmt = new(zone_) ContinueStatement(target, pos);
+ ContinueStatement* stmt = new(zone_) ContinueStatement(zone_, target, pos);
VISIT_AND_RETURN(ContinueStatement, stmt)
}
BreakStatement* NewBreakStatement(BreakableStatement* target, int pos) {
- BreakStatement* stmt = new(zone_) BreakStatement(target, pos);
+ BreakStatement* stmt = new(zone_) BreakStatement(zone_, target, pos);
VISIT_AND_RETURN(BreakStatement, stmt)
}
ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
- ReturnStatement* stmt = new(zone_) ReturnStatement(expression, pos);
+ ReturnStatement* stmt = new(zone_) ReturnStatement(zone_, expression, pos);
VISIT_AND_RETURN(ReturnStatement, stmt)
}
@@ -3007,7 +3102,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Statement* statement,
int pos) {
WithStatement* stmt = new(zone_) WithStatement(
- scope, expression, statement, pos);
+ zone_, scope, expression, statement, pos);
VISIT_AND_RETURN(WithStatement, stmt)
}
@@ -3016,7 +3111,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Statement* else_statement,
int pos) {
IfStatement* stmt = new(zone_) IfStatement(
- isolate_, condition, then_statement, else_statement, pos);
+ zone_, condition, then_statement, else_statement, pos);
VISIT_AND_RETURN(IfStatement, stmt)
}
@@ -3027,7 +3122,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Block* catch_block,
int pos) {
TryCatchStatement* stmt = new(zone_) TryCatchStatement(
- index, try_block, scope, variable, catch_block, pos);
+ zone_, index, try_block, scope, variable, catch_block, pos);
VISIT_AND_RETURN(TryCatchStatement, stmt)
}
@@ -3035,57 +3130,59 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Block* try_block,
Block* finally_block,
int pos) {
- TryFinallyStatement* stmt =
- new(zone_) TryFinallyStatement(index, try_block, finally_block, pos);
+ TryFinallyStatement* stmt = new(zone_) TryFinallyStatement(
+ zone_, index, try_block, finally_block, pos);
VISIT_AND_RETURN(TryFinallyStatement, stmt)
}
DebuggerStatement* NewDebuggerStatement(int pos) {
- DebuggerStatement* stmt = new(zone_) DebuggerStatement(pos);
+ DebuggerStatement* stmt = new(zone_) DebuggerStatement(zone_, pos);
VISIT_AND_RETURN(DebuggerStatement, stmt)
}
EmptyStatement* NewEmptyStatement(int pos) {
- return new(zone_) EmptyStatement(pos);
+ return new(zone_) EmptyStatement(zone_, pos);
}
CaseClause* NewCaseClause(
Expression* label, ZoneList<Statement*>* statements, int pos) {
CaseClause* clause =
- new(zone_) CaseClause(isolate_, label, statements, pos);
+ new(zone_) CaseClause(zone_, label, statements, pos);
VISIT_AND_RETURN(CaseClause, clause)
}
Literal* NewLiteral(Handle<Object> handle, int pos) {
- Literal* lit = new(zone_) Literal(isolate_, handle, pos);
+ Literal* lit = new(zone_) Literal(zone_, handle, pos);
VISIT_AND_RETURN(Literal, lit)
}
Literal* NewNumberLiteral(double number, int pos) {
- return NewLiteral(isolate_->factory()->NewNumber(number, TENURED), pos);
+ return NewLiteral(
+ zone_->isolate()->factory()->NewNumber(number, TENURED), pos);
}
ObjectLiteral* NewObjectLiteral(
- Handle<FixedArray> constant_properties,
ZoneList<ObjectLiteral::Property*>* properties,
int literal_index,
- bool is_simple,
- bool fast_elements,
- int depth,
- bool may_store_doubles,
+ int boilerplate_properties,
bool has_function,
int pos) {
ObjectLiteral* lit = new(zone_) ObjectLiteral(
- isolate_, constant_properties, properties, literal_index,
- is_simple, fast_elements, depth, may_store_doubles, has_function, pos);
+ zone_, properties, literal_index, boilerplate_properties,
+ has_function, pos);
VISIT_AND_RETURN(ObjectLiteral, lit)
}
+ ObjectLiteral::Property* NewObjectLiteralProperty(Literal* key,
+ Expression* value) {
+ return new(zone_) ObjectLiteral::Property(zone_, key, value);
+ }
+
ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
FunctionLiteral* value,
int pos) {
ObjectLiteral::Property* prop =
- new(zone_) ObjectLiteral::Property(is_getter, value);
+ new(zone_) ObjectLiteral::Property(zone_, is_getter, value);
prop->set_key(NewLiteral(value->name(), pos));
return prop; // Not an AST node, will not be visited.
}
@@ -3095,25 +3192,21 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
int literal_index,
int pos) {
RegExpLiteral* lit =
- new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index, pos);
+ new(zone_) RegExpLiteral(zone_, pattern, flags, literal_index, pos);
VISIT_AND_RETURN(RegExpLiteral, lit);
}
- ArrayLiteral* NewArrayLiteral(Handle<FixedArray> constant_elements,
- ZoneList<Expression*>* values,
+ ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int literal_index,
- bool is_simple,
- int depth,
int pos) {
ArrayLiteral* lit = new(zone_) ArrayLiteral(
- isolate_, constant_elements, values, literal_index, is_simple,
- depth, pos);
+ zone_, values, literal_index, pos);
VISIT_AND_RETURN(ArrayLiteral, lit)
}
VariableProxy* NewVariableProxy(Variable* var,
int pos = RelocInfo::kNoPosition) {
- VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var, pos);
+ VariableProxy* proxy = new(zone_) VariableProxy(zone_, var, pos);
VISIT_AND_RETURN(VariableProxy, proxy)
}
@@ -3122,26 +3215,26 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Interface* interface = Interface::NewValue(),
int position = RelocInfo::kNoPosition) {
VariableProxy* proxy =
- new(zone_) VariableProxy(isolate_, name, is_this, interface, position);
+ new(zone_) VariableProxy(zone_, name, is_this, interface, position);
VISIT_AND_RETURN(VariableProxy, proxy)
}
Property* NewProperty(Expression* obj, Expression* key, int pos) {
- Property* prop = new(zone_) Property(isolate_, obj, key, pos);
+ Property* prop = new(zone_) Property(zone_, obj, key, pos);
VISIT_AND_RETURN(Property, prop)
}
Call* NewCall(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
- Call* call = new(zone_) Call(isolate_, expression, arguments, pos);
+ Call* call = new(zone_) Call(zone_, expression, arguments, pos);
VISIT_AND_RETURN(Call, call)
}
CallNew* NewCallNew(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
- CallNew* call = new(zone_) CallNew(isolate_, expression, arguments, pos);
+ CallNew* call = new(zone_) CallNew(zone_, expression, arguments, pos);
VISIT_AND_RETURN(CallNew, call)
}
@@ -3150,7 +3243,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
ZoneList<Expression*>* arguments,
int pos) {
CallRuntime* call =
- new(zone_) CallRuntime(isolate_, name, function, arguments, pos);
+ new(zone_) CallRuntime(zone_, name, function, arguments, pos);
VISIT_AND_RETURN(CallRuntime, call)
}
@@ -3158,7 +3251,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* expression,
int pos) {
UnaryOperation* node =
- new(zone_) UnaryOperation(isolate_, op, expression, pos);
+ new(zone_) UnaryOperation(zone_, op, expression, pos);
VISIT_AND_RETURN(UnaryOperation, node)
}
@@ -3167,7 +3260,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* right,
int pos) {
BinaryOperation* node =
- new(zone_) BinaryOperation(isolate_, op, left, right, pos);
+ new(zone_) BinaryOperation(zone_, op, left, right, pos);
VISIT_AND_RETURN(BinaryOperation, node)
}
@@ -3176,7 +3269,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* expr,
int pos) {
CountOperation* node =
- new(zone_) CountOperation(isolate_, op, is_prefix, expr, pos);
+ new(zone_) CountOperation(zone_, op, is_prefix, expr, pos);
VISIT_AND_RETURN(CountOperation, node)
}
@@ -3185,7 +3278,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* right,
int pos) {
CompareOperation* node =
- new(zone_) CompareOperation(isolate_, op, left, right, pos);
+ new(zone_) CompareOperation(zone_, op, left, right, pos);
VISIT_AND_RETURN(CompareOperation, node)
}
@@ -3194,7 +3287,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* else_expression,
int position) {
Conditional* cond = new(zone_) Conditional(
- isolate_, condition, then_expression, else_expression, position);
+ zone_, condition, then_expression, else_expression, position);
VISIT_AND_RETURN(Conditional, cond)
}
@@ -3203,8 +3296,8 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Expression* value,
int pos) {
Assignment* assign =
- new(zone_) Assignment(isolate_, op, target, value, pos);
- assign->Init(isolate_, this);
+ new(zone_) Assignment(zone_, op, target, value, pos);
+ assign->Init(zone_, this);
VISIT_AND_RETURN(Assignment, assign)
}
@@ -3213,12 +3306,12 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Yield::Kind yield_kind,
int pos) {
Yield* yield = new(zone_) Yield(
- isolate_, generator_object, expression, yield_kind, pos);
+ zone_, generator_object, expression, yield_kind, pos);
VISIT_AND_RETURN(Yield, yield)
}
Throw* NewThrow(Expression* exception, int pos) {
- Throw* t = new(zone_) Throw(isolate_, exception, pos);
+ Throw* t = new(zone_) Throw(zone_, exception, pos);
VISIT_AND_RETURN(Throw, t)
}
@@ -3237,7 +3330,7 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
FunctionLiteral::IsGeneratorFlag is_generator,
int position) {
FunctionLiteral* lit = new(zone_) FunctionLiteral(
- isolate_, name, scope, body,
+ zone_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count,
parameter_count, function_type, has_duplicate_parameters, is_function,
is_parenthesized, is_generator, position);
@@ -3251,19 +3344,18 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
NativeFunctionLiteral* NewNativeFunctionLiteral(
Handle<String> name, v8::Extension* extension, int pos) {
NativeFunctionLiteral* lit =
- new(zone_) NativeFunctionLiteral(isolate_, name, extension, pos);
+ new(zone_) NativeFunctionLiteral(zone_, name, extension, pos);
VISIT_AND_RETURN(NativeFunctionLiteral, lit)
}
ThisFunction* NewThisFunction(int pos) {
- ThisFunction* fun = new(zone_) ThisFunction(isolate_, pos);
+ ThisFunction* fun = new(zone_) ThisFunction(zone_, pos);
VISIT_AND_RETURN(ThisFunction, fun)
}
#undef VISIT_AND_RETURN
private:
- Isolate* isolate_;
Zone* zone_;
Visitor visitor_;
};
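
Taken together, the ast.h changes above parameterize AST construction by a Zone alone; code that still needs the Isolate (heap factory access, stack-limit checks, id allocation) reaches it via zone->isolate(). A hedged usage sketch, not a quote from the parser:

// Illustrative only: building a trivial statement with the Zone-based factory.
void BuildReturnUndefined(Zone* zone) {
  AstNodeFactory<AstConstructionVisitor> factory(zone);  // was (isolate, zone)
  Handle<Object> undefined =
      zone->isolate()->factory()->undefined_value();
  Literal* value = factory.NewLiteral(undefined, RelocInfo::kNoPosition);
  ReturnStatement* ret =
      factory.NewReturnStatement(value, RelocInfo::kNoPosition);
  USE(ret);  // A real caller would append this to a statement list.
}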
diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h
index 789721edfc..d7d4df6763 100644
--- a/deps/v8/src/atomicops.h
+++ b/deps/v8/src/atomicops.h
@@ -159,6 +159,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_A64
+#include "atomicops_internals_a64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
diff --git a/deps/v8/src/atomicops_internals_a64_gcc.h b/deps/v8/src/atomicops_internals_a64_gcc.h
new file mode 100644
index 0000000000..074da5841e
--- /dev/null
+++ b/deps/v8/src/atomicops_internals_a64_gcc.h
@@ -0,0 +1,416 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_A64_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_A64_GCC_H_
+
+namespace v8 {
+namespace internal {
+
+inline void MemoryBarrier() { /* Not used. */ }
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+ "clrex \n\t" // In case we didn't swap.
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
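
The ldxr/stxr pair above is a load-linked/store-conditional loop: ldxr marks the word for exclusive access, stxr writes 0 into %w[temp] on success or 1 if the exclusive monitor was lost, cbnz retries until the store succeeds, and clrex drops the reservation on the not-equal path. Functionally this matches a relaxed compare-and-swap; an illustrative equivalent using the GCC/Clang builtin (not code from this file, assumes a GCC-compatible toolchain):

// Illustrative only: what the assembly above computes, expressed with a
// compiler builtin.
inline Atomic32 SketchNoBarrierCompareAndSwap(volatile Atomic32* ptr,
                                              Atomic32 old_value,
                                              Atomic32 new_value) {
  Atomic32 expected = old_value;
  __atomic_compare_exchange_n(ptr, &expected, new_value,
                              false,  // strong, no spurious failures
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return expected;  // Previous value, whether or not the swap happened.
}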
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
+ "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
+ "add %w[result], %w[result], %w[increment]\n\t"
+ "stxr %w[temp], %w[result], [%[ptr]] \n\t" // Try to store the result.
+ "cbnz %w[temp], 0b \n\t" // Retry on failure.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ "0: \n\t"
+ "ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
+ "add %w[result], %w[result], %w[increment]\n\t"
+ "stxr %w[temp], %w[result], [%[ptr]] \n\t" // Try to store the result.
+ "cbnz %w[temp], 0b \n\t" // Retry on failure.
+ "dmb ish \n\t" // Data memory barrier.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "dmb ish \n\t" // Data memory barrier.
+ "1: \n\t"
+    // If the compare failed, the 'dmb' is unnecessary, but we still need a
+ // 'clrex'.
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ "0: \n\t"
+ "ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+    // If the compare failed we still need a 'clrex'.
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ ::: "memory" // Prevent gcc from reordering before the store above.
+ ); // NOLINT
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ ::: "memory" // Prevent gcc from reordering after the store below.
+ ); // NOLINT
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ ::: "memory" // Prevent gcc from reordering before the load above.
+ ); // NOLINT
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ ::: "memory" // Prevent gcc from reordering after the load below.
+ ); // NOLINT
+ return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], [%[ptr]] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], [%[ptr]] \n\t"
+ "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], [%[ptr]] \n\t"
+ "add %[result], %[result], %[increment] \n\t"
+ "stxr %w[temp], %[result], [%[ptr]] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t"
+ "0: \n\t"
+ "ldxr %[result], [%[ptr]] \n\t"
+ "add %[result], %[result], %[increment] \n\t"
+ "stxr %w[temp], %[result], [%[ptr]] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "dmb ish \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], [%[ptr]] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "dmb ish \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t"
+ "0: \n\t"
+ "ldxr %[prev], [%[ptr]] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp)
+ : [ptr]"r" (ptr),
+ [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t"
+ ::: "memory"
+ ); // NOLINT
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t"
+ ::: "memory"
+ ); // NOLINT
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t"
+ ::: "memory"
+ ); // NOLINT
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t"
+ ::: "memory"
+ ); // NOLINT
+ return *ptr;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
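
The A64 primitives above all share the same exclusive-monitor shape: ldxr loads and arms the monitor, stxr attempts the store and reports a status word, and cbnz retries the sequence if another core stole the reservation; the placement of 'dmb ish' is what distinguishes the acquire, release and full-barrier variants. As a caller-side illustration only (a minimal sketch, not part of this patch, assuming it is compiled inside v8::internal where Atomic32 and the functions above are visible via atomicops.h), the primitives compose into a simple spinlock like this:

    // Sketch: a tiny spinlock built on the primitives declared above.
    // Atomic32 comes from atomicops.h; the class itself is illustrative.
    class SpinLock {
     public:
      SpinLock() : state_(0) {}
      void Lock() {
        // Acquire semantics: the dmb that follows a successful swap keeps
        // the critical section from being hoisted above the acquisition.
        while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
          // Spin until the observed previous value is 0 (unlocked).
        }
      }
      void Unlock() {
        // Release semantics: the dmb before the store keeps the critical
        // section from sinking below the unlock.
        Release_Store(&state_, 0);
      }
     private:
      volatile Atomic32 state_;
    };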
diff --git a/deps/v8/src/bignum-dtoa.cc b/deps/v8/src/bignum-dtoa.cc
index c5ad4420c8..2b91122af5 100644
--- a/deps/v8/src/bignum-dtoa.cc
+++ b/deps/v8/src/bignum-dtoa.cc
@@ -394,7 +394,8 @@ static int EstimatePower(int exponent) {
// For doubles len(f) == 53 (don't forget the hidden bit).
const int kSignificandSize = 53;
- double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
+ double estimate =
+ std::ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
return static_cast<int>(estimate);
}
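
The change above only namespace-qualifies the ceil call; the estimate itself is untouched. As a quick sanity check of the formula (taking k1Log10 to be log10(2) ≈ 0.30103, as its name and the matching kD_1_LOG2_10 constant in cached-powers.cc suggest): for exponent = 0 it yields ceil((0 + 53 - 1) * 0.30103 - 1e-10) = ceil(15.65) = 16, which is roughly the number of decimal digits a 53-bit significand can require.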
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 234a2118bd..d11ff34754 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -40,9 +40,12 @@
#include "objects-visiting.h"
#include "platform.h"
#include "snapshot.h"
+#include "trig-table.h"
#include "extensions/externalize-string-extension.h"
+#include "extensions/free-buffer-extension.h"
#include "extensions/gc-extension.h"
#include "extensions/statistics-extension.h"
+#include "extensions/trigger-failure-extension.h"
#include "code-stubs.h"
namespace v8 {
@@ -98,10 +101,39 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
}
+static const char* GCFunctionName() {
+ bool flag_given = FLAG_expose_gc_as != NULL && strlen(FLAG_expose_gc_as) != 0;
+ return flag_given ? FLAG_expose_gc_as : "gc";
+}
+
+
+v8::Extension* Bootstrapper::free_buffer_extension_ = NULL;
+v8::Extension* Bootstrapper::gc_extension_ = NULL;
+v8::Extension* Bootstrapper::externalize_string_extension_ = NULL;
+v8::Extension* Bootstrapper::statistics_extension_ = NULL;
+v8::Extension* Bootstrapper::trigger_failure_extension_ = NULL;
+
+
void Bootstrapper::InitializeOncePerProcess() {
- GCExtension::Register();
- ExternalizeStringExtension::Register();
- StatisticsExtension::Register();
+ free_buffer_extension_ = new FreeBufferExtension;
+ v8::RegisterExtension(free_buffer_extension_);
+ gc_extension_ = new GCExtension(GCFunctionName());
+ v8::RegisterExtension(gc_extension_);
+ externalize_string_extension_ = new ExternalizeStringExtension;
+ v8::RegisterExtension(externalize_string_extension_);
+ statistics_extension_ = new StatisticsExtension;
+ v8::RegisterExtension(statistics_extension_);
+ trigger_failure_extension_ = new TriggerFailureExtension;
+ v8::RegisterExtension(trigger_failure_extension_);
+}
+
+
+void Bootstrapper::TearDownExtensions() {
+ delete free_buffer_extension_;
+ delete gc_extension_;
+ delete externalize_string_extension_;
+ delete statistics_extension_;
+ delete trigger_failure_extension_;
}
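
With this change InitializeOncePerProcess owns the extension objects it registers, and the new TearDownExtensions releases them, so embedders that repeatedly bring V8 up and down no longer leak the built-in extensions. The same register-then-delete pattern applies to embedder-defined extensions; a hedged sketch follows (the "embedder/hello" name and JS source are illustrative; only v8::Extension and v8::RegisterExtension come from the public API):

    // Sketch: registering a trivial embedder extension the same way the
    // bootstrapper now registers its built-in ones.
    class HelloExtension : public v8::Extension {
     public:
      HelloExtension() : v8::Extension("embedder/hello", "function hello() {}") {}
    };

    static v8::Extension* hello_extension = NULL;

    void InitExtensionsOnce() {
      hello_extension = new HelloExtension;
      v8::RegisterExtension(hello_extension);  // V8 stores a pointer, not ownership.
    }

    void TearDownEmbedderExtensions() {
      delete hello_extension;  // Mirror of InitExtensionsOnce, as in the patch.
      hello_extension = NULL;
    }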
@@ -231,13 +263,18 @@ class Genesis BASE_EMBEDDED {
// provided.
static bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
+ static bool InstallAutoExtensions(Isolate* isolate,
+ ExtensionStates* extension_states);
+ static bool InstallRequestedExtensions(Isolate* isolate,
+ v8::ExtensionConfiguration* extensions,
+ ExtensionStates* extension_states);
static bool InstallExtension(Isolate* isolate,
const char* name,
ExtensionStates* extension_states);
static bool InstallExtension(Isolate* isolate,
v8::RegisteredExtension* current,
ExtensionStates* extension_states);
- static void InstallSpecialObjects(Handle<Context> native_context);
+ static bool InstallSpecialObjects(Handle<Context> native_context);
bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
@@ -334,17 +371,6 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
global_proxy->set_native_context(*factory->null_value());
SetObjectPrototype(global_proxy, factory->null_value());
- env->set_global_proxy(env->global_object());
- env->global_object()->set_global_receiver(env->global_object());
-}
-
-
-void Bootstrapper::ReattachGlobal(Handle<Context> env,
- Handle<JSGlobalProxy> global_proxy) {
- env->global_object()->set_global_receiver(*global_proxy);
- env->set_global_proxy(*global_proxy);
- SetObjectPrototype(global_proxy, Handle<JSObject>(env->global_object()));
- global_proxy->set_native_context(*env);
}
@@ -549,7 +575,7 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
map->AppendDescriptor(&d, witness);
}
{ // Add name.
- CallbacksDescriptor d(*factory()->name_string(), *name, rw_attribs);
+ CallbacksDescriptor d(*factory()->name_string(), *name, ro_attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
@@ -677,7 +703,7 @@ void Genesis::CreateRoots() {
// Allocate the message listeners object.
{
- v8::NeanderArray listeners;
+ v8::NeanderArray listeners(isolate());
native_context()->set_message_listeners(*listeners.value());
}
}
@@ -1068,33 +1094,14 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{ // -- T y p e d A r r a y s
- Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
- EXTERNAL_BYTE_ELEMENTS);
- native_context()->set_int8_array_fun(*int8_fun);
- Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
- native_context()->set_uint8_array_fun(*uint8_fun);
- Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
- EXTERNAL_SHORT_ELEMENTS);
- native_context()->set_int16_array_fun(*int16_fun);
- Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
- native_context()->set_uint16_array_fun(*uint16_fun);
- Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
- EXTERNAL_INT_ELEMENTS);
- native_context()->set_int32_array_fun(*int32_fun);
- Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
- EXTERNAL_UNSIGNED_INT_ELEMENTS);
- native_context()->set_uint32_array_fun(*uint32_fun);
- Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
- EXTERNAL_FLOAT_ELEMENTS);
- native_context()->set_float_array_fun(*float_fun);
- Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
- EXTERNAL_DOUBLE_ELEMENTS);
- native_context()->set_double_array_fun(*double_fun);
- Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
- EXTERNAL_PIXEL_ELEMENTS);
- native_context()->set_uint8c_array_fun(*uint8c_fun);
+#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ Handle<JSFunction> fun = InstallTypedArray(#Type "Array", \
+ EXTERNAL_##TYPE##_ELEMENTS); \
+ native_context()->set_##type##_array_fun(*fun); \
+ }
+ TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
+#undef INSTALL_TYPED_ARRAY
Handle<JSFunction> data_view_fun =
InstallFunction(
@@ -1306,12 +1313,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
native_context()->set_out_of_memory(heap->false_value());
// Initialize the embedder data slot.
- Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
+ Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
native_context()->set_embedder_data(*embedder_data);
-
- // Allocate the random seed slot.
- Handle<ByteArray> random_seed = factory->NewByteArray(kRandomStateSize);
- native_context()->set_random_seed(*random_seed);
}
@@ -1507,7 +1510,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
if (cache == NULL || !cache->Lookup(name, &function_info)) {
ASSERT(source->IsOneByteRepresentation());
Handle<String> script_name = factory->NewStringFromUtf8(name);
- function_info = Compiler::Compile(
+ function_info = Compiler::CompileScript(
source,
script_name,
0,
@@ -1578,6 +1581,10 @@ void Genesis::InstallNativeFunctions() {
void Genesis::InstallExperimentalNativeFunctions() {
+ INSTALL_NATIVE(JSFunction, "RunMicrotasks", run_microtasks);
+ INSTALL_NATIVE(JSFunction, "EnqueueExternalMicrotask",
+ enqueue_external_microtask);
+
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
@@ -1591,8 +1598,6 @@ void Genesis::InstallExperimentalNativeFunctions() {
observers_begin_perform_splice);
INSTALL_NATIVE(JSFunction, "EndPerformSplice",
observers_end_perform_splice);
- INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
- observers_deliver_changes);
}
}
@@ -1678,6 +1683,8 @@ bool Genesis::InstallNatives() {
builtins->set_native_context(*native_context());
builtins->set_global_context(*native_context());
builtins->set_global_receiver(*builtins);
+ builtins->set_global_receiver(native_context()->global_proxy());
+
// Set up the 'global' properties of the builtins object. The
// 'global' property that refers to the global object is the only
@@ -1691,6 +1698,11 @@ bool Genesis::InstallNatives() {
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
builtins, global_string, global_obj, attributes));
+ Handle<String> builtins_string =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ builtins, builtins_string, builtins, attributes));
// Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(native_context()->global_object())->
@@ -2023,55 +2035,28 @@ bool Genesis::InstallNatives() {
}
+#define INSTALL_EXPERIMENTAL_NATIVE(i, flag, file) \
+ if (FLAG_harmony_##flag && \
+ strcmp(ExperimentalNatives::GetScriptName(i).start(), \
+ "native " file) == 0) { \
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
+ }
+
+
bool Genesis::InstallExperimentalNatives() {
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount();
i++) {
- if (FLAG_harmony_symbols &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native symbol.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_proxies &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native proxy.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_collections &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native collection.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_observation &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native object-observe.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_generators &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native generator.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_iteration &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native array-iterator.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_strings &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native harmony-string.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_arrays &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native harmony-array.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_maths &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native harmony-math.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
+ INSTALL_EXPERIMENTAL_NATIVE(i, symbols, "symbol.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, proxies, "proxy.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, collections, "collection.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, observation, "object-observe.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, promises, "promise.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, generators, "generator.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, iteration, "array-iterator.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, arrays, "harmony-array.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, maths, "harmony-math.js")
}
InstallExperimentalNativeFunctions();
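
The INSTALL_EXPERIMENTAL_NATIVE macro above collapses nine near-identical if-blocks into one line per script. Expanding the first invocation by hand reproduces exactly what the deleted code used to spell out:

    // INSTALL_EXPERIMENTAL_NATIVE(i, symbols, "symbol.js") expands to:
    if (FLAG_harmony_symbols &&
        strcmp(ExperimentalNatives::GetScriptName(i).start(),
               "native symbol.js") == 0) {
      if (!CompileExperimentalBuiltin(isolate(), i)) return false;
    }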
@@ -2181,13 +2166,12 @@ bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
BootstrapperActive active(this);
SaveContext saved_context(isolate_);
isolate_->set_context(*native_context);
- if (!Genesis::InstallExtensions(native_context, extensions)) return false;
- Genesis::InstallSpecialObjects(native_context);
- return true;
+ return Genesis::InstallExtensions(native_context, extensions) &&
+ Genesis::InstallSpecialObjects(native_context);
}
-void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
+bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Isolate* isolate = native_context->GetIsolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -2197,11 +2181,9 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives =
factory->InternalizeUtf8String(FLAG_expose_natives_as);
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- global, natives,
- Handle<JSObject>(global->builtins()),
- DONT_ENUM));
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ global, natives, Handle<JSObject>(global->builtins()), DONT_ENUM);
+ if (isolate->has_pending_exception()) return false;
}
Handle<Object> Error = GetProperty(global, "Error");
@@ -2210,10 +2192,9 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
STATIC_ASCII_VECTOR("stackTraceLimit"));
Handle<Smi> stack_trace_limit(
Smi::FromInt(FLAG_stack_trace_limit), isolate);
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- Handle<JSObject>::cast(Error), name,
- stack_trace_limit, NONE));
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ Handle<JSObject>::cast(Error), name, stack_trace_limit, NONE);
+ if (isolate->has_pending_exception()) return false;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2222,7 +2203,7 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Debug* debug = isolate->debug();
// If loading fails we just bail out without installing the
// debugger but without tanking the whole context.
- if (!debug->Load()) return;
+ if (!debug->Load()) return true;
// Set the security token for the debugger context to the same as
// the shell native context to allow calling between these (otherwise
// exposing debug global object doesn't make much sense).
@@ -2233,11 +2214,12 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
factory->InternalizeUtf8String(FLAG_expose_debug_as);
Handle<Object> global_proxy(
debug->debug_context()->global_proxy(), isolate);
- CHECK_NOT_EMPTY_HANDLE(isolate,
- JSObject::SetLocalPropertyIgnoreAttributes(
- global, debug_string, global_proxy, DONT_ENUM));
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ global, debug_string, global_proxy, DONT_ENUM);
+ if (isolate->has_pending_exception()) return false;
}
#endif
+ return true;
}
@@ -2269,35 +2251,46 @@ void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
reinterpret_cast<void*>(static_cast<intptr_t>(state));
}
+
bool Genesis::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
Isolate* isolate = native_context->GetIsolate();
ExtensionStates extension_states; // All extensions have state UNVISITED.
- // Install auto extensions.
- v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
- while (current != NULL) {
- if (current->extension()->auto_enable())
- InstallExtension(isolate, current, &extension_states);
- current = current->next();
+ return InstallAutoExtensions(isolate, &extension_states) &&
+ (!FLAG_expose_free_buffer ||
+ InstallExtension(isolate, "v8/free-buffer", &extension_states)) &&
+ (!FLAG_expose_gc ||
+ InstallExtension(isolate, "v8/gc", &extension_states)) &&
+ (!FLAG_expose_externalize_string ||
+ InstallExtension(isolate, "v8/externalize", &extension_states)) &&
+ (!FLAG_track_gc_object_stats ||
+ InstallExtension(isolate, "v8/statistics", &extension_states)) &&
+ (!FLAG_expose_trigger_failure ||
+ InstallExtension(isolate, "v8/trigger-failure", &extension_states)) &&
+ InstallRequestedExtensions(isolate, extensions, &extension_states);
+}
+
+
+bool Genesis::InstallAutoExtensions(Isolate* isolate,
+ ExtensionStates* extension_states) {
+ for (v8::RegisteredExtension* it = v8::RegisteredExtension::first_extension();
+ it != NULL;
+ it = it->next()) {
+ if (it->extension()->auto_enable() &&
+ !InstallExtension(isolate, it, extension_states)) {
+ return false;
+ }
}
+ return true;
+}
- if (FLAG_expose_gc) InstallExtension(isolate, "v8/gc", &extension_states);
- if (FLAG_expose_externalize_string) {
- InstallExtension(isolate, "v8/externalize", &extension_states);
- }
- if (FLAG_track_gc_object_stats) {
- InstallExtension(isolate, "v8/statistics", &extension_states);
- }
- if (extensions == NULL) return true;
- // Install required extensions
- int count = v8::ImplementationUtilities::GetNameCount(extensions);
- const char** names = v8::ImplementationUtilities::GetNames(extensions);
- for (int i = 0; i < count; i++) {
- if (!InstallExtension(isolate, names[i], &extension_states))
- return false;
+bool Genesis::InstallRequestedExtensions(Isolate* isolate,
+ v8::ExtensionConfiguration* extensions,
+ ExtensionStates* extension_states) {
+ for (const char** it = extensions->begin(); it != extensions->end(); ++it) {
+ if (!InstallExtension(isolate, *it, extension_states)) return false;
}
-
return true;
}
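
InstallRequestedExtensions now walks the embedder-supplied list through ExtensionConfiguration's begin()/end() iterators instead of the internal GetNameCount/GetNames helpers. A hedged embedder-side sketch (the isolate variable and the "embedder/hello" name are illustrative; only ExtensionConfiguration and Context::New come from the public API):

    // Sketch: naming the extensions to install when creating a context.
    const char* extension_names[] = { "v8/gc", "embedder/hello" };
    v8::ExtensionConfiguration config(2, extension_names);
    // InstallRequestedExtensions() above iterates exactly this list.
    v8::Handle<v8::Context> context = v8::Context::New(isolate, &config);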
@@ -2307,19 +2300,16 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
bool Genesis::InstallExtension(Isolate* isolate,
const char* name,
ExtensionStates* extension_states) {
- v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
- // Loop until we find the relevant extension
- while (current != NULL) {
- if (strcmp(name, current->extension()->name()) == 0) break;
- current = current->next();
- }
- // Didn't find the extension; fail.
- if (current == NULL) {
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Cannot find required extension");
- return false;
+ for (v8::RegisteredExtension* it = v8::RegisteredExtension::first_extension();
+ it != NULL;
+ it = it->next()) {
+ if (strcmp(name, it->extension()->name()) == 0) {
+ return InstallExtension(isolate, it, extension_states);
+ }
}
- return InstallExtension(isolate, current, extension_states);
+ return Utils::ApiCheck(false,
+ "v8::Context::New()",
+ "Cannot find required extension");
}
@@ -2331,9 +2321,9 @@ bool Genesis::InstallExtension(Isolate* isolate,
if (extension_states->get_state(current) == INSTALLED) return true;
// The current node has already been visited so there must be a
// cycle in the dependency graph; fail.
- if (extension_states->get_state(current) == VISITED) {
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Circular extension dependency");
+ if (!Utils::ApiCheck(extension_states->get_state(current) != VISITED,
+ "v8::Context::New()",
+ "Circular extension dependency")) {
return false;
}
ASSERT(extension_states->get_state(current) == UNVISITED);
@@ -2382,7 +2372,7 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
Handle<JSFunction> function
= Handle<JSFunction>(JSFunction::cast(function_object));
builtins->set_javascript_builtin(id, *function);
- if (!JSFunction::CompileLazy(function, CLEAR_EXCEPTION)) {
+ if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
builtins->set_javascript_builtin_code(id, function->shared()->code());
@@ -2426,8 +2416,8 @@ bool Genesis::ConfigureGlobalObjects(
bool Genesis::ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template) {
ASSERT(!object_template.is_null());
- ASSERT(object->IsInstanceOf(
- FunctionTemplateInfo::cast(object_template->constructor())));
+ ASSERT(FunctionTemplateInfo::cast(object_template->constructor())
+      ->IsTemplateFor(object->map()));
bool pending_exception = false;
Handle<JSObject> obj =
@@ -2569,12 +2559,33 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
}
+class NoTrackDoubleFieldsForSerializerScope {
+ public:
+ NoTrackDoubleFieldsForSerializerScope() : flag_(FLAG_track_double_fields) {
+ if (Serializer::enabled()) {
+ // Disable tracking double fields because heap numbers treated as
+      // Disable tracking double fields because heap numbers are treated as
+ FLAG_track_double_fields = false;
+ }
+ }
+ ~NoTrackDoubleFieldsForSerializerScope() {
+ if (Serializer::enabled()) {
+ FLAG_track_double_fields = flag_;
+ }
+ }
+
+ private:
+ bool flag_;
+};
+
+
Genesis::Genesis(Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions)
: isolate_(isolate),
active_(isolate->bootstrapper()) {
+ NoTrackDoubleFieldsForSerializerScope disable_double_tracking_for_serializer;
result_ = Handle<Context>::null();
// If V8 cannot be initialized, just return.
if (!V8::Initialize(NULL)) return;
@@ -2609,6 +2620,8 @@ Genesis::Genesis(Isolate* isolate,
HookUpGlobalProxy(inner_global, global_proxy);
HookUpInnerGlobal(inner_global);
+ native_context()->builtins()->set_global_receiver(
+ native_context()->global_proxy());
if (!ConfigureGlobalObjects(global_template)) return;
} else {
@@ -2635,13 +2648,67 @@ Genesis::Genesis(Isolate* isolate,
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
- // Initially seed the per-context random number generator
- // using the per-isolate random number generator.
- uint32_t* state = reinterpret_cast<uint32_t*>(
- native_context()->random_seed()->GetDataStartAddress());
- do {
- isolate->random_number_generator()->NextBytes(state, kRandomStateSize);
- } while (state[0] == 0 || state[1] == 0);
+ // We can't (de-)serialize typed arrays currently, but we are lucky: The state
+ // of the random number generator needs no initialization during snapshot
+ // creation time and we don't need trigonometric functions then.
+ if (!Serializer::enabled()) {
+ // Initially seed the per-context random number generator using the
+ // per-isolate random number generator.
+ const int num_elems = 2;
+ const int num_bytes = num_elems * sizeof(uint32_t);
+ uint32_t* state = reinterpret_cast<uint32_t*>(malloc(num_bytes));
+
+ do {
+ isolate->random_number_generator()->NextBytes(state, num_bytes);
+ } while (state[0] == 0 || state[1] == 0);
+
+ v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(
+ reinterpret_cast<v8::Isolate*>(isolate), state, num_bytes);
+ Utils::OpenHandle(*buffer)->set_should_be_freed(true);
+ v8::Local<v8::Uint32Array> ta = v8::Uint32Array::New(buffer, 0, num_elems);
+ Handle<JSBuiltinsObject> builtins(native_context()->builtins());
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("rngstate")),
+ Utils::OpenHandle(*ta),
+ NONE);
+
+ // Initialize trigonometric lookup tables and constants.
+ const int table_num_bytes = TrigonometricLookupTable::table_num_bytes();
+ v8::Local<v8::ArrayBuffer> sin_buffer = v8::ArrayBuffer::New(
+ reinterpret_cast<v8::Isolate*>(isolate),
+ TrigonometricLookupTable::sin_table(), table_num_bytes);
+ v8::Local<v8::ArrayBuffer> cos_buffer = v8::ArrayBuffer::New(
+ reinterpret_cast<v8::Isolate*>(isolate),
+ TrigonometricLookupTable::cos_x_interval_table(), table_num_bytes);
+ v8::Local<v8::Float64Array> sin_table = v8::Float64Array::New(
+ sin_buffer, 0, TrigonometricLookupTable::table_size());
+ v8::Local<v8::Float64Array> cos_table = v8::Float64Array::New(
+ cos_buffer, 0, TrigonometricLookupTable::table_size());
+
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kSinTable")),
+ Utils::OpenHandle(*sin_table),
+ NONE);
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kCosXIntervalTable")),
+ Utils::OpenHandle(*cos_table),
+ NONE);
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kSamples")),
+ factory()->NewHeapNumber(
+ TrigonometricLookupTable::samples()),
+ NONE);
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kIndexConvert")),
+ factory()->NewHeapNumber(
+ TrigonometricLookupTable::samples_over_pi_half()),
+ NONE);
+ }
result_ = native_context();
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index bac9f40372..14dd1bd997 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -90,6 +90,7 @@ class SourceCodeCache BASE_EMBEDDED {
class Bootstrapper {
public:
static void InitializeOncePerProcess();
+ static void TearDownExtensions();
// Requires: Heap::SetUp has been called.
void Initialize(bool create_heap_objects);
@@ -105,9 +106,6 @@ class Bootstrapper {
// Detach the environment from its outer global object.
void DetachGlobal(Handle<Context> env);
- // Reattach an outer global object to an environment.
- void ReattachGlobal(Handle<Context> env, Handle<JSGlobalProxy> global_proxy);
-
// Traverses the pointers for memory management.
void Iterate(ObjectVisitor* v);
@@ -149,6 +147,12 @@ class Bootstrapper {
explicit Bootstrapper(Isolate* isolate);
+ static v8::Extension* free_buffer_extension_;
+ static v8::Extension* gc_extension_;
+ static v8::Extension* externalize_string_extension_;
+ static v8::Extension* statistics_extension_;
+ static v8::Extension* trigger_failure_extension_;
+
DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
};
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index b614904c9f..b9ff9e1344 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -153,8 +153,8 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
#endif
-static inline bool CalledAsConstructor(Isolate* isolate) {
#ifdef DEBUG
+static inline bool CalledAsConstructor(Isolate* isolate) {
// Calculate the result using a full stack frame iterator and check
// that the state of the stack is as we assume it to be in the
// code below.
@@ -163,7 +163,6 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
it.Advance();
StackFrame* frame = it.frame();
bool reference_result = frame->is_construct();
-#endif
Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
// Because we know fp points to an exit frame we can use the relevant
// part of ExitFrame::ComputeCallerState directly.
@@ -180,6 +179,7 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
ASSERT_EQ(result, reference_result);
return result;
}
+#endif
// ----------------------------------------------------------------------------
@@ -276,15 +276,10 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
elms->address() + size_delta));
HeapProfiler* profiler = heap->isolate()->heap_profiler();
- if (profiler->is_profiling()) {
+ if (profiler->is_tracking_object_moves()) {
profiler->ObjectMoveEvent(elms->address(),
new_elms->address(),
new_elms->Size());
- if (profiler->is_tracking_allocations()) {
- // Report filler object as a new allocation.
- // Otherwise it will become an untracked object.
- profiler->NewObjectEvent(elms->address(), elms->Size());
- }
}
return new_elms;
}
@@ -311,6 +306,8 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
+ if (array->map()->is_observed()) return NULL;
+ if (!array->map()->is_extensible()) return NULL;
HeapObject* elms = array->elements();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
@@ -1103,7 +1100,7 @@ BUILTIN(StrictModePoisonPill) {
static inline Object* FindHidden(Heap* heap,
Object* object,
FunctionTemplateInfo* type) {
- if (object->IsInstanceOf(type)) return object;
+ if (type->IsTemplateFor(object)) return object;
Object* proto = object->GetPrototype(heap->isolate());
if (proto->IsJSObject() &&
JSObject::cast(proto)->map()->is_hidden_prototype()) {
@@ -1176,6 +1173,15 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
fun_data = *desc;
}
+ SharedFunctionInfo* shared = function->shared();
+ if (shared->is_classic_mode() && !shared->native()) {
+ Object* recv = args[0];
+ ASSERT(!recv->IsNull());
+ if (recv->IsUndefined()) {
+ args[0] = function->context()->global_object()->global_receiver();
+ }
+ }
+
Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data);
if (raw_holder->IsNull()) {
@@ -1303,26 +1309,11 @@ BUILTIN(HandleApiCallAsConstructor) {
}
-static void Generate_LoadIC_Initialize(MacroAssembler* masm) {
- LoadIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_LoadIC_PreMonomorphic(MacroAssembler* masm) {
- LoadIC::GeneratePreMonomorphic(masm);
-}
-
-
static void Generate_LoadIC_Miss(MacroAssembler* masm) {
LoadIC::GenerateMiss(masm);
}
-static void Generate_LoadIC_Megamorphic(MacroAssembler* masm) {
- LoadIC::GenerateMegamorphic(masm);
-}
-
-
static void Generate_LoadIC_Normal(MacroAssembler* masm) {
LoadIC::GenerateNormal(masm);
}
@@ -1330,7 +1321,8 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
LoadStubCompiler::GenerateLoadViaGetter(
- masm, LoadStubCompiler::registers()[0], Handle<JSFunction>());
+ masm, Handle<HeapType>::null(),
+ LoadStubCompiler::registers()[0], Handle<JSFunction>());
}
@@ -1350,12 +1342,7 @@ static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, MISS);
-}
-
-
-static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
+ KeyedLoadIC::GenerateMiss(masm);
}
@@ -1389,31 +1376,6 @@ static void Generate_StoreIC_Slow(MacroAssembler* masm) {
}
-static void Generate_StoreIC_Slow_Strict(MacroAssembler* masm) {
- StoreIC::GenerateSlow(masm);
-}
-
-
-static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
- StoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
- StoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_StoreIC_PreMonomorphic(MacroAssembler* masm) {
- StoreIC::GeneratePreMonomorphic(masm);
-}
-
-
-static void Generate_StoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
- StoreIC::GeneratePreMonomorphic(masm);
-}
-
-
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
StoreIC::GenerateMiss(masm);
}
@@ -1424,43 +1386,9 @@ static void Generate_StoreIC_Normal(MacroAssembler* masm) {
}
-static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
-}
-
-
-static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm, kStrictMode);
-}
-
-
-static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
- StoreIC::GenerateRuntimeSetProperty(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
- StoreIC::GenerateRuntimeSetProperty(masm, kStrictMode);
-}
-
-
static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
- StoreStubCompiler::GenerateStoreViaSetter(masm, Handle<JSFunction>());
-}
-
-
-static void Generate_StoreIC_Generic(MacroAssembler* masm) {
- StoreIC::GenerateRuntimeSetProperty(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_Generic_Strict(MacroAssembler* masm) {
- StoreIC::GenerateRuntimeSetProperty(masm, kStrictMode);
+ StoreStubCompiler::GenerateStoreViaSetter(
+ masm, Handle<HeapType>::null(), Handle<JSFunction>());
}
@@ -1475,12 +1403,7 @@ static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, MISS);
-}
-
-
-static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
+ KeyedStoreIC::GenerateMiss(masm);
}
@@ -1489,11 +1412,6 @@ static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
}
-static void Generate_KeyedStoreIC_Slow_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateSlow(masm);
-}
-
-
static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
@@ -1676,13 +1594,12 @@ void Builtins::InitBuiltinFunctionTable() {
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
-#define DEF_FUNCTION_PTR_H(aname, kind, extra) \
+#define DEF_FUNCTION_PTR_H(aname, kind) \
functions->generator = FUNCTION_ADDR(Generate_##aname); \
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
- functions->flags = Code::ComputeFlags( \
- Code::HANDLER, MONOMORPHIC, extra, Code::NORMAL, Code::kind); \
+ functions->flags = Code::ComputeHandlerFlags(Code::kind); \
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
@@ -1708,7 +1625,9 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
- union { int force_alignment; byte buffer[8*KB]; } u;
+ // TODO(jbramley): I had to increase the size of this buffer from 8KB because
+ // we can generate a lot of debug code on A64.
+ union { int force_alignment; byte buffer[16*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
@@ -1750,9 +1669,10 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
builtins_[i] = code;
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
- PrintF("Builtin: %s\n", functions[i].s_name);
- Code::cast(code)->Disassemble(functions[i].s_name);
- PrintF("\n");
+ CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+ PrintF(trace_scope.file(), "Builtin: %s\n", functions[i].s_name);
+ Code::cast(code)->Disassemble(functions[i].s_name, trace_scope.file());
+ PrintF(trace_scope.file(), "\n");
}
#endif
} else {
@@ -1813,7 +1733,7 @@ Handle<Code> Builtins::name() { \
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
-#define DEFINE_BUILTIN_ACCESSOR_H(name, kind, extra) \
+#define DEFINE_BUILTIN_ACCESSOR_H(name, kind) \
Handle<Code> Builtins::name() { \
Code** code_address = \
reinterpret_cast<Code**>(builtin_address(k##name)); \
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 43e9164596..d977a4817c 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -50,15 +50,17 @@ enum BuiltinExtraArguments {
#define CODE_AGE_LIST(V) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
-#define CODE_AGE_LIST_WITH_NO_AGE(V) \
+#define CODE_AGE_LIST_COMPLETE(V) \
+ V(NotExecuted) \
+ V(ExecutedOnce) \
V(NoAge) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
#define DECLARE_CODE_AGE_BUILTIN(C, V) \
V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
- UNINITIALIZED, Code::kNoExtraICState) \
+ UNINITIALIZED, kNoExtraICState) \
V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
- UNINITIALIZED, Code::kNoExtraICState)
+ UNINITIALIZED, kNoExtraICState)
// Define list of builtins implemented in C++.
@@ -85,149 +87,115 @@ enum BuiltinExtraArguments {
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
+ V(InOptimizationQueue, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LazyCompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LazyRecompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(ConcurrentRecompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
+ V(CompileUnoptimized, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(CompileOptimized, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- \
+ kNoExtraICState) \
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
- V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Generic, STORE_IC, GENERIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Generic_Strict, STORE_IC, GENERIC, \
- kStrictMode) \
- V(StoreIC_GlobalProxy, STORE_IC, GENERIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
- kStrictMode) \
- V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
- kStrictMode) \
- V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
- kStrictMode) \
- V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \
- kStrictMode) \
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(FunctionApply, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(InternalArrayCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(InterruptCheck, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(StackCheck, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtin handlers implemented in assembly.
-#define BUILTIN_LIST_H(V) \
- V(LoadIC_Slow, LOAD_IC, Code::kNoExtraICState) \
- V(KeyedLoadIC_Slow, KEYED_LOAD_IC, Code::kNoExtraICState) \
- V(StoreIC_Slow, STORE_IC, Code::kNoExtraICState) \
- V(StoreIC_Slow_Strict, STORE_IC, kStrictMode) \
- V(KeyedStoreIC_Slow, KEYED_STORE_IC, Code::kNoExtraICState)\
- V(KeyedStoreIC_Slow_Strict, KEYED_STORE_IC, kStrictMode) \
- V(LoadIC_Normal, LOAD_IC, Code::kNoExtraICState) \
- V(StoreIC_Normal, STORE_IC, Code::kNoExtraICState) \
- V(StoreIC_Normal_Strict, STORE_IC, kStrictMode)
+#define BUILTIN_LIST_H(V) \
+ V(LoadIC_Slow, LOAD_IC) \
+ V(KeyedLoadIC_Slow, KEYED_LOAD_IC) \
+ V(StoreIC_Slow, STORE_IC) \
+ V(KeyedStoreIC_Slow, KEYED_STORE_IC) \
+ V(LoadIC_Normal, LOAD_IC) \
+ V(StoreIC_Normal, STORE_IC)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@@ -316,7 +284,7 @@ class Builtins {
enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
-#define DEF_ENUM_H(name, kind, extra) k##name,
+#define DEF_ENUM_H(name, kind) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_H(DEF_ENUM_H)
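
Since the handler builtins no longer carry an extra-IC-state column, every consumer macro drops to two parameters. With the new DEF_ENUM_H defined just above, the BUILTIN_LIST_H(DEF_ENUM_H) line expands to the six enum entries:

    // BUILTIN_LIST_H(DEF_ENUM_H) now yields:
    kLoadIC_Slow, kKeyedLoadIC_Slow, kStoreIC_Slow,
    kKeyedStoreIC_Slow, kLoadIC_Normal, kStoreIC_Normal,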
@@ -343,7 +311,7 @@ class Builtins {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
-#define DECLARE_BUILTIN_ACCESSOR_H(name, kind, extra) Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_H(name, kind) Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
@@ -366,6 +334,11 @@ class Builtins {
}
static const char* GetName(JavaScript id) { return javascript_names_[id]; }
+ const char* name(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < builtin_count);
+ return names_[index];
+ }
static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
Handle<Code> GetCode(JavaScript id, bool* resolved);
static int NumberOfJavaScriptBuiltins() { return id_count; }
@@ -389,15 +362,15 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
- static void Generate_InRecompileQueue(MacroAssembler* masm);
- static void Generate_ConcurrentRecompile(MacroAssembler* masm);
+ static void Generate_CompileUnoptimized(MacroAssembler* masm);
+ static void Generate_InOptimizationQueue(MacroAssembler* masm);
+ static void Generate_CompileOptimized(MacroAssembler* masm);
+ static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
- static void Generate_LazyCompile(MacroAssembler* masm);
- static void Generate_LazyRecompile(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index fbfaf26159..faa26cdba2 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -133,7 +133,10 @@ static const CachedPower kCachedPowers[] = {
{V8_2PART_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
};
+#ifdef DEBUG
static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
+#endif
+
static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
// Difference between the decimal exponents in the table above.
@@ -149,7 +152,7 @@ void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
int kQ = DiyFp::kSignificandSize;
// Some platforms return incorrect sign on 0 result. We can ignore that here,
// which means we can avoid depending on platform.h.
- double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
+ double k = std::ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
int foo = kCachedPowersOffset;
int index =
(foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index 767ad6513a..f52feda6c1 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -66,6 +66,27 @@ struct IdentifierPart {
}
};
+
+// WhiteSpace according to ECMA-262 5.1, 7.2.
+struct WhiteSpace {
+ static inline bool Is(uc32 c) {
+ return c == 0x0009 || // <TAB>
+ c == 0x000B || // <VT>
+ c == 0x000C || // <FF>
+ c == 0xFEFF || // <BOM>
+ // \u0020 and \u00A0 are included in unibrow::WhiteSpace.
+ unibrow::WhiteSpace::Is(c);
+ }
+};
+
+
+// WhiteSpace and LineTerminator according to ECMA-262 5.1, 7.2 and 7.3.
+struct WhiteSpaceOrLineTerminator {
+ static inline bool Is(uc32 c) {
+ return WhiteSpace::Is(c) || unibrow::LineTerminator::Is(c);
+ }
+};
+
} } // namespace v8::internal
#endif // V8_CHAR_PREDICATES_H_
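
The two new predicates split ECMA-262's WhiteSpace and LineTerminator handling cleanly. A minimal usage sketch (the code points and expected results follow the comments in the structs above):

    // Sketch: exercising the new predicates from v8::internal.
    using v8::internal::WhiteSpace;
    using v8::internal::WhiteSpaceOrLineTerminator;
    bool tab = WhiteSpace::Is(0x0009);                      // true: <TAB>, listed explicitly
    bool space = WhiteSpace::Is(0x0020);                    // true: covered by unibrow::WhiteSpace
    bool newline = WhiteSpaceOrLineTerminator::Is(0x000A);  // true: <LF> is a LineTerminator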
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index e08cd7c685..62e04ff205 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -30,6 +30,8 @@
#if V8_LIBC_GLIBC || V8_OS_BSD
# include <cxxabi.h>
# include <execinfo.h>
+#elif V8_OS_QNX
+# include <backtrace.h>
#endif // V8_LIBC_GLIBC || V8_OS_BSD
#include <stdio.h>
@@ -64,6 +66,26 @@ static V8_INLINE void DumpBacktrace() {
}
}
free(symbols);
+#elif V8_OS_QNX
+ char out[1024];
+ bt_accessor_t acc;
+ bt_memmap_t memmap;
+ bt_init_accessor(&acc, BT_SELF);
+ bt_load_memmap(&acc, &memmap);
+ bt_sprn_memmap(&memmap, out, sizeof(out));
+ i::OS::PrintError(out);
+ bt_addr_t trace[100];
+ int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace));
+ i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ i::OS::PrintError("(empty)\n");
+ } else {
+ bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
+ out, sizeof(out), NULL);
+ i::OS::PrintError(out);
+ }
+ bt_unload_memmap(&memmap);
+ bt_release_accessor(&acc);
#endif // V8_LIBC_GLIBC || V8_OS_BSD
}
@@ -116,20 +138,8 @@ void CheckNonEqualsHelper(const char* file,
}
-void API_Fatal(const char* location, const char* format, ...) {
- i::OS::PrintError("\n#\n# Fatal error in %s\n# ", location);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n\n");
- i::OS::Abort();
-}
-
-
namespace v8 { namespace internal {
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
} } // namespace v8::internal
-
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 9d2db28d8f..57f1852618 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -34,6 +34,7 @@
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
#ifdef DEBUG
@@ -51,6 +52,23 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
#define UNREACHABLE() ((void) 0)
#endif
+// Simulator specific helpers.
+#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_A64)
+ // TODO(all): If possible automatically prepend an indicator like
+ // UNIMPLEMENTED or LOCATION.
+ #define ASM_UNIMPLEMENTED(message) \
+ __ Debug(message, __LINE__, NO_PARAM)
+ #define ASM_UNIMPLEMENTED_BREAK(message) \
+ __ Debug(message, __LINE__, \
+ FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
+ #define ASM_LOCATION(message) \
+ __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+#else
+ #define ASM_UNIMPLEMENTED(message)
+ #define ASM_UNIMPLEMENTED_BREAK(message)
+ #define ASM_LOCATION(message)
+#endif
+
// The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts.
@@ -268,7 +286,7 @@ template <int> class StaticAssertionHelper { };
#define STATIC_CHECK(test) \
typedef \
StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
+ SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED
#endif
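
The new ASM_* helpers introduced in checks.h above only emit anything in simulator builds targeting A64; elsewhere they compile away. A hedged usage sketch (the message strings are illustrative, and the usual '#define __ ACCESS_MASM(masm)' convention is assumed to be in effect in the surrounding code generator):

    // Inside an A64 code generator:
    ASM_LOCATION("FullCodeGenerator::VisitForInStatement");  // simulator-only trace point
    ASM_UNIMPLEMENTED_BREAK("Unexpected fall-through");      // breaks unless the flag suppresses it
    // In non-simulator or non-A64 builds both lines expand to nothing.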
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 3232a74cc3..b7247eb6bf 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -81,23 +81,10 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
- class ArrayContextChecker {
- public:
- ArrayContextChecker(HGraphBuilder* builder, HValue* constructor,
- HValue* array_function)
- : checker_(builder) {
- checker_.If<HCompareObjectEqAndBranch, HValue*>(constructor,
- array_function);
- checker_.Then();
- }
-
- ~ArrayContextChecker() {
- checker_.ElseDeopt("Array constructor called from different context");
- checker_.End();
- }
- private:
- IfBuilder checker_;
- };
+ HLoadNamedField* BuildLoadNamedField(HValue* object,
+ Representation representation,
+ int offset,
+ bool is_inobject);
enum ArgumentClass {
NONE,
@@ -106,7 +93,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
};
HValue* BuildArrayConstructor(ElementsKind kind,
- ContextCheckMode context_mode,
AllocationSiteOverrideMode override_mode,
ArgumentClass argument_class);
HValue* BuildInternalArrayConstructor(ElementsKind kind,
@@ -150,26 +136,24 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
next_block->SetJoinId(BailoutId::StubEntry());
set_current_block(next_block);
+ bool runtime_stack_params = descriptor_->stack_parameter_count_.is_valid();
+ HInstruction* stack_parameter_count = NULL;
for (int i = 0; i < param_count; ++i) {
- HParameter* param =
- Add<HParameter>(i, HParameter::REGISTER_PARAMETER);
+ Representation r = descriptor_->IsParameterCountRegister(i)
+ ? Representation::Integer32()
+ : Representation::Tagged();
+ HParameter* param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
start_environment->Bind(i, param);
parameters_[i] = param;
+ if (descriptor_->IsParameterCountRegister(i)) {
+ param->set_type(HType::Smi());
+ stack_parameter_count = param;
+ arguments_length_ = stack_parameter_count;
+ }
}
- HInstruction* stack_parameter_count;
- if (descriptor_->stack_parameter_count_.is_valid()) {
- ASSERT(descriptor_->environment_length() == (param_count + 1));
- stack_parameter_count = New<HParameter>(param_count,
- HParameter::REGISTER_PARAMETER,
- Representation::Integer32());
- stack_parameter_count->set_type(HType::Smi());
- // It's essential to bind this value to the environment in case of deopt.
- AddInstruction(stack_parameter_count);
- start_environment->Bind(param_count, stack_parameter_count);
- arguments_length_ = stack_parameter_count;
- } else {
- ASSERT(descriptor_->environment_length() == param_count);
+ ASSERT(!runtime_stack_params || arguments_length_ != NULL);
+ if (!runtime_stack_params) {
stack_parameter_count = graph()->GetConstantMinus1();
arguments_length_ = graph()->GetConstant0();
}
@@ -189,10 +173,11 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) {
if (!stack_parameter_count->IsConstant() &&
descriptor_->hint_stack_parameter_count_ < 0) {
- HInstruction* amount = graph()->GetConstant1();
- stack_pop_count = Add<HAdd>(stack_parameter_count, amount);
- stack_pop_count->ChangeRepresentation(Representation::Integer32());
+ HInstruction* constant_one = graph()->GetConstant1();
+ stack_pop_count = AddUncasted<HAdd>(stack_parameter_count, constant_one);
stack_pop_count->ClearFlag(HValue::kCanOverflow);
+ // TODO(mvstanton): verify that stack_parameter_count+1 really fits in a
+ // smi.
} else {
int count = descriptor_->hint_stack_parameter_count_;
stack_pop_count = Add<HConstant>(count);
@@ -211,7 +196,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
- explicit CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
+ CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
: CodeStubGraphBuilderBase(isolate, stub) {}
protected:
@@ -252,9 +237,6 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
// Update the static counter each time a new code stub is generated.
isolate->counters()->code_stubs()->Increment();
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(&masm, false);
-
// Generate the code for the stub.
masm.set_generating_stub(true);
NoCurrentFrameScope scope(&masm);
@@ -270,8 +252,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(),
- GetStubFlags());
+ GetStubType());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -304,7 +285,8 @@ static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
Handle<Code> code = chunk->Codegen();
if (FLAG_profile_hydrogen_code_stub_compilation) {
double ms = timer.Elapsed().InMillisecondsF();
- PrintF("[Lazy compilation of %s took %0.3f ms]\n", *stub->GetName(), ms);
+ PrintF("[Lazy compilation of %s took %0.3f ms]\n",
+ stub->GetName().get(), ms);
}
return code;
}
@@ -317,7 +299,8 @@ HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
// Check if the parameter is already a SMI or heap number.
IfBuilder if_number(this);
if_number.If<HIsSmiAndBranch>(value);
- if_number.OrIf<HCompareMap>(value, isolate()->factory()->heap_number_map());
+ if_number.OrIf<HCompareMap>(value, isolate()->factory()->heap_number_map(),
+ top_info());
if_number.Then();
// Return the number.
@@ -345,7 +328,7 @@ template <>
HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
info()->MarkAsSavesCallerDoubles();
HValue* number = GetParameter(NumberToStringStub::kNumber);
- return BuildNumberToString(number, handle(Type::Number(), isolate()));
+ return BuildNumberToString(number, Type::Number(zone()));
}
@@ -373,13 +356,15 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kTransitionInfoOffset);
- HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+ HInstruction* boilerplate = Add<HLoadNamedField>(
+ allocation_site, static_cast<HValue*>(NULL), access);
HValue* push_value;
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
- if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
+ if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map(),
+ top_info());
if_fixed_cow.Then();
push_value = BuildCloneShallowArray(boilerplate,
allocation_site,
@@ -390,7 +375,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
if_fixed_cow.Else();
IfBuilder if_fixed(this);
- if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
+ if_fixed.If<HCompareMap>(elements, factory->fixed_array_map(), top_info());
if_fixed.Then();
push_value = BuildCloneShallowArray(boilerplate,
allocation_site,
@@ -443,7 +428,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kTransitionInfoOffset);
- HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+ HInstruction* boilerplate = Add<HLoadNamedField>(
+ allocation_site, static_cast<HValue*>(NULL), access);
int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
int object_size = size;
@@ -452,9 +438,11 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
}
HValue* boilerplate_map = Add<HLoadNamedField>(
- boilerplate, HObjectAccess::ForMap());
+ boilerplate, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap());
HValue* boilerplate_size = Add<HLoadNamedField>(
- boilerplate_map, HObjectAccess::ForMapInstanceSize());
+ boilerplate_map, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMapInstanceSize());
HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
checker.If<HCompareNumericAndBranch>(boilerplate_size,
size_in_words, Token::EQ);
@@ -463,17 +451,19 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* size_in_bytes = Add<HConstant>(size);
HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), JS_OBJECT_TYPE);
+ NOT_TENURED, JS_OBJECT_TYPE);
for (int i = 0; i < object_size; i += kPointerSize) {
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(i);
- Add<HStoreNamedField>(object, access,
- Add<HLoadNamedField>(boilerplate, access));
+ HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(i);
+ Add<HStoreNamedField>(
+ object, access, Add<HLoadNamedField>(
+ boilerplate, static_cast<HValue*>(NULL), access));
}
ASSERT(FLAG_allocation_site_pretenuring || (size == object_size));
if (FLAG_allocation_site_pretenuring) {
- BuildCreateAllocationMemento(object, object_size, allocation_site);
+ BuildCreateAllocationMemento(
+ object, Add<HConstant>(object_size), allocation_site);
}
environment()->Push(object);
@@ -512,6 +502,18 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
AllocationSite::kNestedSiteOffset),
graph()->GetConstant0());
+ // Pretenuring calculation field.
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kPretenureDataOffset),
+ graph()->GetConstant0());
+
+ // Pretenuring memento creation count field.
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kPretenureCreateCountOffset),
+ graph()->GetConstant0());
+
// Store an empty fixed array for the code dependency.
HConstant* empty_fixed_array =
Add<HConstant>(isolate()->factory()->empty_fixed_array());
@@ -524,8 +526,9 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
// Link the object to the allocation site list
HValue* site_list = Add<HConstant>(
ExternalReference::allocation_sites_list_address(isolate()));
- HValue* site = Add<HLoadNamedField>(site_list,
- HObjectAccess::ForAllocationSiteList());
+ HValue* site = Add<HLoadNamedField>(
+ site_list, static_cast<HValue*>(NULL),
+ HObjectAccess::ForAllocationSiteList());
store = Add<HStoreNamedField>(object,
HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset),
site);
@@ -533,15 +536,11 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object);
- // We use a hammer (SkipWriteBarrier()) to indicate that we know the input
- // cell is really a Cell, and so no write barrier is needed.
- // TODO(mvstanton): Add a debug_code check to verify the input cell is really
- // a cell. (perhaps with a new instruction, HAssert).
- HInstruction* cell = GetParameter(0);
- HObjectAccess access = HObjectAccess::ForCellValue();
- store = Add<HStoreNamedField>(cell, access, object);
- store->SkipWriteBarrier();
- return cell;
+ HInstruction* feedback_vector = GetParameter(0);
+ HInstruction* slot = GetParameter(1);
+ Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ INITIALIZING_STORE);
+ return feedback_vector;
}
@@ -555,7 +554,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
@@ -565,13 +564,32 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
}
+HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
+ HValue* object,
+ Representation representation,
+ int offset,
+ bool is_inobject) {
+ HObjectAccess access = is_inobject
+ ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
+ : HObjectAccess::ForBackingStoreOffset(offset, representation);
+ if (representation.IsDouble()) {
+ // Load the heap number.
+ object = Add<HLoadNamedField>(
+ object, static_cast<HValue*>(NULL),
+ access.WithRepresentation(Representation::Tagged()));
+ // Load the double value from it.
+ access = HObjectAccess::ForHeapNumberValue();
+ }
+ return Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
+}
+
+
template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
- Representation rep = casted_stub()->representation();
- HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
- HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddLoadNamedField(GetParameter(0), access);
+ return BuildLoadNamedField(GetParameter(0),
+ casted_stub()->representation(),
+ casted_stub()->offset(),
+ casted_stub()->is_inobject());
}
@@ -582,11 +600,10 @@ Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
template<>
HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
- Representation rep = casted_stub()->representation();
- HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
- HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddLoadNamedField(GetParameter(0), access);
+ return BuildLoadNamedField(GetParameter(0),
+ casted_stub()->representation(),
+ casted_stub()->offset(),
+ casted_stub()->is_inobject());
}
@@ -600,7 +617,7 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2),
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
+ STORE, NEVER_RETURN_HOLE, casted_stub()->store_mode());
return GetParameter(2);
}
@@ -631,19 +648,10 @@ Handle<Code> TransitionElementsKindStub::GenerateCode(Isolate* isolate) {
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
ElementsKind kind,
- ContextCheckMode context_mode,
AllocationSiteOverrideMode override_mode,
ArgumentClass argument_class) {
HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor);
- if (context_mode == CONTEXT_CHECK_REQUIRED) {
- HInstruction* array_function = BuildGetArrayFunction();
- ArrayContextChecker checker(this, constructor, array_function);
- }
-
- HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
- // Walk through the property cell to the AllocationSite
- HValue* alloc_site = Add<HLoadNamedField>(property_cell,
- HObjectAccess::ForCellValue());
+ HValue* alloc_site = GetParameter(ArrayConstructorStubBase::kAllocationSite);
JSArrayBuilder array_builder(this, kind, alloc_site, constructor,
override_mode);
HValue* result = NULL;
@@ -695,27 +703,7 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
HInstruction* argument = Add<HAccessArgumentsAt>(
elements, constant_one, constant_zero);
- HConstant* max_alloc_length =
- Add<HConstant>(JSObject::kInitialMaxFastElementArray);
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- HConstant* initial_capacity_node = Add<HConstant>(initial_capacity);
-
- HInstruction* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length);
- IfBuilder if_builder(this);
- if_builder.If<HCompareNumericAndBranch>(checked_arg, constant_zero,
- Token::EQ);
- if_builder.Then();
- Push(initial_capacity_node); // capacity
- Push(constant_zero); // length
- if_builder.Else();
- Push(checked_arg); // capacity
- Push(checked_arg); // length
- if_builder.End();
-
- // Figure out total size
- HValue* length = Pop();
- HValue* capacity = Pop();
- return array_builder->AllocateArray(capacity, length, true);
+ return BuildAllocateArrayFromLength(array_builder, argument);
}
@@ -735,10 +723,15 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
// the array because they aren't compatible with a smi array.
// If it's a double array, no problem, and if it's fast then no
// problem either because doubles are boxed.
- bool fill_with_hole = IsFastSmiElementsKind(kind);
+ //
+ // TODO(mvstanton): consider an instruction to memset fill the array
+ // with zero in this case instead.
+ JSArrayBuilder::FillMode fill_mode = IsFastSmiElementsKind(kind)
+ ? JSArrayBuilder::FILL_WITH_HOLE
+ : JSArrayBuilder::DONT_FILL_WITH_HOLE;
HValue* new_object = array_builder->AllocateArray(checked_length,
checked_length,
- fill_with_hole);
+ fill_mode);
HValue* elements = array_builder->GetElementsLocation();
ASSERT(elements != NULL);
@@ -761,9 +754,8 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
template <>
HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
- ContextCheckMode context_mode = casted_stub()->context_mode();
AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
- return BuildArrayConstructor(kind, context_mode, override_mode, NONE);
+ return BuildArrayConstructor(kind, override_mode, NONE);
}
@@ -776,9 +768,8 @@ template <>
HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
- ContextCheckMode context_mode = casted_stub()->context_mode();
AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
- return BuildArrayConstructor(kind, context_mode, override_mode, SINGLE);
+ return BuildArrayConstructor(kind, override_mode, SINGLE);
}
@@ -791,9 +782,8 @@ Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode(
template <>
HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
- ContextCheckMode context_mode = casted_stub()->context_mode();
AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
- return BuildArrayConstructor(kind, context_mode, override_mode, MULTIPLE);
+ return BuildArrayConstructor(kind, override_mode, MULTIPLE);
}
@@ -850,7 +840,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
CompareNilICStub* stub = casted_stub();
HIfContinuation continuation;
Handle<Map> sentinel_map(isolate->heap()->meta_map());
- Handle<Type> type = stub->GetType(isolate, sentinel_map);
+ Type* type = stub->GetType(zone(), sentinel_map);
BuildCompareNil(GetParameter(0), type, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
@@ -871,20 +861,22 @@ Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
template <>
-HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
- BinaryOpStub* stub = casted_stub();
- HValue* left = GetParameter(0);
- HValue* right = GetParameter(1);
+HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
+ BinaryOpIC::State state = casted_stub()->state();
- Handle<Type> left_type = stub->GetLeftType(isolate());
- Handle<Type> right_type = stub->GetRightType(isolate());
- Handle<Type> result_type = stub->GetResultType(isolate());
+ HValue* left = GetParameter(BinaryOpICStub::kLeft);
+ HValue* right = GetParameter(BinaryOpICStub::kRight);
+
+ Type* left_type = state.GetLeftType(zone());
+ Type* right_type = state.GetRightType(zone());
+ Type* result_type = state.GetResultType(zone());
ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
- (stub->HasSideEffects(isolate()) || !result_type->Is(Type::None())));
+ (state.HasSideEffects() || !result_type->Is(Type::None())));
HValue* result = NULL;
- if (stub->operation() == Token::ADD &&
+ HAllocationMode allocation_mode(NOT_TENURED);
+ if (state.op() == Token::ADD &&
(left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
!left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
// For the generic add stub a fast case for string addition is performance
@@ -894,17 +886,18 @@ HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
if_leftisstring.If<HIsStringAndBranch>(left);
if_leftisstring.Then();
{
- Push(AddInstruction(BuildBinaryOperation(
- stub->operation(), left, right,
- handle(Type::String(), isolate()), right_type,
- result_type, stub->fixed_right_arg(), true)));
+ Push(BuildBinaryOperation(
+ state.op(), left, right,
+ Type::String(zone()), right_type,
+ result_type, state.fixed_right_arg(),
+ allocation_mode));
}
if_leftisstring.Else();
{
- Push(AddInstruction(BuildBinaryOperation(
- stub->operation(), left, right,
+ Push(BuildBinaryOperation(
+ state.op(), left, right,
left_type, right_type, result_type,
- stub->fixed_right_arg(), true)));
+ state.fixed_right_arg(), allocation_mode));
}
if_leftisstring.End();
result = Pop();
@@ -913,33 +906,34 @@ HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
if_rightisstring.If<HIsStringAndBranch>(right);
if_rightisstring.Then();
{
- Push(AddInstruction(BuildBinaryOperation(
- stub->operation(), left, right,
- left_type, handle(Type::String(), isolate()),
- result_type, stub->fixed_right_arg(), true)));
+ Push(BuildBinaryOperation(
+ state.op(), left, right,
+ left_type, Type::String(zone()),
+ result_type, state.fixed_right_arg(),
+ allocation_mode));
}
if_rightisstring.Else();
{
- Push(AddInstruction(BuildBinaryOperation(
- stub->operation(), left, right,
+ Push(BuildBinaryOperation(
+ state.op(), left, right,
left_type, right_type, result_type,
- stub->fixed_right_arg(), true)));
+ state.fixed_right_arg(), allocation_mode));
}
if_rightisstring.End();
result = Pop();
}
} else {
- result = AddInstruction(BuildBinaryOperation(
- stub->operation(), left, right,
+ result = BuildBinaryOperation(
+ state.op(), left, right,
left_type, right_type, result_type,
- stub->fixed_right_arg(), true));
+ state.fixed_right_arg(), allocation_mode);
}
// If we encounter a generic argument, the number conversion is
// observable, thus we cannot afford to bail out after the fact.
- if (!stub->HasSideEffects(isolate())) {
+ if (!state.HasSideEffects()) {
if (result_type->Is(Type::Smi())) {
- if (stub->operation() == Token::SHR) {
+ if (state.op() == Token::SHR) {
// TODO(olivf) Replace this by a SmiTagU Instruction.
// 0x40000000: this number would convert to negative when interpreting
// the register as signed value;
@@ -957,8 +951,8 @@ HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
// Reuse the double box of one of the operands if we are allowed to (i.e.
// chained binops).
- if (stub->CanReuseDoubleBox()) {
- HValue* operand = (stub->mode() == OVERWRITE_LEFT) ? left : right;
+ if (state.CanReuseDoubleBox()) {
+ HValue* operand = (state.mode() == OVERWRITE_LEFT) ? left : right;
IfBuilder if_heap_number(this);
if_heap_number.IfNot<HIsSmiAndBranch>(operand);
if_heap_number.Then();
@@ -974,7 +968,58 @@ HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
}
-Handle<Code> BinaryOpStub::GenerateCode(Isolate* isolate) {
+Handle<Code> BinaryOpICStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
+ BinaryOpIC::State state = casted_stub()->state();
+
+ HValue* allocation_site = GetParameter(
+ BinaryOpWithAllocationSiteStub::kAllocationSite);
+ HValue* left = GetParameter(BinaryOpWithAllocationSiteStub::kLeft);
+ HValue* right = GetParameter(BinaryOpWithAllocationSiteStub::kRight);
+
+ Type* left_type = state.GetLeftType(zone());
+ Type* right_type = state.GetRightType(zone());
+ Type* result_type = state.GetResultType(zone());
+ HAllocationMode allocation_mode(allocation_site);
+
+ return BuildBinaryOperation(state.op(), left, right,
+ left_type, right_type, result_type,
+ state.fixed_right_arg(), allocation_mode);
+}
+
+
+Handle<Code> BinaryOpWithAllocationSiteStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<StringAddStub>::BuildCodeInitializedStub() {
+ StringAddStub* stub = casted_stub();
+ StringAddFlags flags = stub->flags();
+ PretenureFlag pretenure_flag = stub->pretenure_flag();
+
+ HValue* left = GetParameter(StringAddStub::kLeft);
+ HValue* right = GetParameter(StringAddStub::kRight);
+
+ // Make sure that both arguments are strings if not known in advance.
+ if ((flags & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ left = BuildCheckString(left);
+ }
+ if ((flags & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ right = BuildCheckString(right);
+ }
+
+ return BuildStringAdd(left, right, HAllocationMode(pretenure_flag));
+}
+
+
+Handle<Code> StringAddStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -1006,17 +1051,21 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Handle<PropertyCell> placeholder_cell =
isolate()->factory()->NewPropertyCell(placeholer_value);
- HParameter* receiver = GetParameter(0);
HParameter* value = GetParameter(2);
- // Check that the map of the global has not changed: use a placeholder map
- // that will be replaced later with the global object's map.
- Handle<Map> placeholder_map = isolate()->factory()->meta_map();
- Add<HCheckMaps>(receiver, placeholder_map, top_info());
+ if (stub->check_global()) {
+ // Check that the map of the global has not changed: use a placeholder map
+ // that will be replaced later with the global object's map.
+ Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+ HValue* global = Add<HConstant>(
+ StoreGlobalStub::global_placeholder(isolate()));
+ Add<HCheckMaps>(global, placeholder_map, top_info());
+ }
HValue* cell = Add<HConstant>(placeholder_cell);
HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
- HValue* cell_contents = Add<HLoadNamedField>(cell, access);
+ HValue* cell_contents = Add<HLoadNamedField>(
+ cell, static_cast<HValue*>(NULL), access);
if (stub->is_constant()) {
IfBuilder builder(this);
@@ -1068,7 +1117,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(object, key, value,
casted_stub()->is_jsarray(),
casted_stub()->to_kind(),
- true, ALLOW_RETURN_HOLE,
+ STORE, ALLOW_RETURN_HOLE,
casted_stub()->store_mode());
}
@@ -1094,7 +1143,8 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
Add<HStoreCodeEntry>(js_function, code_object);
// Now link a function into a list of optimized functions.
- HValue* optimized_functions_list = Add<HLoadNamedField>(native_context,
+ HValue* optimized_functions_list = Add<HLoadNamedField>(
+ native_context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
@@ -1112,8 +1162,8 @@ void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
graph()->GetConstantUndefined());
- HValue* code_object = Add<HLoadNamedField>(shared_info,
- HObjectAccess::ForCodeOffset());
+ HValue* code_object = Add<HLoadNamedField>(
+ shared_info, static_cast<HValue*>(NULL), HObjectAccess::ForCodeOffset());
Add<HStoreCodeEntry>(js_function, code_object);
}
@@ -1124,7 +1174,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* native_context) {
Counters* counters = isolate()->counters();
IfBuilder is_optimized(this);
- HInstruction* optimized_map = Add<HLoadNamedField>(shared_info,
+ HInstruction* optimized_map = Add<HLoadNamedField>(
+ shared_info, static_cast<HValue*>(NULL),
HObjectAccess::ForOptimizedCodeMap());
HValue* null_constant = Add<HConstant>(0);
is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
@@ -1139,15 +1190,23 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
// (native context, optimized code, literals).
// Map must never be empty, so check the first elements.
Label install_optimized;
- HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
+ HValue* first_context_slot = Add<HLoadNamedField>(
+ optimized_map, static_cast<HValue*>(NULL),
HObjectAccess::ForFirstContextSlot());
+ HValue* first_osr_ast_slot = Add<HLoadNamedField>(
+ optimized_map, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFirstOsrAstIdSlot());
+ HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
IfBuilder already_in(this);
already_in.If<HCompareObjectEqAndBranch>(native_context,
first_context_slot);
+ already_in.AndIf<HCompareObjectEqAndBranch>(first_osr_ast_slot,
+ osr_ast_id_none);
already_in.Then();
{
- HValue* code_object = Add<HLoadNamedField>(optimized_map,
- HObjectAccess::ForFirstCodeSlot());
+ HValue* code_object = Add<HLoadNamedField>(
+ optimized_map, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFirstCodeSlot());
BuildInstallOptimizedCode(js_function, native_context, code_object);
}
already_in.Else();
@@ -1158,9 +1217,10 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
context(),
LoopBuilder::kPostDecrement,
shared_function_entry_length);
- HValue* array_length = Add<HLoadNamedField>(optimized_map,
+ HValue* array_length = Add<HLoadNamedField>(
+ optimized_map, static_cast<HValue*>(NULL),
HObjectAccess::ForFixedArrayLength());
- HValue* key = loop_builder.BeginBody(array_length,
+ HValue* slot_iterator = loop_builder.BeginBody(array_length,
graph()->GetConstant0(),
Token::GT);
{
@@ -1169,8 +1229,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* second_entry_index =
Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
IfBuilder restore_check(this);
- restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
- Token::EQ);
+ restore_check.If<HCompareNumericAndBranch>(
+ slot_iterator, second_entry_index, Token::EQ);
restore_check.Then();
{
// Store the unoptimized code
@@ -1179,20 +1239,29 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
}
restore_check.Else();
{
- HValue* keyed_minus = AddUncasted<HSub>(
- key, shared_function_entry_length);
- HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
- keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ STATIC_ASSERT(SharedFunctionInfo::kContextOffset == 0);
+ STATIC_ASSERT(SharedFunctionInfo::kEntryLength -
+ SharedFunctionInfo::kOsrAstIdOffset == 1);
+ HValue* native_context_slot = AddUncasted<HSub>(
+ slot_iterator, shared_function_entry_length);
+ HValue* osr_ast_id_slot = AddUncasted<HSub>(
+ slot_iterator, graph()->GetConstant1());
+ HInstruction* native_context_entry = Add<HLoadKeyed>(optimized_map,
+ native_context_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ HInstruction* osr_ast_id_entry = Add<HLoadKeyed>(optimized_map,
+ osr_ast_id_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
IfBuilder done_check(this);
done_check.If<HCompareObjectEqAndBranch>(native_context,
- keyed_lookup);
+ native_context_entry);
+ done_check.AndIf<HCompareObjectEqAndBranch>(osr_ast_id_entry,
+ osr_ast_id_none);
done_check.Then();
{
// Hit: fetch the optimized code.
- HValue* keyed_plus = AddUncasted<HAdd>(
- keyed_minus, graph()->GetConstant1());
+ HValue* code_slot = AddUncasted<HAdd>(
+ native_context_slot, graph()->GetConstant1());
HValue* code_object = Add<HLoadKeyed>(optimized_map,
- keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ code_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
BuildInstallOptimizedCode(js_function, native_context, code_object);
// Fall out of the loop
@@ -1232,7 +1301,8 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
// Compute the function map in the current native context and set that
// as the map of the allocated object.
HInstruction* native_context = BuildGetNativeContext();
- HInstruction* map_slot_value = Add<HLoadNamedField>(native_context,
+ HInstruction* map_slot_value = Add<HLoadNamedField>(
+ native_context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(map_index));
Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
@@ -1269,4 +1339,91 @@ Handle<Code> FastNewClosureStub::GenerateCode(Isolate* isolate) {
}
+template<>
+HValue* CodeStubGraphBuilder<FastNewContextStub>::BuildCodeStub() {
+ int length = casted_stub()->slots() + Context::MIN_CONTEXT_SLOTS;
+
+ // Get the function.
+ HParameter* function = GetParameter(FastNewContextStub::kFunction);
+
+ // Allocate the context in new space.
+ HAllocate* function_context = Add<HAllocate>(
+ Add<HConstant>(length * kPointerSize + FixedArray::kHeaderSize),
+ HType::Tagged(), NOT_TENURED, FIXED_ARRAY_TYPE);
+
+ // Set up the object header.
+ AddStoreMapConstant(function_context,
+ isolate()->factory()->function_context_map());
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForFixedArrayLength(),
+ Add<HConstant>(length));
+
+ // Set up the fixed slots.
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(Context::CLOSURE_INDEX),
+ function);
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX),
+ context());
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX),
+ graph()->GetConstant0());
+
+ // Copy the global object from the previous context.
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(
+ Context::GLOBAL_OBJECT_INDEX),
+ global_object);
+
+ // Initialize the rest of the slots to undefined.
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; ++i) {
+ Add<HStoreNamedField>(function_context,
+ HObjectAccess::ForContextSlot(i),
+ graph()->GetConstantUndefined());
+ }
+
+ return function_context;
+}
+
+
+Handle<Code> FastNewContextStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<KeyedLoadDictionaryElementStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(0);
+ HValue* key = GetParameter(1);
+
+ Add<HCheckSmi>(key);
+
+ return BuildUncheckedDictionaryElementLoad(receiver, key);
+}
+
+
+Handle<Code> KeyedLoadDictionaryElementStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<RegExpConstructResultStub>::BuildCodeStub() {
+ // Determine the parameters.
+ HValue* length = GetParameter(RegExpConstructResultStub::kLength);
+ HValue* index = GetParameter(RegExpConstructResultStub::kIndex);
+ HValue* input = GetParameter(RegExpConstructResultStub::kInput);
+
+ return BuildRegExpConstructResult(length, index, input);
+}
+
+
+Handle<Code> RegExpConstructResultStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index e68a5dd0c8..be14cf6e87 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -46,6 +46,7 @@ CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
deoptimization_handler_(NULL),
+ handler_arguments_mode_(DONT_PASS_ARGUMENTS),
miss_handler_(),
has_miss_handler_(false) { }
@@ -73,8 +74,8 @@ SmartArrayPointer<const char> CodeStub::GetName() {
void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
SmartArrayPointer<const char> name = GetName();
- PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
- GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
+ PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, name.get()));
+ GDBJIT(AddCode(GDBJITInterface::STUB, name.get(), code));
Counters* counters = isolate->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
}
@@ -103,9 +104,6 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
// Update the static counter each time a new code stub is generated.
isolate->counters()->code_stubs()->Increment();
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(&masm, false);
-
// Generate the code for the stub.
masm.set_generating_stub(true);
NoCurrentFrameScope scope(&masm);
@@ -121,8 +119,7 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(),
- GetStubFlags());
+ GetStubType());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -141,7 +138,6 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
if (UseSpecialCache()
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
- ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
ASSERT(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
@@ -160,8 +156,9 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
- new_object->Disassemble(*GetName());
- PrintF("\n");
+ CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+ new_object->Disassemble(GetName().get(), trace_scope.file());
+ PrintF(trace_scope.file(), "\n");
}
#endif
@@ -193,6 +190,7 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
#define DEF_CASE(name) case name: return #name "Stub";
CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
+ case UninitializedMajorKey: return "<UninitializedMajorKey>Stub";
default:
if (!allow_unknown_keys) {
UNREACHABLE();
@@ -213,471 +211,69 @@ void CodeStub::PrintName(StringStream* stream) {
}
-void BinaryOpStub::PrintBaseName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* ovr = "";
- if (mode_ == OVERWRITE_LEFT) ovr = "_ReuseLeft";
- if (mode_ == OVERWRITE_RIGHT) ovr = "_ReuseRight";
- stream->Add("BinaryOpStub_%s%s", op_name, ovr);
-}
-
-
-void BinaryOpStub::PrintState(StringStream* stream) {
- stream->Add("(");
- stream->Add(StateToName(left_state_));
- stream->Add("*");
- if (fixed_right_arg_.has_value) {
- stream->Add("%d", fixed_right_arg_.value);
- } else {
- stream->Add(StateToName(right_state_));
+// static
+void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
+ // Generate the uninitialized versions of the stub.
+ for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
+ for (int mode = NO_OVERWRITE; mode <= OVERWRITE_RIGHT; ++mode) {
+ BinaryOpICStub stub(static_cast<Token::Value>(op),
+ static_cast<OverwriteMode>(mode));
+ stub.GetCode(isolate);
+ }
}
- stream->Add("->");
- stream->Add(StateToName(result_state_));
- stream->Add(")");
-}
-
-
-Maybe<Handle<Object> > BinaryOpStub::Result(Handle<Object> left,
- Handle<Object> right,
- Isolate* isolate) {
- Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
- Builtins::JavaScript func = BinaryOpIC::TokenToJSBuiltin(op_);
- Object* builtin = builtins->javascript_builtin(func);
- Handle<JSFunction> builtin_function =
- Handle<JSFunction>(JSFunction::cast(builtin), isolate);
- bool caught_exception;
- Handle<Object> result = Execution::Call(isolate, builtin_function, left,
- 1, &right, &caught_exception);
- return Maybe<Handle<Object> >(!caught_exception, result);
-}
-
-void BinaryOpStub::Initialize() {
- fixed_right_arg_.has_value = false;
- left_state_ = right_state_ = result_state_ = NONE;
+ // Generate special versions of the stub.
+ BinaryOpIC::State::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
}
-void BinaryOpStub::Generate(Token::Value op,
- State left,
- State right,
- State result,
- OverwriteMode mode,
- Isolate* isolate) {
- BinaryOpStub stub(INITIALIZED);
- stub.op_ = op;
- stub.left_state_ = left;
- stub.right_state_ = right;
- stub.result_state_ = result;
- stub.mode_ = mode;
- stub.GetCode(isolate);
+void BinaryOpICStub::PrintState(StringStream* stream) {
+ state_.Print(stream);
}
-void BinaryOpStub::Generate(Token::Value op,
- State left,
- int right,
- State result,
- OverwriteMode mode,
- Isolate* isolate) {
- BinaryOpStub stub(INITIALIZED);
- stub.op_ = op;
- stub.left_state_ = left;
- stub.fixed_right_arg_.has_value = true;
- stub.fixed_right_arg_.value = right;
- stub.right_state_ = SMI;
- stub.result_state_ = result;
- stub.mode_ = mode;
+// static
+void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
+ const BinaryOpIC::State& state) {
+ BinaryOpICStub stub(state);
stub.GetCode(isolate);
}
-void BinaryOpStub::GenerateAheadOfTime(Isolate* isolate) {
- Token::Value binop[] = {Token::SUB, Token::MOD, Token::DIV, Token::MUL,
- Token::ADD, Token::SAR, Token::BIT_OR, Token::BIT_AND,
- Token::BIT_XOR, Token::SHL, Token::SHR};
- for (unsigned i = 0; i < ARRAY_SIZE(binop); i++) {
- BinaryOpStub stub(UNINITIALIZED);
- stub.op_ = binop[i];
- stub.GetCode(isolate);
- }
-
- // TODO(olivf) We should investigate why adding stubs to the snapshot is so
- // expensive at runtime. When solved we should be able to add most binops to
- // the snapshot instead of hand-picking them.
- // Generated list of commonly used stubs
- Generate(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::MOD, SMI, 2, SMI, NO_OVERWRITE, isolate);
- Generate(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE, isolate);
- Generate(Token::MOD, SMI, 32, SMI, NO_OVERWRITE, isolate);
- Generate(Token::MOD, SMI, 4, SMI, NO_OVERWRITE, isolate);
- Generate(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::MOD, SMI, 8, SMI, NO_OVERWRITE, isolate);
- Generate(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
- Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
- Generate(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
- Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
- Generate(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE, isolate);
- Generate(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
- Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
- Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
- Generate(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE, isolate);
- Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
- Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
-}
-
-
-bool BinaryOpStub::can_encode_arg_value(int32_t value) const {
- return op_ == Token::MOD && value > 0 && IsPowerOf2(value) &&
- FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
-}
-
-
-int BinaryOpStub::encode_arg_value(int32_t value) const {
- ASSERT(can_encode_arg_value(value));
- return WhichPowerOf2(value);
-}
-
-
-int32_t BinaryOpStub::decode_arg_value(int value) const {
- return 1 << value;
-}
-
-
-int BinaryOpStub::encode_token(Token::Value op) const {
- ASSERT(op >= FIRST_TOKEN && op <= LAST_TOKEN);
- return op - FIRST_TOKEN;
-}
-
-
-Token::Value BinaryOpStub::decode_token(int op) const {
- int res = op + FIRST_TOKEN;
- ASSERT(res >= FIRST_TOKEN && res <= LAST_TOKEN);
- return static_cast<Token::Value>(res);
+// static
+void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
+ // Generate special versions of the stub.
+ BinaryOpIC::State::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
}
-const char* BinaryOpStub::StateToName(State state) {
- switch (state) {
- case NONE:
- return "None";
- case SMI:
- return "Smi";
- case INT32:
- return "Int32";
- case NUMBER:
- return "Number";
- case STRING:
- return "String";
- case GENERIC:
- return "Generic";
- }
- return "";
-}
-
-
-void BinaryOpStub::UpdateStatus(Handle<Object> left,
- Handle<Object> right,
- Maybe<Handle<Object> > result) {
- int old_state = GetExtraICState();
-
- UpdateStatus(left, &left_state_);
- UpdateStatus(right, &right_state_);
-
- int32_t value;
- bool new_has_fixed_right_arg =
- right->ToInt32(&value) && can_encode_arg_value(value) &&
- (left_state_ == SMI || left_state_ == INT32) &&
- (result_state_ == NONE || !fixed_right_arg_.has_value);
-
- fixed_right_arg_ = Maybe<int32_t>(new_has_fixed_right_arg, value);
-
- if (result.has_value) UpdateStatus(result.value, &result_state_);
-
- State max_input = Max(left_state_, right_state_);
-
- if (!has_int_result() && op_ != Token::SHR &&
- max_input <= NUMBER && max_input > result_state_) {
- result_state_ = max_input;
- }
-
- ASSERT(result_state_ <= (has_int_result() ? INT32 : NUMBER) ||
- op_ == Token::ADD);
-
- if (old_state == GetExtraICState()) {
- // Tagged operations can lead to non-truncating HChanges
- if (left->IsUndefined() || left->IsBoolean()) {
- left_state_ = GENERIC;
- } else if (right->IsUndefined() || right->IsBoolean()) {
- right_state_ = GENERIC;
- } else {
- // Since the fpu is to precise, we might bail out on numbers which
- // actually would truncate with 64 bit precision.
- ASSERT(!CpuFeatures::IsSupported(SSE2) &&
- result_state_ <= INT32);
- result_state_ = NUMBER;
- }
- }
-}
-
-
-void BinaryOpStub::UpdateStatus(Handle<Object> object,
- State* state) {
- bool is_truncating = (op_ == Token::BIT_AND || op_ == Token::BIT_OR ||
- op_ == Token::BIT_XOR || op_ == Token::SAR ||
- op_ == Token::SHL || op_ == Token::SHR);
- v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(object);
- if (object->IsBoolean() && is_truncating) {
- // Booleans are converted by truncating by HChange.
- type = TypeInfo::Integer32();
- }
- if (object->IsUndefined()) {
- // Undefined will be automatically truncated for us by HChange.
- type = is_truncating ? TypeInfo::Integer32() : TypeInfo::Double();
- }
- State int_state = SmiValuesAre32Bits() ? NUMBER : INT32;
- State new_state = NONE;
- if (type.IsSmi()) {
- new_state = SMI;
- } else if (type.IsInteger32()) {
- new_state = int_state;
- } else if (type.IsNumber()) {
- new_state = NUMBER;
- } else if (object->IsString() && operation() == Token::ADD) {
- new_state = STRING;
- } else {
- new_state = GENERIC;
- }
- if ((new_state <= NUMBER && *state > NUMBER) ||
- (new_state > NUMBER && *state <= NUMBER && *state != NONE)) {
- new_state = GENERIC;
- }
- *state = Max(*state, new_state);
+void BinaryOpICWithAllocationSiteStub::PrintState(StringStream* stream) {
+ state_.Print(stream);
}
-Handle<Type> BinaryOpStub::StateToType(State state,
- Isolate* isolate) {
- Handle<Type> t = handle(Type::None(), isolate);
- switch (state) {
- case NUMBER:
- t = handle(Type::Union(t, handle(Type::Double(), isolate)), isolate);
- // Fall through.
- case INT32:
- t = handle(Type::Union(t, handle(Type::Signed32(), isolate)), isolate);
- // Fall through.
- case SMI:
- t = handle(Type::Union(t, handle(Type::Smi(), isolate)), isolate);
- break;
-
- case STRING:
- t = handle(Type::Union(t, handle(Type::String(), isolate)), isolate);
- break;
- case GENERIC:
- return handle(Type::Any(), isolate);
- break;
- case NONE:
- break;
+// static
+void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(
+ Isolate* isolate, const BinaryOpIC::State& state) {
+ if (state.CouldCreateAllocationMementos()) {
+ BinaryOpICWithAllocationSiteStub stub(state);
+ stub.GetCode(isolate);
}
- return t;
}
-Handle<Type> BinaryOpStub::GetLeftType(Isolate* isolate) const {
- return StateToType(left_state_, isolate);
-}
-
-
-Handle<Type> BinaryOpStub::GetRightType(Isolate* isolate) const {
- return StateToType(right_state_, isolate);
-}
-
-
-Handle<Type> BinaryOpStub::GetResultType(Isolate* isolate) const {
- if (HasSideEffects(isolate)) return StateToType(NONE, isolate);
- if (result_state_ == GENERIC && op_ == Token::ADD) {
- return handle(Type::Union(handle(Type::Number(), isolate),
- handle(Type::String(), isolate)), isolate);
+void StringAddStub::PrintBaseName(StringStream* stream) {
+ stream->Add("StringAddStub");
+ if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ stream->Add("_CheckBoth");
+ } else if ((flags() & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ stream->Add("_CheckLeft");
+ } else if ((flags() & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ stream->Add("_CheckRight");
}
- ASSERT(result_state_ != GENERIC);
- if (result_state_ == NUMBER && op_ == Token::SHR) {
- return handle(Type::Unsigned32(), isolate);
+ if (pretenure_flag() == TENURED) {
+ stream->Add("_Tenured");
}
- return StateToType(result_state_, isolate);
}
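
Editor's note: StringAddStub::PrintBaseName above tests the combined STRING_ADD_CHECK_BOTH mask before the single-bit flags, so a stub that checks both arguments gets one "_CheckBoth" suffix instead of two. A minimal standalone sketch of that ordering trick, with illustrative names rather than the real V8 API:

  #include <cstdio>
  #include <string>

  enum StringAddFlags {
    STRING_ADD_CHECK_NONE  = 0,
    STRING_ADD_CHECK_LEFT  = 1 << 0,
    STRING_ADD_CHECK_RIGHT = 1 << 1,
    STRING_ADD_CHECK_BOTH  = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
  };

  // Test the widest mask first so the combined flag never falls into the
  // single-bit branches.
  std::string StubName(int flags) {
    std::string name = "StringAddStub";
    if ((flags & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
      name += "_CheckBoth";
    } else if ((flags & STRING_ADD_CHECK_LEFT) != 0) {
      name += "_CheckLeft";
    } else if ((flags & STRING_ADD_CHECK_RIGHT) != 0) {
      name += "_CheckRight";
    }
    return name;
  }

  int main() {
    std::printf("%s\n", StubName(STRING_ADD_CHECK_BOTH).c_str());   // ..._CheckBoth
    std::printf("%s\n", StubName(STRING_ADD_CHECK_RIGHT).c_str());  // ..._CheckRight
    return 0;
  }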
@@ -833,7 +429,6 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
// Note: Although a no-op transition is semantically OK, it is hinting at a
// bug somewhere in our state transition machinery.
ASSERT(from != to);
- #ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
@@ -847,7 +442,6 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
to.Print(&stream);
stream.Add("]\n");
stream.OutputToStdOut();
- #endif
}
@@ -875,38 +469,33 @@ void CompareNilICStub::State::Print(StringStream* stream) const {
}
-Handle<Type> CompareNilICStub::GetType(
- Isolate* isolate,
- Handle<Map> map) {
+Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
if (state_.Contains(CompareNilICStub::GENERIC)) {
- return handle(Type::Any(), isolate);
+ return Type::Any(zone);
}
- Handle<Type> result(Type::None(), isolate);
+ Type* result = Type::None(zone);
if (state_.Contains(CompareNilICStub::UNDEFINED)) {
- result = handle(Type::Union(result, handle(Type::Undefined(), isolate)),
- isolate);
+ result = Type::Union(result, Type::Undefined(zone), zone);
}
if (state_.Contains(CompareNilICStub::NULL_TYPE)) {
- result = handle(Type::Union(result, handle(Type::Null(), isolate)),
- isolate);
+ result = Type::Union(result, Type::Null(zone), zone);
}
if (state_.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
- Type* type = map.is_null() ? Type::Detectable() : Type::Class(map);
- result = handle(Type::Union(result, handle(type, isolate)), isolate);
+ Type* type =
+ map.is_null() ? Type::Detectable(zone) : Type::Class(map, zone);
+ result = Type::Union(result, type, zone);
}
return result;
}
-Handle<Type> CompareNilICStub::GetInputType(
- Isolate* isolate,
- Handle<Map> map) {
- Handle<Type> output_type = GetType(isolate, map);
- Handle<Type> nil_type = handle(nil_value_ == kNullValue
- ? Type::Null() : Type::Undefined(), isolate);
- return handle(Type::Union(output_type, nil_type), isolate);
+Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
+ Type* output_type = GetType(zone, map);
+ Type* nil_type =
+ nil_value_ == kNullValue ? Type::Null(zone) : Type::Undefined(zone);
+ return Type::Union(output_type, nil_type, zone);
}
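
Editor's note: the CompareNilICStub hunks above replace Handle<Type> plus an Isolate* with a bare Type* plus a Zone*: types are now zone (arena) allocated and released in one shot with the zone, so no GC-visible handles are needed while composing unions. A rough standalone sketch of that ownership model, assuming invented Zone and Type classes rather than V8's:

  #include <cstdio>
  #include <string>
  #include <vector>

  struct Type;

  // Toy zone: owns every node created in it and frees them all together.
  class Zone {
   public:
    Type* New(const std::string& name);
    ~Zone();
   private:
    std::vector<Type*> nodes_;
  };

  struct Type {
    explicit Type(const std::string& n) : name(n) {}
    std::string name;

    static Type* None(Zone* zone)      { return zone->New("None"); }
    static Type* Null(Zone* zone)      { return zone->New("Null"); }
    static Type* Undefined(Zone* zone) { return zone->New("Undefined"); }
    // Union just builds another zone-allocated node; no handles involved.
    static Type* Union(Type* a, Type* b, Zone* zone) {
      return zone->New("(" + a->name + " | " + b->name + ")");
    }
  };

  Type* Zone::New(const std::string& name) {
    nodes_.push_back(new Type(name));
    return nodes_.back();
  }

  Zone::~Zone() {
    for (Type* t : nodes_) delete t;  // one-shot release when the zone dies
  }

  int main() {
    Zone zone;
    Type* t = Type::Union(Type::Null(&zone), Type::Undefined(&zone), &zone);
    std::printf("%s\n", t->name.c_str());  // (Null | Undefined)
    return 0;
  }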
@@ -941,14 +530,15 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
+void KeyedLoadDictionaryElementPlatformStub::Generate(
+ MacroAssembler* masm) {
KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
}
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
CreateAllocationSiteStub stub;
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -960,15 +550,12 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
UNREACHABLE();
break;
case DICTIONARY_ELEMENTS:
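
Editor's note: the TYPED_ARRAYS(TYPED_ARRAY_CASE) expansion above is the usual X-macro move: a single list macro is applied to a case-generating macro so every external and typed-array element kind produces a case label without writing each one by hand. A self-contained illustration of the pattern (the list here is made up, not V8's element kinds):

  #include <cstdio>

  // One list macro names every entry exactly once...
  #define COLOR_LIST(V) \
    V(Red)              \
    V(Green)            \
    V(Blue)

  // ...and different "V" macros stamp out enum values and switch cases.
  #define DEF_ENUM(name) k##name,
  enum Color { COLOR_LIST(DEF_ENUM) kColorCount };
  #undef DEF_ENUM

  const char* ColorName(Color c) {
    switch (c) {
  #define COLOR_CASE(name) case k##name: return #name;
      COLOR_LIST(COLOR_CASE)
  #undef COLOR_CASE
      default: return "unknown";
    }
  }

  int main() {
    std::printf("%s %s\n", ColorName(kRed), ColorName(kBlue));  // Red Blue
    return 0;
  }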
@@ -994,7 +581,6 @@ void ArgumentsAccessStub::PrintName(StringStream* stream) {
void CallFunctionStub::PrintName(StringStream* stream) {
stream->Add("CallFunctionStub_Args%d", argc_);
- if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
if (RecordCallTarget()) stream->Add("_Recording");
}
@@ -1005,6 +591,28 @@ void CallConstructStub::PrintName(StringStream* stream) {
}
+void ArrayConstructorStub::PrintName(StringStream* stream) {
+ stream->Add("ArrayConstructorStub");
+ switch (argument_count_) {
+ case ANY: stream->Add("_Any"); break;
+ case NONE: stream->Add("_None"); break;
+ case ONE: stream->Add("_One"); break;
+ case MORE_THAN_ONE: stream->Add("_More_Than_One"); break;
+ }
+}
+
+
+void ArrayConstructorStubBase::BasePrintName(const char* name,
+ StringStream* stream) {
+ stream->Add(name);
+ stream->Add("_");
+ stream->Add(ElementsKindToString(elements_kind()));
+ if (override_mode() == DISABLE_ALLOCATION_SITES) {
+ stream->Add("_DISABLE_ALLOCATION_SITES");
+ }
+}
+
+
bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
Types old_types(types_);
bool to_boolean_value = types_.UpdateStatus(object);
@@ -1087,8 +695,8 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub stub1(NOT_JS_FUNCTION_STUB_MODE);
StubFailureTrampolineStub stub2(JS_FUNCTION_STUB_MODE);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
+ stub2.GetCode(isolate);
}
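
Editor's note: throughout this patch the set_is_pregenerated(true) calls disappear; GenerateAheadOfTime now only has to call GetCode, whose side effect is to compile the stub and leave it in the isolate's stub cache under the stub's key. A toy memoization sketch of that idea, with invented names:

  #include <cstdio>
  #include <map>
  #include <string>

  // Stand-in for the per-isolate stub code cache: stub key -> "code object".
  static std::map<int, std::string> code_cache;

  struct Stub {
    int key;
    std::string Compile() const { return "code-for-" + std::to_string(key); }
    // GetCode compiles on the first request and reuses the cached code later.
    const std::string& GetCode() const {
      auto it = code_cache.find(key);
      if (it == code_cache.end()) it = code_cache.emplace(key, Compile()).first;
      return it->second;
    }
  };

  void GenerateAheadOfTime() {
    // Just requesting the code is enough to have it ready before it is needed.
    Stub{1}.GetCode();
    Stub{2}.GetCode();
  }

  int main() {
    GenerateAheadOfTime();
    std::printf("%zu stubs pregenerated\n", code_cache.size());  // 2
    return 0;
  }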
@@ -1133,6 +741,40 @@ void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
}
+void FastNewContextStub::InstallDescriptors(Isolate* isolate) {
+ FastNewContextStub stub(FastNewContextStub::kMaximumSlots);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
+void BinaryOpICStub::InstallDescriptors(Isolate* isolate) {
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
+void BinaryOpWithAllocationSiteStub::InstallDescriptors(Isolate* isolate) {
+ BinaryOpWithAllocationSiteStub stub(Token::ADD, NO_OVERWRITE);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
+void StringAddStub::InstallDescriptors(Isolate* isolate) {
+ StringAddStub stub(STRING_ADD_CHECK_NONE, NOT_TENURED);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
+void RegExpConstructResultStub::InstallDescriptors(Isolate* isolate) {
+ RegExpConstructResultStub stub;
+ InstallDescriptor(isolate, &stub);
+}
+
+
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: argument_count_(ANY) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
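
Editor's note: each of the new InstallDescriptors helpers above builds one representative stub and registers its interface descriptor with the isolate, so later lookups by major key find an already-initialized descriptor. A compact sketch of that registration pattern, with the data layout invented for illustration:

  #include <cassert>
  #include <cstdio>

  enum MajorKey { kFastNewContext, kBinaryOpIC, kStringAdd, kNumMajorKeys };

  struct Descriptor {
    int register_param_count = -1;  // -1 means "not yet initialized"
    bool initialized() const { return register_param_count >= 0; }
  };

  // Stand-in for the per-isolate descriptor table, indexed by major key.
  static Descriptor descriptors[kNumMajorKeys];

  struct Stub {
    MajorKey major;
    int params;
    void InitializeInterfaceDescriptor(Descriptor* d) const {
      d->register_param_count = params;
    }
  };

  void InstallDescriptor(const Stub& stub) {
    Descriptor* d = &descriptors[stub.major];
    if (!d->initialized()) stub.InitializeInterfaceDescriptor(d);
  }

  int main() {
    InstallDescriptor(Stub{kStringAdd, 2});  // one canonical instance per key
    assert(descriptors[kStringAdd].initialized());
    std::printf("StringAdd params: %d\n",
                descriptors[kStringAdd].register_param_count);  // 2
    return 0;
  }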
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 80d99d8b68..07e34be578 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -41,7 +41,9 @@ namespace internal {
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
V(CallConstruct) \
- V(BinaryOp) \
+ V(BinaryOpIC) \
+ V(BinaryOpICWithAllocationSite) \
+ V(BinaryOpWithAllocationSite) \
V(StringAdd) \
V(SubString) \
V(StringCompare) \
@@ -55,7 +57,6 @@ namespace internal {
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
- V(TranscendentalCache) \
V(Instanceof) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
@@ -63,7 +64,6 @@ namespace internal {
V(Interrupt) \
V(FastNewClosure) \
V(FastNewContext) \
- V(FastNewBlockContext) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CreateAllocationSite) \
@@ -76,6 +76,7 @@ namespace internal {
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
+ V(ArrayPush) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -93,12 +94,14 @@ namespace internal {
V(InternalArrayConstructor) \
V(ProfileEntryHook) \
V(StoreGlobal) \
+ V(CallApiFunction) \
+ V(CallApiGetter) \
/* IC Handler stubs */ \
V(LoadField) \
V(KeyedLoadField)
// List of code stubs only used on ARM platforms.
-#if V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_A64)
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
V(SetProperty) \
@@ -112,7 +115,9 @@ namespace internal {
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry) \
- V(DirectCEntry)
+ V(DirectCEntry) \
+ V(StoreRegistersState) \
+ V(RestoreRegistersState)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
@@ -123,13 +128,11 @@ namespace internal {
CODE_STUB_LIST_ARM(V) \
CODE_STUB_LIST_MIPS(V)
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-
// Stub is base classes of all stubs.
class CodeStub BASE_EMBEDDED {
public:
enum Major {
+ UninitializedMajorKey = 0,
#define DEF_ENUM(name) name,
CODE_STUB_LIST(DEF_ENUM)
#undef DEF_ENUM
@@ -158,16 +161,6 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
- bool CompilingCallsToThisStubIsGCSafe(Isolate* isolate) {
- bool is_pregenerated = IsPregenerated(isolate);
- Code* code = NULL;
- CHECK(!is_pregenerated || FindCodeInCache(&code, isolate));
- return is_pregenerated;
- }
-
- // See comment above, where Instanceof is defined.
- virtual bool IsPregenerated(Isolate* isolate) { return false; }
-
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void GenerateFPStubs(Isolate* isolate);
@@ -189,15 +182,12 @@ class CodeStub BASE_EMBEDDED {
virtual InlineCacheState GetICState() {
return UNINITIALIZED;
}
- virtual Code::ExtraICState GetExtraICState() {
- return Code::kNoExtraICState;
+ virtual ExtraICState GetExtraICState() {
+ return kNoExtraICState;
}
virtual Code::StubType GetStubType() {
return Code::NORMAL;
}
- virtual int GetStubFlags() {
- return -1;
- }
virtual void PrintName(StringStream* stream);
@@ -254,6 +244,7 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
+ STATIC_ASSERT(NUMBER_OF_IDS < (1 << kStubMajorKeyBits));
class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
class MinorKeyBits: public BitField<uint32_t,
kStubMajorKeyBits, kStubMinorKeyBits> {}; // NOLINT
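
Editor's note: the new STATIC_ASSERT pins down the invariant that every major key, including the UninitializedMajorKey sentinel added above, still fits in the kStubMajorKeyBits-wide slice of the packed stub key. The BitField pattern these stubs use everywhere can be sketched standalone like this (simplified, not the real v8::internal::BitField):

  #include <cstdint>
  #include <cstdio>

  // Packs a value of type T into bits [shift, shift + size) of a uint32_t.
  template <class T, int shift, int size>
  struct BitField {
    static const uint32_t kMask = ((1u << size) - 1u) << shift;
    static uint32_t encode(T value) {
      return (static_cast<uint32_t>(value) << shift) & kMask;
    }
    static T decode(uint32_t packed) {
      return static_cast<T>((packed & kMask) >> shift);
    }
  };

  enum Major { kUninitialized = 0, kStringAdd, kBinaryOpIC, NUMBER_OF_IDS };
  static const int kMajorBits = 6;
  static_assert(NUMBER_OF_IDS < (1 << kMajorBits), "major key must fit its field");

  typedef BitField<Major, 0, kMajorBits> MajorBits;
  typedef BitField<int, kMajorBits, 10> MinorBits;

  int main() {
    uint32_t key = MajorBits::encode(kStringAdd) | MinorBits::encode(42);
    std::printf("major=%d minor=%d\n", MajorBits::decode(key), MinorBits::decode(key));
    return 0;
  }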
@@ -276,31 +267,34 @@ class PlatformCodeStub : public CodeStub {
enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
-
+enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };
struct CodeStubInterfaceDescriptor {
CodeStubInterfaceDescriptor();
int register_param_count_;
+
Register stack_parameter_count_;
// if hint_stack_parameter_count_ > 0, the code stub can optimize the
// return sequence. Default value is -1, which means it is ignored.
int hint_stack_parameter_count_;
StubFunctionMode function_mode_;
Register* register_params_;
+
Address deoptimization_handler_;
+ HandlerArgumentsMode handler_arguments_mode_;
+
+ bool initialized() const { return register_param_count_ >= 0; }
int environment_length() const {
- if (stack_parameter_count_.is_valid()) {
- return register_param_count_ + 1;
- }
return register_param_count_;
}
- bool initialized() const { return register_param_count_ >= 0; }
-
void SetMissHandler(ExternalReference handler) {
miss_handler_ = handler;
has_miss_handler_ = true;
+ // Our miss handler infrastructure doesn't currently support
+ // variable stack parameter counts.
+ ASSERT(!stack_parameter_count_.is_valid());
}
ExternalReference miss_handler() {
@@ -312,17 +306,61 @@ struct CodeStubInterfaceDescriptor {
return has_miss_handler_;
}
+ Register GetParameterRegister(int index) const {
+ return register_params_[index];
+ }
+
+ bool IsParameterCountRegister(int index) {
+ return GetParameterRegister(index).is(stack_parameter_count_);
+ }
+
+ int GetHandlerParameterCount() {
+ int params = environment_length();
+ if (handler_arguments_mode_ == PASS_ARGUMENTS) {
+ params += 1;
+ }
+ return params;
+ }
+
private:
ExternalReference miss_handler_;
bool has_miss_handler_;
};
-// A helper to make up for the fact that type Register is not fully
-// defined outside of the platform directories
-#define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \
- ((index) == (descriptor)->register_param_count_) \
- ? (descriptor)->stack_parameter_count_ \
- : (descriptor)->register_params_[(index)]
+
+struct PlatformCallInterfaceDescriptor;
+
+
+struct CallInterfaceDescriptor {
+ CallInterfaceDescriptor()
+ : register_param_count_(-1),
+ register_params_(NULL),
+ param_representations_(NULL),
+ platform_specific_descriptor_(NULL) { }
+
+ bool initialized() const { return register_param_count_ >= 0; }
+
+ int environment_length() const {
+ return register_param_count_;
+ }
+
+ Representation GetParameterRepresentation(int index) const {
+ return param_representations_[index];
+ }
+
+ Register GetParameterRegister(int index) const {
+ return register_params_[index];
+ }
+
+ PlatformCallInterfaceDescriptor* platform_specific_descriptor() const {
+ return platform_specific_descriptor_;
+ }
+
+ int register_param_count_;
+ Register* register_params_;
+ Representation* param_representations_;
+ PlatformCallInterfaceDescriptor* platform_specific_descriptor_;
+};
class HydrogenCodeStub : public CodeStub {
@@ -395,25 +433,14 @@ class RuntimeCallHelper {
};
-// TODO(bmeurer): Move to the StringAddStub declaration once we're
-// done with the translation to a hydrogen code stub.
-enum StringAddFlags {
- // Omit both parameter checks.
- STRING_ADD_CHECK_NONE = 0,
- // Check left parameter.
- STRING_ADD_CHECK_LEFT = 1 << 0,
- // Check right parameter.
- STRING_ADD_CHECK_RIGHT = 1 << 1,
- // Check both parameters.
- STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
-};
-
} } // namespace v8::internal
#if V8_TARGET_ARCH_IA32
#include "ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/code-stubs-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/code-stubs-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -518,7 +545,7 @@ class FastNewClosureStub : public HydrogenCodeStub {
};
-class FastNewContextStub : public PlatformCodeStub {
+class FastNewContextStub V8_FINAL : public HydrogenCodeStub {
public:
static const int kMaximumSlots = 64;
@@ -526,75 +553,24 @@ class FastNewContextStub : public PlatformCodeStub {
ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
- void Generate(MacroAssembler* masm);
-
- private:
- int slots_;
-
- Major MajorKey() { return FastNewContext; }
- int MinorKey() { return slots_; }
-};
-
-
-class FastNewBlockContextStub : public PlatformCodeStub {
- public:
- static const int kMaximumSlots = 64;
-
- explicit FastNewBlockContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int slots_;
-
- Major MajorKey() { return FastNewBlockContext; }
- int MinorKey() { return slots_; }
-};
-
-class StoreGlobalStub : public HydrogenCodeStub {
- public:
- StoreGlobalStub(StrictModeFlag strict_mode, bool is_constant) {
- bit_field_ = StrictModeBits::encode(strict_mode) |
- IsConstantBits::encode(is_constant);
- }
-
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- virtual Code::Kind GetCodeKind() const { return Code::STORE_IC; }
- virtual InlineCacheState GetICState() { return MONOMORPHIC; }
- virtual Code::ExtraICState GetExtraICState() { return bit_field_; }
-
- bool is_constant() {
- return IsConstantBits::decode(bit_field_);
- }
- void set_is_constant(bool value) {
- bit_field_ = IsConstantBits::update(bit_field_, value);
- }
-
- Representation representation() {
- return Representation::FromKind(RepresentationBits::decode(bit_field_));
- }
- void set_representation(Representation r) {
- bit_field_ = RepresentationBits::update(bit_field_, r.kind());
- }
+ static void InstallDescriptors(Isolate* isolate);
- private:
- virtual int NotMissMinorKey() { return GetExtraICState(); }
- Major MajorKey() { return StoreGlobal; }
+ int slots() const { return slots_; }
- class StrictModeBits: public BitField<StrictModeFlag, 0, 1> {};
- class IsConstantBits: public BitField<bool, 1, 1> {};
- class RepresentationBits: public BitField<Representation::Kind, 2, 8> {};
+ virtual Major MajorKey() V8_OVERRIDE { return FastNewContext; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return slots_; }
- int bit_field_;
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kFunction = 0;
- DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
+ private:
+ int slots_;
};
@@ -704,8 +680,6 @@ class CreateAllocationSiteStub : public HydrogenCodeStub {
virtual Handle<Code> GenerateCode(Isolate* isolate);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
-
static void GenerateAheadOfTime(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
@@ -776,6 +750,7 @@ class ArrayConstructorStub: public PlatformCodeStub {
private:
void GenerateDispatchToArrayStub(MacroAssembler* masm,
AllocationSiteOverrideMode mode);
+ virtual void PrintName(StringStream* stream);
virtual CodeStub::Major MajorKey() { return ArrayConstructor; }
virtual int MinorKey() { return argument_count_; }
@@ -867,8 +842,8 @@ class StoreICStub: public ICStub {
: ICStub(kind), strict_mode_(strict_mode) { }
protected:
- virtual Code::ExtraICState GetExtraICState() {
- return strict_mode_;
+ virtual ExtraICState GetExtraICState() {
+ return StoreIC::ComputeExtraICState(strict_mode_);
}
private:
@@ -899,7 +874,6 @@ class HICStub: public HydrogenCodeStub {
virtual InlineCacheState GetICState() { return MONOMORPHIC; }
protected:
- HICStub() { }
class KindBits: public BitField<Code::Kind, 0, 4> {};
virtual Code::Kind kind() const = 0;
};
@@ -908,17 +882,18 @@ class HICStub: public HydrogenCodeStub {
class HandlerStub: public HICStub {
public:
virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
- virtual int GetStubFlags() { return kind(); }
+ virtual ExtraICState GetExtraICState() { return kind(); }
protected:
HandlerStub() : HICStub() { }
+ virtual int NotMissMinorKey() { return bit_field_; }
+ int bit_field_;
};
class LoadFieldStub: public HandlerStub {
public:
- LoadFieldStub(bool inobject, int index, Representation representation)
- : HandlerStub() {
+ LoadFieldStub(bool inobject, int index, Representation representation) {
Initialize(Code::LOAD_IC, inobject, index, representation);
}
@@ -952,7 +927,7 @@ class LoadFieldStub: public HandlerStub {
return UnboxedDoubleBits::decode(bit_field_);
}
- virtual Code::StubType GetStubType() { return Code::FIELD; }
+ virtual Code::StubType GetStubType() { return Code::FAST; }
protected:
LoadFieldStub() : HandlerStub() { }
@@ -974,9 +949,108 @@ class LoadFieldStub: public HandlerStub {
class IndexBits: public BitField<int, 5, 11> {};
class UnboxedDoubleBits: public BitField<bool, 16, 1> {};
virtual CodeStub::Major MajorKey() { return LoadField; }
- virtual int NotMissMinorKey() { return bit_field_; }
+};
+
+
+class StoreGlobalStub : public HandlerStub {
+ public:
+ explicit StoreGlobalStub(bool is_constant, bool check_global) {
+ bit_field_ = IsConstantBits::encode(is_constant) |
+ CheckGlobalBits::encode(check_global);
+ }
+
+ static Handle<HeapObject> global_placeholder(Isolate* isolate) {
+ return isolate->factory()->uninitialized_value();
+ }
+
+ Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
+ GlobalObject* global,
+ PropertyCell* cell) {
+ Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
+ if (check_global()) {
+ // Replace the placeholder cell and global object map with the actual
+ // global cell and receiver map.
+ code->ReplaceNthObject(1, global_placeholder(isolate)->map(), global);
+ code->ReplaceNthObject(1, isolate->heap()->meta_map(), global->map());
+ }
+ Map* cell_map = isolate->heap()->global_property_cell_map();
+ code->ReplaceNthObject(1, cell_map, cell);
+ return code;
+ }
+
+ virtual Code::Kind kind() const { return Code::STORE_IC; }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ bool is_constant() const {
+ return IsConstantBits::decode(bit_field_);
+ }
+ bool check_global() const {
+ return CheckGlobalBits::decode(bit_field_);
+ }
+ void set_is_constant(bool value) {
+ bit_field_ = IsConstantBits::update(bit_field_, value);
+ }
+
+ Representation representation() {
+ return Representation::FromKind(RepresentationBits::decode(bit_field_));
+ }
+ void set_representation(Representation r) {
+ bit_field_ = RepresentationBits::update(bit_field_, r.kind());
+ }
+
+ private:
+ Major MajorKey() { return StoreGlobal; }
+
+ class IsConstantBits: public BitField<bool, 0, 1> {};
+ class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
+ class CheckGlobalBits: public BitField<bool, 9, 1> {};
+
+ DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
+};
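
Editor's note: GetCodeCopyFromTemplate above is the "template stub" trick: the stub is compiled once against placeholder objects, and each installation copies the code and rewrites the nth embedded placeholder to the concrete cell, map, or allocation site. A minimal stand-in for that patching step, using plain strings instead of relocated code objects:

  #include <cstdio>
  #include <string>
  #include <vector>

  // The "code" here is reduced to its list of embedded constants.
  struct Code {
    std::vector<std::string> constants;

    // Replace the nth occurrence (1-based, as in ReplaceNthObject) of a
    // placeholder constant with the concrete object for this call site.
    void ReplaceNth(int n, const std::string& placeholder,
                    const std::string& replacement) {
      for (std::string& c : constants) {
        if (c == placeholder && --n == 0) {
          c = replacement;
          return;
        }
      }
    }
  };

  Code GetCodeCopyFromTemplate(const Code& tmpl, const std::string& cell) {
    Code copy = tmpl;  // compile once, copy and patch per installation
    copy.ReplaceNth(1, "<placeholder-cell>", cell);
    return copy;
  }

  int main() {
    Code tmpl{{"<placeholder-cell>", "some-map"}};
    Code patched = GetCodeCopyFromTemplate(tmpl, "global-cell@0x1234");
    std::printf("%s\n", patched.constants[0].c_str());  // global-cell@0x1234
    return 0;
  }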
+
+
+class CallApiFunctionStub : public PlatformCodeStub {
+ public:
+ CallApiFunctionStub(bool is_store,
+ bool call_data_undefined,
+ int argc) {
+ bit_field_ =
+ IsStoreBits::encode(is_store) |
+ CallDataUndefinedBits::encode(call_data_undefined) |
+ ArgumentBits::encode(argc);
+ ASSERT(!is_store || argc == 1);
+ }
+
+ private:
+ virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
+ virtual Major MajorKey() V8_OVERRIDE { return CallApiFunction; }
+ virtual int MinorKey() V8_OVERRIDE { return bit_field_; }
+
+ class IsStoreBits: public BitField<bool, 0, 1> {};
+ class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
+ class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {};
int bit_field_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallApiFunctionStub);
+};
+
+
+class CallApiGetterStub : public PlatformCodeStub {
+ public:
+ CallApiGetterStub() {}
+
+ private:
+ virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
+ virtual Major MajorKey() V8_OVERRIDE { return CallApiGetter; }
+ virtual int MinorKey() V8_OVERRIDE { return 0; }
+
+ DISALLOW_COPY_AND_ASSIGN(CallApiGetterStub);
};
@@ -998,177 +1072,218 @@ class KeyedLoadFieldStub: public LoadFieldStub {
};
-class BinaryOpStub: public HydrogenCodeStub {
+class BinaryOpICStub : public HydrogenCodeStub {
public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : HydrogenCodeStub(UNINITIALIZED), op_(op), mode_(mode) {
- ASSERT(op <= LAST_TOKEN && op >= FIRST_TOKEN);
- Initialize();
- }
-
- explicit BinaryOpStub(Code::ExtraICState state)
- : op_(decode_token(OpBits::decode(state))),
- mode_(OverwriteModeField::decode(state)),
- fixed_right_arg_(
- Maybe<int>(HasFixedRightArgBits::decode(state),
- decode_arg_value(FixedRightArgValueBits::decode(state)))),
- left_state_(LeftStateField::decode(state)),
- right_state_(fixed_right_arg_.has_value
- ? ((fixed_right_arg_.value <= Smi::kMaxValue) ? SMI : INT32)
- : RightStateField::decode(state)),
- result_state_(ResultStateField::decode(state)) {
- // We don't deserialize the SSE2 Field, since this is only used to be able
- // to include SSE2 as well as non-SSE2 versions in the snapshot. For code
- // generation we always want it to reflect the current state.
- ASSERT(!fixed_right_arg_.has_value ||
- can_encode_arg_value(fixed_right_arg_.value));
- }
+ BinaryOpICStub(Token::Value op, OverwriteMode mode)
+ : HydrogenCodeStub(UNINITIALIZED), state_(op, mode) {}
- static const int FIRST_TOKEN = Token::BIT_OR;
- static const int LAST_TOKEN = Token::MOD;
+ explicit BinaryOpICStub(const BinaryOpIC::State& state) : state_(state) {}
static void GenerateAheadOfTime(Isolate* isolate);
+
virtual void InitializeInterfaceDescriptor(
- Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
- static void InitializeForIsolate(Isolate* isolate) {
- BinaryOpStub binopStub(UNINITIALIZED);
- binopStub.InitializeInterfaceDescriptor(
- isolate, isolate->code_stub_interface_descriptor(CodeStub::BinaryOp));
+ Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
+ return Code::BINARY_OP_IC;
}
- virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
- virtual InlineCacheState GetICState() {
- if (Max(left_state_, right_state_) == NONE) {
- return ::v8::internal::UNINITIALIZED;
- }
- if (Max(left_state_, right_state_) == GENERIC) return MEGAMORPHIC;
- return MONOMORPHIC;
+ virtual InlineCacheState GetICState() V8_FINAL V8_OVERRIDE {
+ return state_.GetICState();
}
- virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ virtual ExtraICState GetExtraICState() V8_FINAL V8_OVERRIDE {
+ return state_.GetExtraICState();
+ }
+
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_FINAL V8_OVERRIDE {
ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
}
- virtual Code::ExtraICState GetExtraICState() {
- bool sse_field = Max(result_state_, Max(left_state_, right_state_)) > SMI &&
- CpuFeatures::IsSafeForSnapshot(SSE2);
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+
+ const BinaryOpIC::State& state() const { return state_; }
- return OpBits::encode(encode_token(op_))
- | LeftStateField::encode(left_state_)
- | RightStateField::encode(fixed_right_arg_.has_value
- ? NONE : right_state_)
- | ResultStateField::encode(result_state_)
- | HasFixedRightArgBits::encode(fixed_right_arg_.has_value)
- | FixedRightArgValueBits::encode(fixed_right_arg_.has_value
- ? encode_arg_value(
- fixed_right_arg_.value)
- : 0)
- | SSE2Field::encode(sse_field)
- | OverwriteModeField::encode(mode_);
+ virtual void PrintState(StringStream* stream) V8_FINAL V8_OVERRIDE;
+
+ virtual Major MajorKey() V8_OVERRIDE { return BinaryOpIC; }
+ virtual int NotMissMinorKey() V8_FINAL V8_OVERRIDE {
+ return GetExtraICState();
}
- bool CanReuseDoubleBox() {
- return result_state_ <= NUMBER && result_state_ > SMI &&
- ((left_state_ > SMI && left_state_ <= NUMBER &&
- mode_ == OVERWRITE_LEFT) ||
- (right_state_ > SMI && right_state_ <= NUMBER &&
- mode_ == OVERWRITE_RIGHT));
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kLeft = 0;
+ static const int kRight = 1;
+
+ private:
+ static void GenerateAheadOfTime(Isolate* isolate,
+ const BinaryOpIC::State& state);
+
+ BinaryOpIC::State state_;
+
+ DISALLOW_COPY_AND_ASSIGN(BinaryOpICStub);
+};
+
+
+class ArrayPushStub: public PlatformCodeStub {
+ public:
+ ArrayPushStub(ElementsKind kind, int argc) {
+ bit_field_ = ElementsKindBits::encode(kind) | ArgcBits::encode(argc);
}
- bool HasSideEffects(Isolate* isolate) const {
- Handle<Type> left = GetLeftType(isolate);
- Handle<Type> right = GetRightType(isolate);
- return left->Maybe(Type::Receiver()) || right->Maybe(Type::Receiver());
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int arguments_count() { return ArgcBits::decode(bit_field_); }
+ ElementsKind elements_kind() {
+ return ElementsKindBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual CodeStub::Major MajorKey() { return ArrayPush; }
+ virtual int MinorKey() { return bit_field_; }
+
+ class ElementsKindBits: public BitField<ElementsKind, 0, 3> {};
+ class ArgcBits: public BitField<int, 3, Code::kArgumentsBits> {};
+
+ int bit_field_;
+};
+
+
+// TODO(bmeurer): Merge this into the BinaryOpICStub once we have proper tail
+// call support for stubs in Hydrogen.
+class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {
+ public:
+ explicit BinaryOpICWithAllocationSiteStub(const BinaryOpIC::State& state)
+ : state_(state) {}
- Maybe<Handle<Object> > Result(Handle<Object> left,
- Handle<Object> right,
- Isolate* isolate);
+ static void GenerateAheadOfTime(Isolate* isolate);
- Token::Value operation() const { return op_; }
- OverwriteMode mode() const { return mode_; }
- Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+ Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
+ Handle<AllocationSite> allocation_site) {
+ Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
+ // Replace the placeholder oddball with the actual allocation site.
+ code->ReplaceNthObject(1, isolate->heap()->oddball_map(), *allocation_site);
+ return code;
+ }
- Handle<Type> GetLeftType(Isolate* isolate) const;
- Handle<Type> GetRightType(Isolate* isolate) const;
- Handle<Type> GetResultType(Isolate* isolate) const;
+ virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
+ return Code::BINARY_OP_IC;
+ }
- void UpdateStatus(Handle<Object> left,
- Handle<Object> right,
- Maybe<Handle<Object> > result);
+ virtual InlineCacheState GetICState() V8_OVERRIDE {
+ return state_.GetICState();
+ }
- void PrintState(StringStream* stream);
+ virtual ExtraICState GetExtraICState() V8_OVERRIDE {
+ return state_.GetExtraICState();
+ }
+
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
+ }
+
+ virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
+
+ virtual void PrintState(StringStream* stream) V8_OVERRIDE;
+
+ virtual Major MajorKey() V8_OVERRIDE { return BinaryOpICWithAllocationSite; }
+ virtual int MinorKey() V8_OVERRIDE { return GetExtraICState(); }
private:
- explicit BinaryOpStub(InitializationState state) : HydrogenCodeStub(state),
- op_(Token::ADD),
- mode_(NO_OVERWRITE) {
- Initialize();
- }
- void Initialize();
-
- enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
-
- // We truncate the last bit of the token.
- STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 5));
- class LeftStateField: public BitField<State, 0, 3> {};
- // When fixed right arg is set, we don't need to store the right state.
- // Thus the two fields can overlap.
- class HasFixedRightArgBits: public BitField<bool, 4, 1> {};
- class FixedRightArgValueBits: public BitField<int, 5, 4> {};
- class RightStateField: public BitField<State, 5, 3> {};
- class ResultStateField: public BitField<State, 9, 3> {};
- class SSE2Field: public BitField<bool, 12, 1> {};
- class OverwriteModeField: public BitField<OverwriteMode, 13, 2> {};
- class OpBits: public BitField<int, 15, 5> {};
-
- virtual CodeStub::Major MajorKey() { return BinaryOp; }
- virtual int NotMissMinorKey() { return GetExtraICState(); }
+ static void GenerateAheadOfTime(Isolate* isolate,
+ const BinaryOpIC::State& state);
- static Handle<Type> StateToType(State state,
- Isolate* isolate);
+ BinaryOpIC::State state_;
+
+ DISALLOW_COPY_AND_ASSIGN(BinaryOpICWithAllocationSiteStub);
+};
- static void Generate(Token::Value op,
- State left,
- int right,
- State result,
- OverwriteMode mode,
- Isolate* isolate);
- static void Generate(Token::Value op,
- State left,
- State right,
- State result,
- OverwriteMode mode,
- Isolate* isolate);
+class BinaryOpWithAllocationSiteStub V8_FINAL : public BinaryOpICStub {
+ public:
+ BinaryOpWithAllocationSiteStub(Token::Value op, OverwriteMode mode)
+ : BinaryOpICStub(op, mode) {}
- void UpdateStatus(Handle<Object> object,
- State* state);
+ explicit BinaryOpWithAllocationSiteStub(const BinaryOpIC::State& state)
+ : BinaryOpICStub(state) {}
- bool can_encode_arg_value(int32_t value) const;
- int encode_arg_value(int32_t value) const;
- int32_t decode_arg_value(int value) const;
- int encode_token(Token::Value op) const;
- Token::Value decode_token(int op) const;
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
- bool has_int_result() const {
- return op_ == Token::BIT_XOR || op_ == Token::BIT_AND ||
- op_ == Token::BIT_OR || op_ == Token::SAR || op_ == Token::SHL;
+ virtual Code::Kind GetCodeKind() const V8_FINAL V8_OVERRIDE {
+ return Code::STUB;
}
- const char* StateToName(State state);
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
- void PrintBaseName(StringStream* stream);
+ virtual Major MajorKey() V8_OVERRIDE {
+ return BinaryOpWithAllocationSite;
+ }
- Token::Value op_;
- OverwriteMode mode_;
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kAllocationSite = 0;
+ static const int kLeft = 1;
+ static const int kRight = 2;
+};
+
+
+enum StringAddFlags {
+ // Omit both parameter checks.
+ STRING_ADD_CHECK_NONE = 0,
+ // Check left parameter.
+ STRING_ADD_CHECK_LEFT = 1 << 0,
+ // Check right parameter.
+ STRING_ADD_CHECK_RIGHT = 1 << 1,
+ // Check both parameters.
+ STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
+};
+
+
+class StringAddStub V8_FINAL : public HydrogenCodeStub {
+ public:
+ StringAddStub(StringAddFlags flags, PretenureFlag pretenure_flag)
+ : bit_field_(StringAddFlagsBits::encode(flags) |
+ PretenureFlagBits::encode(pretenure_flag)) {}
+
+ StringAddFlags flags() const {
+ return StringAddFlagsBits::decode(bit_field_);
+ }
+
+ PretenureFlag pretenure_flag() const {
+ return PretenureFlagBits::decode(bit_field_);
+ }
- Maybe<int> fixed_right_arg_;
- State left_state_;
- State right_state_;
- State result_state_;
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kLeft = 0;
+ static const int kRight = 1;
+
+ private:
+ class StringAddFlagsBits: public BitField<StringAddFlags, 0, 2> {};
+ class PretenureFlagBits: public BitField<PretenureFlag, 2, 1> {};
+ uint32_t bit_field_;
+
+ virtual Major MajorKey() V8_OVERRIDE { return StringAdd; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return bit_field_; }
+
+ virtual void PrintBaseName(StringStream* stream) V8_OVERRIDE;
+
+ DISALLOW_COPY_AND_ASSIGN(StringAddStub);
};
@@ -1195,10 +1310,6 @@ class ICCompareStub: public PlatformCodeStub {
CompareIC::State* handler_state,
Token::Value* op);
- static CompareIC::State CompareState(int minor_key) {
- return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
- }
-
virtual InlineCacheState GetICState();
private:
@@ -1243,12 +1354,12 @@ class ICCompareStub: public PlatformCodeStub {
class CompareNilICStub : public HydrogenCodeStub {
public:
- Handle<Type> GetType(Isolate* isolate, Handle<Map> map = Handle<Map>());
- Handle<Type> GetInputType(Isolate* isolate, Handle<Map> map);
+ Type* GetType(Zone* zone, Handle<Map> map = Handle<Map>());
+ Type* GetInputType(Zone* zone, Handle<Map> map);
explicit CompareNilICStub(NilValue nil) : nil_value_(nil) { }
- CompareNilICStub(Code::ExtraICState ic_state,
+ CompareNilICStub(ExtraICState ic_state,
InitializationState init_state = INITIALIZED)
: HydrogenCodeStub(init_state),
nil_value_(NilValueField::decode(ic_state)),
@@ -1285,7 +1396,7 @@ class CompareNilICStub : public HydrogenCodeStub {
virtual Handle<Code> GenerateCode(Isolate* isolate);
- virtual Code::ExtraICState GetExtraICState() {
+ virtual ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) |
TypesField::encode(state_.ToIntegral());
}
@@ -1351,7 +1462,6 @@ class CEntryStub : public PlatformCodeStub {
// time, so it's OK to call it from other stubs that can't cope with GC during
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateAheadOfTime(Isolate* isolate);
protected:
@@ -1452,15 +1562,28 @@ class RegExpExecStub: public PlatformCodeStub {
};
-class RegExpConstructResultStub: public PlatformCodeStub {
+class RegExpConstructResultStub V8_FINAL : public HydrogenCodeStub {
public:
RegExpConstructResultStub() { }
- private:
- Major MajorKey() { return RegExpConstructResult; }
- int MinorKey() { return 0; }
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
- void Generate(MacroAssembler* masm);
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ virtual Major MajorKey() V8_OVERRIDE { return RegExpConstructResult; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return 0; }
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kLength = 0;
+ static const int kIndex = 1;
+ static const int kInput = 2;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RegExpConstructResultStub);
};
@@ -1495,12 +1618,16 @@ class CallFunctionStub: public PlatformCodeStub {
return FlagBits::encode(flags_) | ArgcBits::encode(argc_);
}
- bool ReceiverMightBeImplicit() {
- return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
+ bool RecordCallTarget() {
+ return flags_ == RECORD_CALL_TARGET;
}
- bool RecordCallTarget() {
- return (flags_ & RECORD_CALL_TARGET) != 0;
+ bool CallAsMethod() {
+ return flags_ == CALL_AS_METHOD || flags_ == WRAP_AND_CALL;
+ }
+
+ bool NeedsChecks() {
+ return flags_ != WRAP_AND_CALL;
}
};
@@ -1526,6 +1653,10 @@ class CallConstructStub: public PlatformCodeStub {
bool RecordCallTarget() {
return (flags_ & RECORD_CALL_TARGET) != 0;
}
+
+ bool CallAsMethod() {
+ return (flags_ & CALL_AS_METHOD) != 0;
+ }
};
@@ -1703,27 +1834,27 @@ class StringCharAtGenerator {
};
-class AllowStubCallsScope {
+class KeyedLoadDictionaryElementStub : public HydrogenCodeStub {
public:
- AllowStubCallsScope(MacroAssembler* masm, bool allow)
- : masm_(masm), previous_allow_(masm->allow_stub_calls()) {
- masm_->set_allow_stub_calls(allow);
- }
- ~AllowStubCallsScope() {
- masm_->set_allow_stub_calls(previous_allow_);
- }
+ KeyedLoadDictionaryElementStub() {}
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
- MacroAssembler* masm_;
- bool previous_allow_;
+ Major MajorKey() { return KeyedLoadElement; }
+ int NotMissMinorKey() { return DICTIONARY_ELEMENTS; }
- DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
};
-class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
+class KeyedLoadDictionaryElementPlatformStub : public PlatformCodeStub {
public:
- KeyedLoadDictionaryElementStub() {}
+ KeyedLoadDictionaryElementPlatformStub() {}
void Generate(MacroAssembler* masm);
@@ -1731,7 +1862,7 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
Major MajorKey() { return KeyedLoadElement; }
int MinorKey() { return DICTIONARY_ELEMENTS; }
- DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementPlatformStub);
};
@@ -1742,23 +1873,21 @@ class DoubleToIStub : public PlatformCodeStub {
int offset,
bool is_truncating,
bool skip_fastpath = false) : bit_field_(0) {
- bit_field_ = SourceRegisterBits::encode(source.code_) |
- DestinationRegisterBits::encode(destination.code_) |
+ bit_field_ = SourceRegisterBits::encode(source.code()) |
+ DestinationRegisterBits::encode(destination.code()) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath) |
SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
- CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
+ CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
}
Register source() {
- Register result = { SourceRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(SourceRegisterBits::decode(bit_field_));
}
Register destination() {
- Register result = { DestinationRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(DestinationRegisterBits::decode(bit_field_));
}
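
Editor's note: the DoubleToIStub change above swaps brace-initializing a Register from a decoded bit-field for the Register::from_code factory, which can validate the register code before handing the register out. Roughly (a guess at the shape, not the real register classes):

  #include <cassert>
  #include <cstdio>

  const int kNumRegisters = 16;

  struct Register {
    int code_;

    int code() const { return code_; }
    bool is(Register other) const { return code_ == other.code_; }

    // Factory that checks the decoded value instead of trusting it blindly,
    // unlike the old `Register r = { raw_code };` aggregate initialization.
    static Register from_code(int code) {
      assert(code >= 0 && code < kNumRegisters);
      Register r = { code };
      return r;
    }
  };

  int main() {
    Register source = Register::from_code(3);
    Register dest = Register::from_code(3);
    std::printf("same register: %d\n", source.is(dest));  // 1
    return 0;
  }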
bool is_truncating() {
@@ -1916,26 +2045,17 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
};
-enum ContextCheckMode {
- CONTEXT_CHECK_REQUIRED,
- CONTEXT_CHECK_NOT_REQUIRED,
- LAST_CONTEXT_CHECK_MODE = CONTEXT_CHECK_NOT_REQUIRED
-};
-
-
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
- ArrayConstructorStubBase(ElementsKind kind, ContextCheckMode context_mode,
+ ArrayConstructorStubBase(ElementsKind kind,
AllocationSiteOverrideMode override_mode) {
// It only makes sense to override local allocation site behavior
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
- ASSERT(!(FLAG_track_allocation_sites &&
- override_mode == DISABLE_ALLOCATION_SITES) ||
+ ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
- AllocationSiteOverrideModeBits::encode(override_mode) |
- ContextCheckModeBits::encode(context_mode);
+ AllocationSiteOverrideModeBits::encode(override_mode);
}
ElementsKind elements_kind() const {
@@ -1946,33 +2066,25 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
return AllocationSiteOverrideModeBits::decode(bit_field_);
}
- ContextCheckMode context_mode() const {
- return ContextCheckModeBits::decode(bit_field_);
- }
-
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE {
- // We only pre-generate stubs that verify correct context
- return context_mode() == CONTEXT_CHECK_REQUIRED;
- }
-
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kConstructor = 0;
- static const int kPropertyCell = 1;
+ static const int kAllocationSite = 1;
+
+ protected:
+ void BasePrintName(const char* name, StringStream* stream);
private:
int NotMissMinorKey() { return bit_field_; }
// Ensure data fits within available bits.
STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
- STATIC_ASSERT(LAST_CONTEXT_CHECK_MODE == 1);
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
class AllocationSiteOverrideModeBits: public
BitField<AllocationSiteOverrideMode, 8, 1> {}; // NOLINT
- class ContextCheckModeBits: public BitField<ContextCheckMode, 9, 1> {};
uint32_t bit_field_;
DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
@@ -1983,9 +2095,8 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNoArgumentConstructorStub(
ElementsKind kind,
- ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : ArrayConstructorStubBase(kind, context_mode, override_mode) {
+ : ArrayConstructorStubBase(kind, override_mode) {
}
virtual Handle<Code> GenerateCode(Isolate* isolate);
@@ -1997,6 +2108,10 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
private:
Major MajorKey() { return ArrayNoArgumentConstructor; }
+ virtual void PrintName(StringStream* stream) {
+ BasePrintName("ArrayNoArgumentConstructorStub", stream);
+ }
+
DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
};
@@ -2005,9 +2120,8 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArraySingleArgumentConstructorStub(
ElementsKind kind,
- ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : ArrayConstructorStubBase(kind, context_mode, override_mode) {
+ : ArrayConstructorStubBase(kind, override_mode) {
}
virtual Handle<Code> GenerateCode(Isolate* isolate);
@@ -2019,6 +2133,10 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
private:
Major MajorKey() { return ArraySingleArgumentConstructor; }
+ virtual void PrintName(StringStream* stream) {
+ BasePrintName("ArraySingleArgumentConstructorStub", stream);
+ }
+
DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
};
@@ -2027,9 +2145,8 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNArgumentsConstructorStub(
ElementsKind kind,
- ContextCheckMode context_mode = CONTEXT_CHECK_REQUIRED,
AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : ArrayConstructorStubBase(kind, context_mode, override_mode) {
+ : ArrayConstructorStubBase(kind, override_mode) {
}
virtual Handle<Code> GenerateCode(Isolate* isolate);
@@ -2041,6 +2158,10 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
private:
Major MajorKey() { return ArrayNArgumentsConstructor; }
+ virtual void PrintName(StringStream* stream) {
+ BasePrintName("ArrayNArgumentsConstructorStub", stream);
+ }
+
DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
};
@@ -2051,7 +2172,6 @@ class InternalArrayConstructorStubBase : public HydrogenCodeStub {
kind_ = kind;
}
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
@@ -2196,7 +2316,7 @@ class ToBooleanStub: public HydrogenCodeStub {
explicit ToBooleanStub(Types types = Types())
: types_(types) { }
- explicit ToBooleanStub(Code::ExtraICState state)
+ explicit ToBooleanStub(ExtraICState state)
: types_(static_cast<byte>(state)) { }
bool UpdateStatus(Handle<Object> object);
@@ -2223,7 +2343,7 @@ class ToBooleanStub: public HydrogenCodeStub {
return ToBooleanStub(UNINITIALIZED).GetCode(isolate);
}
- virtual Code::ExtraICState GetExtraICState() {
+ virtual ExtraICState GetExtraICState() {
return types_.ToIntegral();
}
@@ -2315,8 +2435,6 @@ class StubFailureTrampolineStub : public PlatformCodeStub {
explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
: fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {}
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
-
static void GenerateAheadOfTime(Isolate* isolate);
private:
@@ -2361,6 +2479,12 @@ class ProfileEntryHookStub : public PlatformCodeStub {
DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
};
+
+class CallDescriptors {
+ public:
+ static void InitializeForIsolate(Isolate* isolate);
+};
+
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 573ddc6ce7..f6c36682de 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -81,7 +81,7 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
CodeStub::MajorName(info->code_stub()->MajorKey(), true);
PrintF("%s", name == NULL ? "<unknown>" : name);
} else {
- PrintF("%s", *info->function()->debug_name()->ToCString());
+ PrintF("%s", info->function()->debug_name()->ToCString().get());
}
PrintF("]\n");
}
@@ -89,12 +89,12 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
#ifdef DEBUG
if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(info->isolate()).PrintProgram(info->function()));
+ PrettyPrinter(info->zone()).PrintProgram(info->function()));
}
if (!info->IsStub() && print_ast) {
PrintF("--- AST ---\n%s\n",
- AstPrinter(info->isolate()).PrintProgram(info->function()));
+ AstPrinter(info->zone()).PrintProgram(info->function()));
}
#endif // DEBUG
}
@@ -136,10 +136,12 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
FunctionLiteral* function = info->function();
bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::FUNCTION;
+
+ CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
if (print_source) {
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
+ PrintF(tracing_scope.file(), "--- Raw source ---\n");
ConsStringIteratorOp op;
StringCharacterStream stream(String::cast(script->source()),
&op,
@@ -149,31 +151,38 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
int source_len =
function->end_position() - function->start_position() + 1;
for (int i = 0; i < source_len; i++) {
- if (stream.HasMore()) PrintF("%c", stream.GetNext());
+ if (stream.HasMore()) {
+ PrintF(tracing_scope.file(), "%c", stream.GetNext());
+ }
}
- PrintF("\n\n");
+ PrintF(tracing_scope.file(), "\n\n");
}
}
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code) {
- PrintF("--- Unoptimized code ---\n");
+ PrintF(tracing_scope.file(), "--- Unoptimized code ---\n");
info->closure()->shared()->code()->Disassemble(
- *function->debug_name()->ToCString());
+ function->debug_name()->ToCString().get(), tracing_scope.file());
}
- PrintF("--- Optimized code ---\n");
+ PrintF(tracing_scope.file(), "--- Optimized code ---\n");
+ PrintF(tracing_scope.file(),
+ "optimization_id = %d\n", info->optimization_id());
} else {
- PrintF("--- Code ---\n");
+ PrintF(tracing_scope.file(), "--- Code ---\n");
}
if (print_source) {
- PrintF("source_position = %d\n", function->start_position());
+ PrintF(tracing_scope.file(),
+ "source_position = %d\n", function->start_position());
}
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
- code->Disassemble(CodeStub::MajorName(major_key, false));
+ code->Disassemble(CodeStub::MajorName(major_key, false),
+ tracing_scope.file());
} else {
- code->Disassemble(*function->debug_name()->ToCString());
+ code->Disassemble(function->debug_name()->ToCString().get(),
+ tracing_scope.file());
}
- PrintF("--- End code ---\n");
+ PrintF(tracing_scope.file(), "--- End code ---\n");
}
#endif // ENABLE_DISASSEMBLER
}
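
Editor's note: the disassembly output is now routed through a CodeTracer::Scope, so every PrintF in this function writes to the same tracing FILE* (stdout or a per-isolate trace file) for the lifetime of the scope. A stripped-down sketch of that scoped-sink idea, not the real CodeTracer:

  #include <cstdarg>
  #include <cstdio>

  // Holds the output sink for one tracing section; falls back to stdout.
  class TraceScope {
   public:
    explicit TraceScope(const char* path)
        : file_(path ? std::fopen(path, "a") : NULL) {}
    ~TraceScope() { if (file_) std::fclose(file_); }
    std::FILE* file() const { return file_ ? file_ : stdout; }
   private:
    std::FILE* file_;
  };

  void PrintF(std::FILE* out, const char* format, ...) {
    va_list args;
    va_start(args, format);
    std::vfprintf(out, format, args);
    va_end(args);
  }

  int main() {
    TraceScope tracing_scope(NULL);  // no trace file configured: use stdout
    PrintF(tracing_scope.file(), "--- Code ---\n");
    PrintF(tracing_scope.file(), "source_position = %d\n", 42);
    PrintF(tracing_scope.file(), "--- End code ---\n");
    return 0;
  }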
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index ea20296916..be76de8aea 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -30,7 +30,6 @@
#include "code-stubs.h"
#include "runtime.h"
-#include "type-info.h"
// Include the declaration of the architecture defined class CodeGenerator.
// The contract to the shared code is that the the CodeGenerator is a subclass
@@ -73,6 +72,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/codegen-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -84,12 +85,39 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
namespace v8 {
namespace internal {
+
+class CompilationInfo;
+
+
+class CodeGenerator {
+ public:
+ // Printing of AST, etc. as requested by flags.
+ static void MakeCodePrologue(CompilationInfo* info, const char* kind);
+
+ // Allocate and install the code.
+ static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
+ Code::Flags flags,
+ CompilationInfo* info);
+
+ // Print the code after compiling it.
+ static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
+
+ static bool RecordPositions(MacroAssembler* masm,
+ int pos,
+ bool right_here = false);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
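
Editor's note: the CodeGenerator declaration moved into codegen.h above is the usual static-only utility class: every member is static, and the private DISALLOW_COPY_AND_ASSIGN leaves no accessible constructor. The classic spelling of that macro looks like the sketch below (V8's own definition differs in detail but has the same effect):

  #include <cstdio>

  // Declare the copy operations, never define them, keep them private.
  #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
    TypeName(const TypeName&);               \
    void operator=(const TypeName&)

  class CodeGeneratorLike {
   public:
    // Only static entry points; the class is a namespace with access control.
    static void PrintBanner(const char* kind) {
      std::printf("[generating %s code]\n", kind);
    }

   private:
    DISALLOW_COPY_AND_ASSIGN(CodeGeneratorLike);
  };

  int main() {
    CodeGeneratorLike::PrintBanner("unoptimized");
    // CodeGeneratorLike g;  // would not compile: no accessible constructor
    return 0;
  }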
+
+
// Results of the library implementation of transcendental functions may differ
// from the one we use in our generated code. Therefore we use the same
// generated code both in runtime and compiled code.
typedef double (*UnaryMathFunction)(double x);
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
UnaryMathFunction CreateExpFunction();
UnaryMathFunction CreateSqrtFunction();
@@ -112,6 +140,8 @@ class ElementsTransitionGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};
+static const int kNumberDictionaryProbes = 4;
+
} } // namespace v8::internal
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index 01537e87b0..1f7aef4f0d 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -40,6 +40,22 @@ var $WeakSet = global.WeakSet;
// supported internally but required for Harmony sets and maps.
var undefined_sentinel = {};
+
+// Map and Set use SameValueZero, which means that +0 and -0 should be treated
+// as the same value.
+function NormalizeKey(key) {
+ if (IS_UNDEFINED(key)) {
+ return undefined_sentinel;
+ }
+
+ if (key === 0) {
+ return 0;
+ }
+
+ return key;
+}
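
Editor's note: NormalizeKey above maps undefined to a sentinel object and collapses -0 to +0 before the key reaches the backing store, because SameValueZero treats +0 and -0 as the same key even though their bit patterns differ. The same hazard is easy to demonstrate with C++ doubles (illustrative only; the real collections hash JS values, not raw doubles):

  #include <cmath>
  #include <cstdio>
  #include <cstring>

  // Collapse -0.0 to +0.0 so a bitwise-keyed table sees one zero, not two.
  double NormalizeKey(double key) {
    if (key == 0.0) return 0.0;  // true for both +0.0 and -0.0
    return key;
  }

  int main() {
    double pz = 0.0, nz = -0.0;
    std::printf("equal: %d\n", pz == nz);                                   // 1
    std::printf("same bits: %d\n", std::memcmp(&pz, &nz, sizeof pz) == 0);  // 0
    std::printf("signbit(-0): %d\n", (int)std::signbit(nz));                // 1
    std::printf("signbit(normalized): %d\n",
                (int)std::signbit(NormalizeKey(nz)));                       // 0
    return 0;
  }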
+
+
// -------------------------------------------------------------------
// Harmony Set
@@ -57,10 +73,7 @@ function SetAdd(key) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.add', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %SetAdd(this, key);
+ return %SetAdd(this, NormalizeKey(key));
}
@@ -69,10 +82,7 @@ function SetHas(key) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.has', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %SetHas(this, key);
+ return %SetHas(this, NormalizeKey(key));
}
@@ -81,9 +91,7 @@ function SetDelete(key) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.delete', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
+ key = NormalizeKey(key);
if (%SetHas(this, key)) {
%SetDelete(this, key);
return true;
@@ -151,10 +159,7 @@ function MapGet(key) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.get', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %MapGet(this, key);
+ return %MapGet(this, NormalizeKey(key));
}
@@ -163,10 +168,7 @@ function MapSet(key, value) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.set', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %MapSet(this, key, value);
+ return %MapSet(this, NormalizeKey(key), value);
}
@@ -175,10 +177,7 @@ function MapHas(key) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.has', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %MapHas(this, key);
+ return %MapHas(this, NormalizeKey(key));
}
@@ -187,10 +186,7 @@ function MapDelete(key) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.delete', this]);
}
- if (IS_UNDEFINED(key)) {
- key = undefined_sentinel;
- }
- return %MapDelete(this, key);
+ return %MapDelete(this, NormalizeKey(key));
}
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index fffe5da71d..a69ef4c765 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -421,7 +421,6 @@ Handle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<String> source,
Handle<Context> context,
- bool is_global,
LanguageMode language_mode,
int scope_position) {
if (!IsEnabled()) {
@@ -429,7 +428,7 @@ Handle<SharedFunctionInfo> CompilationCache::LookupEval(
}
Handle<SharedFunctionInfo> result;
- if (is_global) {
+ if (context->IsNativeContext()) {
result = eval_global_.Lookup(
source, context, language_mode, scope_position);
} else {
@@ -454,9 +453,7 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
void CompilationCache::PutScript(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) {
- return;
- }
+ if (!IsEnabled()) return;
script_.Put(source, context, function_info);
}
@@ -464,15 +461,12 @@ void CompilationCache::PutScript(Handle<String> source,
void CompilationCache::PutEval(Handle<String> source,
Handle<Context> context,
- bool is_global,
Handle<SharedFunctionInfo> function_info,
int scope_position) {
- if (!IsEnabled()) {
- return;
- }
+ if (!IsEnabled()) return;
HandleScope scope(isolate());
- if (is_global) {
+ if (context->IsNativeContext()) {
eval_global_.Put(source, context, function_info, scope_position);
} else {
ASSERT(scope_position != RelocInfo::kNoPosition);
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 414e09e655..ead52b5fa4 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -222,7 +222,6 @@ class CompilationCache {
// contain a script for the given source string.
Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
Handle<Context> context,
- bool is_global,
LanguageMode language_mode,
int scope_position);
@@ -241,7 +240,6 @@ class CompilationCache {
// with the shared function info. This may overwrite an existing mapping.
void PutEval(Handle<String> source,
Handle<Context> context,
- bool is_global,
Handle<SharedFunctionInfo> function_info,
int scope_position);
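
Illustrative aside (not part of the patch): the two hunks above stop threading an is_global flag through the eval cache; whether a lookup or insertion goes to the global or the contextual table is now derived from the context itself via IsNativeContext(). The standalone sketch below uses made-up types (Context, SharedInfo) purely to show that shape; it is not the real CompilationCache API.

#include <map>
#include <string>
#include <utility>

struct Context {
  bool is_native;
  bool IsNativeContext() const { return is_native; }
};
using SharedInfo = std::string;  // stand-in for Handle<SharedFunctionInfo>

struct EvalCache {
  // Separate tables for global and contextual eval, keyed by source text and
  // the scope position of the eval call site.
  std::map<std::pair<std::string, int>, SharedInfo> eval_global_;
  std::map<std::pair<std::string, int>, SharedInfo> eval_contextual_;

  // Callers no longer pass is_global; the table is chosen from the context.
  const SharedInfo* Lookup(const std::string& source, const Context& context,
                           int scope_position) const {
    const auto& table =
        context.IsNativeContext() ? eval_global_ : eval_contextual_;
    auto it = table.find({source, scope_position});
    return it == table.end() ? nullptr : &it->second;
  }

  void Put(const std::string& source, const Context& context,
           int scope_position, SharedInfo info) {
    auto& table = context.IsNativeContext() ? eval_global_ : eval_contextual_;
    table[{source, scope_position}] = std::move(info);
  }
};
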
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index e86baa02aa..d466778069 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -59,7 +59,9 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0) {
+ parameter_count_(0),
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script->GetIsolate(), BASE, zone);
}
@@ -70,7 +72,9 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0) {
+ parameter_count_(0),
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -83,7 +87,9 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0) {
+ parameter_count_(0),
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -94,7 +100,9 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
: flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0) {
+ parameter_count_(0),
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@@ -184,8 +192,12 @@ void CompilationInfo::RollbackDependencies() {
int CompilationInfo::num_parameters() const {
- ASSERT(!IsStub());
- return scope()->num_parameters();
+ if (IsStub()) {
+ ASSERT(parameter_count_ > 0);
+ return parameter_count_;
+ } else {
+ return scope()->num_parameters();
+ }
}
@@ -203,8 +215,7 @@ Code::Flags CompilationInfo::flags() const {
return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(),
code_stub()->GetExtraICState(),
- code_stub()->GetStubType(),
- code_stub()->GetStubFlags());
+ code_stub()->GetStubType());
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@@ -227,8 +238,7 @@ void CompilationInfo::DisableOptimization() {
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
bool CompilationInfo::ShouldSelfOptimize() {
- return FLAG_self_optimization &&
- FLAG_crankshaft &&
+ return FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->dont_optimize() &&
function()->scope()->AllowsLazyCompilation() &&
@@ -236,89 +246,16 @@ bool CompilationInfo::ShouldSelfOptimize() {
}
-// Determine whether to use the full compiler for all code. If the flag
-// --always-full-compiler is specified this is the case. For the virtual frame
-// based compiler the full compiler is also used if a debugger is connected, as
-// the code from the full compiler supports mode precise break points. For the
-// crankshaft adaptive compiler debugging the optimized code is not possible at
-// all. However crankshaft support recompilation of functions, so in this case
-// the full compiler need not be be used if a debugger is attached, but only if
-// break points has actually been set.
-static bool IsDebuggerActive(Isolate* isolate) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- return isolate->use_crankshaft() ?
- isolate->debug()->has_break_points() :
- isolate->debugger()->IsDebuggerActive();
-#else
- return false;
-#endif
-}
-
-
-static bool AlwaysFullCompiler(Isolate* isolate) {
- return FLAG_always_full_compiler || IsDebuggerActive(isolate);
+void CompilationInfo::PrepareForCompilation(Scope* scope) {
+ ASSERT(scope_ == NULL);
+ scope_ = scope;
+ function()->ProcessFeedbackSlots(isolate_);
}
-void RecompileJob::RecordOptimizationStats() {
- Handle<JSFunction> function = info()->closure();
- if (!function->IsOptimized()) {
- // Concurrent recompilation and OSR may race. Increment only once.
- int opt_count = function->shared()->opt_count();
- function->shared()->set_opt_count(opt_count + 1);
- }
- double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
- double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
- double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
- if (FLAG_trace_opt) {
- PrintF("[optimizing ");
- function->ShortPrint();
- PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
- ms_codegen);
- }
- if (FLAG_trace_opt_stats) {
- static double compilation_time = 0.0;
- static int compiled_functions = 0;
- static int code_size = 0;
-
- compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
- compiled_functions++;
- code_size += function->shared()->SourceSize();
- PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
- compiled_functions,
- code_size,
- compilation_time);
- }
- if (FLAG_hydrogen_stats) {
- isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
- time_taken_to_optimize_,
- time_taken_to_codegen_);
- }
-}
-
-
-// A return value of true indicates the compilation pipeline is still
-// going, not necessarily that we optimized the code.
-static bool MakeCrankshaftCode(CompilationInfo* info) {
- RecompileJob job(info);
- RecompileJob::Status status = job.CreateGraph();
-
- if (status != RecompileJob::SUCCEEDED) {
- return status != RecompileJob::FAILED;
- }
- status = job.OptimizeGraph();
- if (status != RecompileJob::SUCCEEDED) {
- status = job.AbortOptimization();
- return status != RecompileJob::FAILED;
- }
- status = job.GenerateAndInstallCode();
- return status != RecompileJob::FAILED;
-}
-
-
-class HOptimizedGraphBuilderWithPotisions: public HOptimizedGraphBuilder {
+class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public:
- explicit HOptimizedGraphBuilderWithPotisions(CompilationInfo* info)
+ explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
: HOptimizedGraphBuilder(info) {
}
@@ -348,12 +285,30 @@ class HOptimizedGraphBuilderWithPotisions: public HOptimizedGraphBuilder {
}
MODULE_NODE_LIST(DEF_VISIT)
DECLARATION_NODE_LIST(DEF_VISIT)
- AUXILIARY_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
};
-RecompileJob::Status RecompileJob::CreateGraph() {
+// Determine whether to use the full compiler for all code. If the flag
+// --always-full-compiler is specified this is the case. For the virtual frame
+// based compiler the full compiler is also used if a debugger is connected, as
+// the code from the full compiler supports more precise break points. For the
+// crankshaft adaptive compiler, debugging the optimized code is not possible at
+// all. However, crankshaft supports recompilation of functions, so in this case
+// the full compiler need not be used if a debugger is attached, but only if
+// break points have actually been set.
+static bool IsDebuggerActive(Isolate* isolate) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ return isolate->use_crankshaft() ?
+ isolate->debug()->has_break_points() :
+ isolate->debugger()->IsDebuggerActive();
+#else
+ return false;
+#endif
+}
+
+
+OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
@@ -369,18 +324,15 @@ RecompileJob::Status RecompileJob::CreateGraph() {
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
- if (AlwaysFullCompiler(isolate())) {
- info()->AbortOptimization();
- return SetLastStatus(BAILED_OUT);
- }
+ if (FLAG_always_full_compiler) return AbortOptimization();
+ if (IsDebuggerActive(isolate())) return AbortOptimization(kDebuggerIsActive);
// Limit the number of times we re-compile a functions with
// the optimizing compiler.
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
if (info()->opt_count() > kMaxOptCount) {
- info()->set_bailout_reason(kOptimizedTooManyTimes);
- return AbortOptimization();
+ return AbortAndDisableOptimization(kOptimizedTooManyTimes);
}
// Due to an encoding limit on LUnallocated operands in the Lithium
@@ -393,21 +345,18 @@ RecompileJob::Status RecompileJob::CreateGraph() {
const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
Scope* scope = info()->scope();
if ((scope->num_parameters() + 1) > parameter_limit) {
- info()->set_bailout_reason(kTooManyParameters);
- return AbortOptimization();
+ return AbortAndDisableOptimization(kTooManyParameters);
}
const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
if (info()->is_osr() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
- info()->set_bailout_reason(kTooManyParametersLocals);
- return AbortOptimization();
+ return AbortAndDisableOptimization(kTooManyParametersLocals);
}
// Take --hydrogen-filter into account.
if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
- info()->AbortOptimization();
- return SetLastStatus(BAILED_OUT);
+ return AbortOptimization(kHydrogenFilter);
}
// Recompile the unoptimized version of the code if the current version
@@ -424,7 +373,7 @@ RecompileJob::Status RecompileJob::CreateGraph() {
// Note that we use the same AST that we will use for generating the
// optimized code.
unoptimized.SetFunction(info()->function());
- unoptimized.SetScope(info()->scope());
+ unoptimized.PrepareForCompilation(info()->scope());
unoptimized.SetContext(info()->context());
if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
@@ -452,22 +401,22 @@ RecompileJob::Status RecompileJob::CreateGraph() {
if (FLAG_trace_hydrogen) {
Handle<String> name = info()->function()->debug_name();
PrintF("-----------------------------------------------------------\n");
- PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
+ PrintF("Compiling method %s using hydrogen\n", name->ToCString().get());
isolate()->GetHTracer()->TraceCompilation(info());
}
// Type-check the function.
AstTyper::Run(info());
- graph_builder_ = FLAG_emit_opt_code_positions
- ? new(info()->zone()) HOptimizedGraphBuilderWithPotisions(info())
+ graph_builder_ = FLAG_hydrogen_track_positions
+ ? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
+ info()->set_this_has_uses(false);
graph_ = graph_builder_->CreateGraph();
if (isolate()->has_pending_exception()) {
- info()->SetCode(Handle<Code>::null());
return SetLastStatus(FAILED);
}
@@ -477,24 +426,21 @@ RecompileJob::Status RecompileJob::CreateGraph() {
ASSERT(!graph_builder_->inline_bailout() || graph_ == NULL);
if (graph_ == NULL) {
if (graph_builder_->inline_bailout()) {
- info_->AbortOptimization();
- return SetLastStatus(BAILED_OUT);
- } else {
return AbortOptimization();
+ } else {
+ return AbortAndDisableOptimization();
}
}
if (info()->HasAbortedDueToDependencyChange()) {
- info_->set_bailout_reason(kBailedOutDueToDependencyChange);
- info_->AbortOptimization();
- return SetLastStatus(BAILED_OUT);
+ return AbortOptimization(kBailedOutDueToDependencyChange);
}
return SetLastStatus(SUCCEEDED);
}
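
Illustrative aside (not part of the patch): CreateGraph above now distinguishes two kinds of bailout, AbortOptimization for transient conditions (the function may be optimized on a later attempt) and AbortAndDisableOptimization for structural limits (the function is flagged so optimization is never retried). A rough standalone sketch of that split, with invented names and limits:

#include <cstdio>

enum Status { SUCCEEDED, BAILED_OUT, FAILED };
enum BailoutReason { kNoReason, kTooManyParameters, kHydrogenFilter };

struct FunctionState {
  bool optimization_disabled = false;
  BailoutReason bailout_reason = kNoReason;
};

struct CompileJob {
  FunctionState* function;

  // Transient: record the reason and give up on this attempt only.
  Status AbortOptimization(BailoutReason reason) {
    function->bailout_reason = reason;
    return BAILED_OUT;
  }

  // Permanent: additionally mark the function so later attempts are skipped.
  Status AbortAndDisableOptimization(BailoutReason reason) {
    function->optimization_disabled = true;
    return AbortOptimization(reason);
  }

  Status CreateGraph(int parameter_count, bool passes_filter) {
    const int kParameterLimit = 128;
    // Structural limits disable optimization for good...
    if (parameter_count > kParameterLimit)
      return AbortAndDisableOptimization(kTooManyParameters);
    // ...while a filter miss only skips this particular attempt.
    if (!passes_filter) return AbortOptimization(kHydrogenFilter);
    return SUCCEEDED;
  }
};

int main() {
  FunctionState f;
  CompileJob job{&f};
  Status s = job.CreateGraph(200, true);
  std::printf("status=%d disabled=%d\n", s, f.optimization_disabled);  // status=1 disabled=1
}
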
-RecompileJob::Status RecompileJob::OptimizeGraph() {
+OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -504,20 +450,19 @@ RecompileJob::Status RecompileJob::OptimizeGraph() {
Timer t(this, &time_taken_to_optimize_);
ASSERT(graph_ != NULL);
BailoutReason bailout_reason = kNoReason;
- if (!graph_->Optimize(&bailout_reason)) {
- if (bailout_reason == kNoReason) graph_builder_->Bailout(bailout_reason);
- return SetLastStatus(BAILED_OUT);
- } else {
+
+ if (graph_->Optimize(&bailout_reason)) {
chunk_ = LChunk::NewChunk(graph_);
- if (chunk_ == NULL) {
- return SetLastStatus(BAILED_OUT);
- }
+ if (chunk_ != NULL) return SetLastStatus(SUCCEEDED);
+ } else if (bailout_reason != kNoReason) {
+ graph_builder_->Bailout(bailout_reason);
}
- return SetLastStatus(SUCCEEDED);
+
+ return AbortOptimization();
}
-RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
+OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
ASSERT(last_status() == SUCCEEDED);
ASSERT(!info()->HasAbortedDueToDependencyChange());
DisallowCodeDependencyChange no_dependency_change;
@@ -533,9 +478,9 @@ RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
if (info()->bailout_reason() == kNoReason) {
- info()->set_bailout_reason(kCodeGenerationFailed);
+ info_->set_bailout_reason(kCodeGenerationFailed);
}
- return AbortOptimization();
+ return AbortAndDisableOptimization();
}
info()->SetCode(optimized_code);
}
@@ -546,54 +491,40 @@ RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
}
-static bool GenerateCode(CompilationInfo* info) {
- bool is_optimizing = info->isolate()->use_crankshaft() &&
- !info->IsCompilingForDebugging() &&
- info->IsOptimizing();
- if (is_optimizing) {
- Logger::TimerEventScope timer(
- info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
- return MakeCrankshaftCode(info);
- } else {
- if (info->IsOptimizing()) {
- // Have the CompilationInfo decide if the compilation should be
- // BASE or NONOPT.
- info->DisableOptimization();
- }
- Logger::TimerEventScope timer(
- info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
- return FullCodeGenerator::MakeCode(info);
+void OptimizedCompileJob::RecordOptimizationStats() {
+ Handle<JSFunction> function = info()->closure();
+ if (!function->IsOptimized()) {
+ // Concurrent recompilation and OSR may race. Increment only once.
+ int opt_count = function->shared()->opt_count();
+ function->shared()->set_opt_count(opt_count + 1);
}
-}
-
-
-static bool MakeCode(CompilationInfo* info) {
- // Precondition: code has been parsed. Postcondition: the code field in
- // the compilation info is set if compilation succeeded.
- ASSERT(info->function() != NULL);
- return Rewriter::Rewrite(info) && Scope::Analyze(info) && GenerateCode(info);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
- // Precondition: code has been parsed. Postcondition: the code field in
- // the compilation info is set if compilation succeeded.
- bool succeeded = MakeCode(info);
- if (!info->shared_info().is_null()) {
- Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
- info->zone());
- info->shared_info()->set_scope_info(*scope_info);
+ double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
+ double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
+ double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
+ if (FLAG_trace_opt) {
+ PrintF("[optimizing ");
+ function->ShortPrint();
+ PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
+ ms_codegen);
}
- return succeeded;
-}
-#endif
-
+ if (FLAG_trace_opt_stats) {
+ static double compilation_time = 0.0;
+ static int compiled_functions = 0;
+ static int code_size = 0;
-static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
- bool allow_lazy_without_ctx = false) {
- return LiveEditFunctionTracker::IsActive(info->isolate()) ||
- (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
+ compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
+ compiled_functions++;
+ code_size += function->shared()->SourceSize();
+ PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
+ compiled_functions,
+ code_size,
+ compilation_time);
+ }
+ if (FLAG_hydrogen_stats) {
+ isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
+ time_taken_to_optimize_,
+ time_taken_to_codegen_);
+ }
}
@@ -624,54 +555,250 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
}
-static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
+static void UpdateSharedFunctionInfo(CompilationInfo* info) {
+ // Update the shared function info with the compiled code and the
+ // scope info. Please note, that the order of the shared function
+ // info initialization is important since set_scope_info might
+ // trigger a GC, causing the ASSERT below to be invalid if the code
+ // was flushed. By setting the code object last we avoid this.
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info->scope(), info->zone());
+ shared->set_scope_info(*scope_info);
+
+ Handle<Code> code = info->code();
+ CHECK(code->kind() == Code::FUNCTION);
+ shared->ReplaceCode(*code);
+ if (shared->optimization_disabled()) code->set_optimizable(false);
+
+ // Set the expected number of properties for instances.
+ FunctionLiteral* lit = info->function();
+ int expected = lit->expected_property_count();
+ SetExpectedNofPropertiesFromEstimate(shared, expected);
+
+ // Check the function has compiled code.
+ ASSERT(shared->is_compiled());
+ shared->set_dont_optimize_reason(lit->dont_optimize_reason());
+ shared->set_dont_inline(lit->flags()->Contains(kDontInline));
+ shared->set_ast_node_count(lit->ast_node_count());
+ shared->set_language_mode(lit->language_mode());
+}
+
+
+// Sets the function info on a function.
+// The start_position points to the first '(' character after the function name
+// in the full script source. When counting characters in the script source,
+// the first character is number 0 (not 1).
+static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
+ FunctionLiteral* lit,
+ bool is_toplevel,
+ Handle<Script> script) {
+ function_info->set_length(lit->parameter_count());
+ function_info->set_formal_parameter_count(lit->parameter_count());
+ function_info->set_script(*script);
+ function_info->set_function_token_position(lit->function_token_position());
+ function_info->set_start_position(lit->start_position());
+ function_info->set_end_position(lit->end_position());
+ function_info->set_is_expression(lit->is_expression());
+ function_info->set_is_anonymous(lit->is_anonymous());
+ function_info->set_is_toplevel(is_toplevel);
+ function_info->set_inferred_name(*lit->inferred_name());
+ function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+ function_info->set_allows_lazy_compilation_without_context(
+ lit->AllowsLazyCompilationWithoutContext());
+ function_info->set_language_mode(lit->language_mode());
+ function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
+ function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
+ function_info->set_ast_node_count(lit->ast_node_count());
+ function_info->set_is_function(lit->is_function());
+ function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
+ function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
+ function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
+ function_info->set_is_generator(lit->is_generator());
+}
+
+
+static bool CompileUnoptimizedCode(CompilationInfo* info) {
+ ASSERT(info->function() != NULL);
+ if (!Rewriter::Rewrite(info)) return false;
+ if (!Scope::Analyze(info)) return false;
+ ASSERT(info->scope() != NULL);
+
+ if (!FullCodeGenerator::MakeCode(info)) {
+ Isolate* isolate = info->isolate();
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return false;
+ }
+ return true;
+}
+
+
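
Illustrative aside (not part of the patch): CompileUnoptimizedCode above is a plain pipeline with early exits, and it now owns the fallback of scheduling a stack-overflow exception when full code generation fails without raising one itself. The standalone sketch below uses placeholder stages to show that control flow; none of the names are the real V8 API.

#include <iostream>

struct Info {
  bool has_pending_exception = false;
  void RaiseStackOverflow() { has_pending_exception = true; }
};

// Placeholder pipeline stages; the last one pretends to fail silently.
bool Rewrite(Info&) { return true; }
bool AnalyzeScopes(Info&) { return true; }
bool GenerateFullCode(Info&) { return false; }

bool CompileUnoptimized(Info& info) {
  if (!Rewrite(info)) return false;
  if (!AnalyzeScopes(info)) return false;
  if (!GenerateFullCode(info)) {
    // Codegen can fail without leaving an exception behind (e.g. out of
    // stack); make sure callers always see one in that case.
    if (!info.has_pending_exception) info.RaiseStackOverflow();
    return false;
  }
  return true;
}

int main() {
  Info info;
  bool ok = CompileUnoptimized(info);
  std::cout << ok << " pending=" << info.has_pending_exception << "\n";  // 0 pending=1
}
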
+static Handle<Code> GetUnoptimizedCodeCommon(CompilationInfo* info) {
+ VMState<COMPILER> state(info->isolate());
+ PostponeInterruptsScope postpone(info->isolate());
+ if (!Parser::Parse(info)) return Handle<Code>::null();
+ LanguageMode language_mode = info->function()->language_mode();
+ info->SetLanguageMode(language_mode);
+
+ if (!CompileUnoptimizedCode(info)) return Handle<Code>::null();
+ Compiler::RecordFunctionCompilation(
+ Logger::LAZY_COMPILE_TAG, info, info->shared_info());
+ UpdateSharedFunctionInfo(info);
+ ASSERT_EQ(Code::FUNCTION, info->code()->kind());
+ return info->code();
+}
+
+
+Handle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
+ ASSERT(!function->GetIsolate()->has_pending_exception());
+ ASSERT(!function->is_compiled());
+ if (function->shared()->is_compiled()) {
+ return Handle<Code>(function->shared()->code());
+ }
+
+ CompilationInfoWithZone info(function);
+ Handle<Code> result = GetUnoptimizedCodeCommon(&info);
+ ASSERT_EQ(result.is_null(), info.isolate()->has_pending_exception());
+
+ if (FLAG_always_opt &&
+ !result.is_null() &&
+ info.isolate()->use_crankshaft() &&
+ !info.shared_info()->optimization_disabled() &&
+ !info.isolate()->DebuggerHasBreakPoints()) {
+ Handle<Code> opt_code = Compiler::GetOptimizedCode(
+ function, result, Compiler::NOT_CONCURRENT);
+ if (!opt_code.is_null()) result = opt_code;
+ }
+
+ return result;
+}
+
+
+Handle<Code> Compiler::GetUnoptimizedCode(Handle<SharedFunctionInfo> shared) {
+ ASSERT(!shared->GetIsolate()->has_pending_exception());
+ ASSERT(!shared->is_compiled());
+
+ CompilationInfoWithZone info(shared);
+ Handle<Code> result = GetUnoptimizedCodeCommon(&info);
+ ASSERT_EQ(result.is_null(), info.isolate()->has_pending_exception());
+ return result;
+}
+
+
+bool Compiler::EnsureCompiled(Handle<JSFunction> function,
+ ClearExceptionFlag flag) {
+ if (function->is_compiled()) return true;
+ Handle<Code> code = Compiler::GetUnoptimizedCode(function);
+ if (code.is_null()) {
+ if (flag == CLEAR_EXCEPTION) {
+ function->GetIsolate()->clear_pending_exception();
+ }
+ return false;
+ }
+ function->ReplaceCode(*code);
+ ASSERT(function->is_compiled());
+ return true;
+}
+
+
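
Illustrative aside (not part of the patch): EnsureCompiled above captures the lazy-compilation contract: return immediately for already-compiled functions, optionally clear the pending exception on failure, and install the new code on success. A small standalone sketch of that contract with placeholder types:

#include <iostream>
#include <optional>

enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };

struct Isolate { bool pending_exception = false; };

struct Function {
  Isolate* isolate;
  std::optional<int> code;  // stand-in for the installed Code object
  bool is_compiled() const { return code.has_value(); }
};

// Pretend compilation fails and leaves a pending exception behind.
std::optional<int> GetUnoptimizedCode(Function& f) {
  f.isolate->pending_exception = true;
  return std::nullopt;
}

bool EnsureCompiled(Function& f, ClearExceptionFlag flag) {
  if (f.is_compiled()) return true;
  std::optional<int> code = GetUnoptimizedCode(f);
  if (!code) {
    if (flag == CLEAR_EXCEPTION) f.isolate->pending_exception = false;
    return false;
  }
  f.code = code;  // install the freshly compiled code on the function
  return true;
}

int main() {
  Isolate isolate;
  Function f{&isolate, std::nullopt};
  bool ok = EnsureCompiled(f, CLEAR_EXCEPTION);
  std::cout << ok << " pending=" << isolate.pending_exception << "\n";  // 0 pending=0
}
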
+// Compile full code for debugging. This code will have debug break slots
+// and deoptimization information. Deoptimization information is required
+// in case an optimized version of this function is still activated on
+// the stack. It will also make sure that the full code is compiled with
+// the same flags as the previous version, that is, flags which can change
+// the code generated. The current method of mapping from already compiled
+// full code without debug break slots to full code with debug break slots
+// depends on the generated code being otherwise exactly the same.
+// If compilation fails, just keep the existing code.
+Handle<Code> Compiler::GetCodeForDebugging(Handle<JSFunction> function) {
+ CompilationInfoWithZone info(function);
+ Isolate* isolate = info.isolate();
+ VMState<COMPILER> state(isolate);
+
+ ASSERT(!isolate->has_pending_exception());
+ Handle<Code> old_code(function->shared()->code());
+ ASSERT(old_code->kind() == Code::FUNCTION);
+ ASSERT(!old_code->has_debug_break_slots());
+
+ info.MarkCompilingForDebugging();
+ if (old_code->is_compiled_optimizable()) {
+ info.EnableDeoptimizationSupport();
+ } else {
+ info.MarkNonOptimizable();
+ }
+ Handle<Code> new_code = GetUnoptimizedCodeCommon(&info);
+ if (new_code.is_null()) {
+ isolate->clear_pending_exception();
+ } else {
+ ASSERT_EQ(old_code->is_compiled_optimizable(),
+ new_code->is_compiled_optimizable());
+ }
+ return new_code;
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void Compiler::CompileForLiveEdit(Handle<Script> script) {
+ // TODO(635): support extensions.
+ CompilationInfoWithZone info(script);
+ PostponeInterruptsScope postpone(info.isolate());
+ VMState<COMPILER> state(info.isolate());
+
+ info.MarkAsGlobal();
+ if (!Parser::Parse(&info)) return;
+ LanguageMode language_mode = info.function()->language_mode();
+ info.SetLanguageMode(language_mode);
+
+ LiveEditFunctionTracker tracker(info.isolate(), info.function());
+ if (!CompileUnoptimizedCode(&info)) return;
+ if (!info.shared_info().is_null()) {
+ Handle<ScopeInfo> scope_info = ScopeInfo::Create(info.scope(),
+ info.zone());
+ info.shared_info()->set_scope_info(*scope_info);
+ }
+ tracker.RecordRootFunctionInfo(info.code());
+}
+#endif
+
+
+static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
+ bool allow_lazy_without_ctx = false) {
+ return LiveEditFunctionTracker::IsActive(info->isolate()) ||
+ (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
+}
+
+
+static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
-
ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
+
// TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
FixedArray* array = isolate->native_context()->embedder_data();
script->set_context_data(array->get(0));
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (info->is_eval()) {
- script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
- // For eval scripts add information on the function from which eval was
- // called.
- if (info->is_eval()) {
- StackTraceFrameIterator it(isolate);
- if (!it.done()) {
- script->set_eval_from_shared(it.frame()->function()->shared());
- Code* code = it.frame()->LookupCode();
- int offset = static_cast<int>(
- it.frame()->pc() - code->instruction_start());
- script->set_eval_from_instructions_offset(Smi::FromInt(offset));
- }
- }
- }
-
- // Notify debugger
isolate->debugger()->OnBeforeCompile(script);
#endif
- // Only allow non-global compiles for eval.
ASSERT(info->is_eval() || info->is_global());
- {
- Parser parser(info);
- if ((info->pre_parse_data() != NULL ||
- String::cast(script->source())->length() > FLAG_min_preparse_length) &&
- !DebuggerWantsEagerCompilation(info))
- parser.set_allow_lazy(true);
- if (!parser.Parse()) {
+
+ bool parse_allow_lazy =
+ (info->pre_parse_data() != NULL ||
+ String::cast(script->source())->length() > FLAG_min_preparse_length) &&
+ !DebuggerWantsEagerCompilation(info);
+
+ Handle<SharedFunctionInfo> result;
+
+ { VMState<COMPILER> state(info->isolate());
+ if (!Parser::Parse(info, parse_allow_lazy)) {
return Handle<SharedFunctionInfo>::null();
}
- }
- FunctionLiteral* lit = info->function();
- LiveEditFunctionTracker live_edit_tracker(isolate, lit);
- Handle<SharedFunctionInfo> result;
- {
+ FunctionLiteral* lit = info->function();
+ LiveEditFunctionTracker live_edit_tracker(isolate, lit);
+
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
@@ -681,48 +808,32 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
HistogramTimerScope timer(rate);
// Compile the code.
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ if (!CompileUnoptimizedCode(info)) {
return Handle<SharedFunctionInfo>::null();
}
// Allocate function.
ASSERT(!info->code().is_null());
- result =
- isolate->factory()->NewSharedFunctionInfo(
- lit->name(),
- lit->materialized_literal_count(),
- lit->is_generator(),
- info->code(),
- ScopeInfo::Create(info->scope(), info->zone()));
+ result = isolate->factory()->NewSharedFunctionInfo(
+ lit->name(),
+ lit->materialized_literal_count(),
+ lit->is_generator(),
+ info->code(),
+ ScopeInfo::Create(info->scope(), info->zone()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- Compiler::SetFunctionInfo(result, lit, true, script);
-
- if (script->name()->IsString()) {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- info,
- String::cast(script->name())));
- GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
- script,
- info->code(),
- info));
- } else {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- info,
- isolate->heap()->empty_string()));
- GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
- }
+ SetFunctionInfo(result, lit, true, script);
+
+ Handle<String> script_name = script->name()->IsString()
+ ? Handle<String>(String::cast(script->name()))
+ : isolate->factory()->empty_string();
+ Logger::LogEventsAndTags log_tag = info->is_eval()
+ ? Logger::EVAL_TAG
+ : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
+
+ PROFILE(isolate, CodeCreateEvent(
+ log_tag, *info->code(), *result, info, *script_name));
+ GDBJIT(AddCode(script_name, script, info->code(), info));
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
@@ -731,37 +842,90 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
lit->expected_property_count());
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+
+ live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger
- isolate->debugger()->OnAfterCompile(
- script, Debugger::NO_AFTER_COMPILE_FLAGS);
+ isolate->debugger()->OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
- live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
-
return result;
}
-Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag natives) {
+Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
+ Handle<Context> context,
+ LanguageMode language_mode,
+ ParseRestriction restriction,
+ int scope_position) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
- isolate->counters()->total_load_size()->Increment(source_length);
+ isolate->counters()->total_eval_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
- // The VM is in the COMPILER state until exiting this function.
- VMState<COMPILER> state(isolate);
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ Handle<SharedFunctionInfo> shared_info = compilation_cache->LookupEval(
+ source, context, language_mode, scope_position);
+
+ if (shared_info.is_null()) {
+ Handle<Script> script = isolate->factory()->NewScript(source);
+ CompilationInfoWithZone info(script);
+ info.MarkAsEval();
+ if (context->IsNativeContext()) info.MarkAsGlobal();
+ info.SetLanguageMode(language_mode);
+ info.SetParseRestriction(restriction);
+ info.SetContext(context);
+
+#if ENABLE_DEBUGGER_SUPPORT
+ Debug::RecordEvalCaller(script);
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+ shared_info = CompileToplevel(&info);
+
+ if (shared_info.is_null()) {
+ return Handle<JSFunction>::null();
+ } else {
+ // Explicitly disable optimization for eval code. We're not yet prepared
+ // to handle eval-code in the optimizing compiler.
+ shared_info->DisableOptimization(kEval);
+
+ // If caller is strict mode, the result must be in strict mode or
+ // extended mode as well, but not the other way around. Consider:
+ // eval("'use strict'; ...");
+ ASSERT(language_mode != STRICT_MODE || !shared_info->is_classic_mode());
+ // If caller is in extended mode, the result must also be in
+ // extended mode.
+ ASSERT(language_mode != EXTENDED_MODE ||
+ shared_info->is_extended_mode());
+ if (!shared_info->dont_cache()) {
+ compilation_cache->PutEval(
+ source, context, shared_info, scope_position);
+ }
+ }
+ } else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
+ shared_info->ResetForNewContext(isolate->heap()->global_ic_age());
+ }
+
+ return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared_info, context, NOT_TENURED);
+}
+
+
+Handle<SharedFunctionInfo> Compiler::CompileScript(Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ bool is_shared_cross_origin,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptDataImpl* pre_data,
+ Handle<Object> script_data,
+ NativesFlag natives) {
+ Isolate* isolate = source->GetIsolate();
+ int source_length = source->length();
+ isolate->counters()->total_load_size()->Increment(source_length);
+ isolate->counters()->total_compile_size()->Increment(source_length);
CompilationCache* compilation_cache = isolate->compilation_cache();
@@ -810,14 +974,12 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
if (FLAG_use_strict) {
info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
}
- result = MakeFunctionInfo(&info);
+ result = CompileToplevel(&info);
if (extension == NULL && !result.is_null() && !result->dont_cache()) {
compilation_cache->PutScript(source, context, result);
}
- } else {
- if (result->ic_age() != isolate->heap()->global_ic_age()) {
+ } else if (result->ic_age() != isolate->heap()->global_ic_age()) {
result->ResetForNewContext(isolate->heap()->global_ic_age());
- }
}
if (result.is_null()) isolate->ReportPendingMessages();
@@ -825,130 +987,86 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
}
-Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- LanguageMode language_mode,
- ParseRestriction restriction,
- int scope_position) {
- Isolate* isolate = source->GetIsolate();
- int source_length = source->length();
- isolate->counters()->total_eval_size()->Increment(source_length);
- isolate->counters()->total_compile_size()->Increment(source_length);
-
- // The VM is in the COMPILER state until exiting this function.
- VMState<COMPILER> state(isolate);
-
- // Do a lookup in the compilation cache; if the entry is not there, invoke
- // the compiler and add the result to the cache.
- Handle<SharedFunctionInfo> result;
- CompilationCache* compilation_cache = isolate->compilation_cache();
- result = compilation_cache->LookupEval(source,
- context,
- is_global,
- language_mode,
- scope_position);
+Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
+ Handle<Script> script) {
+ // Precondition: code has been parsed and scopes have been analyzed.
+ CompilationInfoWithZone info(script);
+ info.SetFunction(literal);
+ info.PrepareForCompilation(literal->scope());
+ info.SetLanguageMode(literal->scope()->language_mode());
- if (result.is_null()) {
- // Create a script object describing the script to be compiled.
- Handle<Script> script = isolate->factory()->NewScript(source);
- CompilationInfoWithZone info(script);
- info.MarkAsEval();
- if (is_global) info.MarkAsGlobal();
- info.SetLanguageMode(language_mode);
- info.SetParseRestriction(restriction);
- info.SetContext(context);
- result = MakeFunctionInfo(&info);
- if (!result.is_null()) {
- // Explicitly disable optimization for eval code. We're not yet prepared
- // to handle eval-code in the optimizing compiler.
- result->DisableOptimization(kEval);
+ Isolate* isolate = info.isolate();
+ Factory* factory = isolate->factory();
+ LiveEditFunctionTracker live_edit_tracker(isolate, literal);
+ // Determine if the function can be lazily compiled. This is necessary to
+ // allow some of our builtin JS files to be lazily compiled. These
+ // builtins cannot be handled lazily by the parser, since we have to know
+ // if a function uses the special natives syntax, which is something the
+ // parser records.
+ // If the debugger requests compilation for break points, we cannot be
+ // aggressive about lazy compilation, because it might trigger compilation
+ // of functions without an outer context when setting a breakpoint through
+ // Debug::FindSharedFunctionInfoInScript.
+ bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
+ bool allow_lazy = literal->AllowsLazyCompilation() &&
+ !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
- // If caller is strict mode, the result must be in strict mode or
- // extended mode as well, but not the other way around. Consider:
- // eval("'use strict'; ...");
- ASSERT(language_mode != STRICT_MODE || !result->is_classic_mode());
- // If caller is in extended mode, the result must also be in
- // extended mode.
- ASSERT(language_mode != EXTENDED_MODE ||
- result->is_extended_mode());
- if (!result->dont_cache()) {
- compilation_cache->PutEval(
- source, context, is_global, result, scope_position);
- }
- }
+ // Generate code
+ Handle<ScopeInfo> scope_info;
+ if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
+ Handle<Code> code = isolate->builtins()->CompileUnoptimized();
+ info.SetCode(code);
+ scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
+ } else if (FullCodeGenerator::MakeCode(&info)) {
+ ASSERT(!info.code().is_null());
+ scope_info = ScopeInfo::Create(info.scope(), info.zone());
} else {
- if (result->ic_age() != isolate->heap()->global_ic_age()) {
- result->ResetForNewContext(isolate->heap()->global_ic_age());
- }
+ return Handle<SharedFunctionInfo>::null();
}
+ // Create a shared function info object.
+ Handle<SharedFunctionInfo> result =
+ factory->NewSharedFunctionInfo(literal->name(),
+ literal->materialized_literal_count(),
+ literal->is_generator(),
+ info.code(),
+ scope_info);
+ SetFunctionInfo(result, literal, false, script);
+ RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
+ result->set_allows_lazy_compilation(allow_lazy);
+ result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
+
+ // Set the expected number of properties for instances and return
+ // the resulting function.
+ SetExpectedNofPropertiesFromEstimate(result,
+ literal->expected_property_count());
+ live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
return result;
}
-static bool InstallFullCode(CompilationInfo* info) {
- // Update the shared function info with the compiled code and the
- // scope info. Please note, that the order of the shared function
- // info initialization is important since set_scope_info might
- // trigger a GC, causing the ASSERT below to be invalid if the code
- // was flushed. By setting the code object last we avoid this.
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<Code> code = info->code();
- CHECK(code->kind() == Code::FUNCTION);
- Handle<JSFunction> function = info->closure();
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info->scope(), info->zone());
- shared->set_scope_info(*scope_info);
- shared->ReplaceCode(*code);
- if (!function.is_null()) {
- function->ReplaceCode(*code);
- ASSERT(!function->IsOptimized());
- }
-
- // Set the expected number of properties for instances.
- FunctionLiteral* lit = info->function();
- int expected = lit->expected_property_count();
- SetExpectedNofPropertiesFromEstimate(shared, expected);
-
- // Check the function has compiled code.
- ASSERT(shared->is_compiled());
- shared->set_dont_optimize_reason(lit->dont_optimize_reason());
- shared->set_dont_inline(lit->flags()->Contains(kDontInline));
- shared->set_ast_node_count(lit->ast_node_count());
-
- if (info->isolate()->use_crankshaft() &&
- !function.is_null() &&
- !shared->optimization_disabled()) {
- // If we're asked to always optimize, we compile the optimized
- // version of the function right away - unless the debugger is
- // active as it makes no sense to compile optimized code then.
- if (FLAG_always_opt &&
- !info->isolate()->DebuggerHasBreakPoints()) {
- CompilationInfoWithZone optimized(function);
- optimized.SetOptimizing(BailoutId::None());
- return Compiler::CompileLazy(&optimized);
+static Handle<Code> GetCodeFromOptimizedCodeMap(Handle<JSFunction> function,
+ BailoutId osr_ast_id) {
+ if (FLAG_cache_optimized_code) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ DisallowHeapAllocation no_gc;
+ int index = shared->SearchOptimizedCodeMap(
+ function->context()->native_context(), osr_ast_id);
+ if (index > 0) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ if (!osr_ast_id.IsNone()) {
+ PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+ }
+ PrintF("]\n");
+ }
+ FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
+ if (literals != NULL) function->set_literals(literals);
+ return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index));
}
}
- return true;
-}
-
-
-static void InstallCodeCommon(CompilationInfo* info) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<Code> code = info->code();
- ASSERT(!code.is_null());
-
- // Set optimizable to false if this is disallowed by the shared
- // function info, e.g., we might have flushed the code and must
- // reset this bit when lazy compiling the code again.
- if (shared->optimization_disabled()) code->set_optimizable(false);
-
- if (shared->code() == *code) {
- // Do not send compilation event for the same code twice.
- return;
- }
- Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+ return Handle<Code>::null();
}
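
Illustrative aside (not part of the patch): GetCodeFromOptimizedCodeMap above now probes the shared function's code map with both the native context and the OSR AST id, so OSR entry points can be cached alongside regular optimized code (which sits under BailoutId::None()). The standalone sketch below models that two-part key with made-up types; it is not the SharedFunctionInfo API.

#include <map>
#include <optional>
#include <string>
#include <utility>

using NativeContextId = int;
using BailoutId = int;            // stand-in; kNoneId plays the role of BailoutId::None()
constexpr BailoutId kNoneId = -1;
using Code = std::string;         // stand-in for Handle<Code>

struct OptimizedCodeMap {
  // One entry per (native context, OSR entry point); non-OSR optimized code
  // is stored under kNoneId.
  std::map<std::pair<NativeContextId, BailoutId>, Code> entries;

  void Add(NativeContextId context, BailoutId osr_ast_id, Code code) {
    entries[{context, osr_ast_id}] = std::move(code);
  }

  std::optional<Code> Lookup(NativeContextId context, BailoutId osr_ast_id) const {
    auto it = entries.find({context, osr_ast_id});
    if (it == entries.end()) return std::nullopt;
    return it->second;
  }
};

int main() {
  OptimizedCodeMap map;
  map.Add(/*context=*/1, kNoneId, "regular optimized code");
  map.Add(/*context=*/1, /*osr_ast_id=*/42, "code with OSR entry at AST id 42");
  return map.Lookup(1, 42).has_value() ? 0 : 1;
}
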
@@ -956,329 +1074,172 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
- // Cache non-OSR optimized code.
- if (FLAG_cache_optimized_code && !info->is_osr()) {
+ // Cache optimized code.
+ if (FLAG_cache_optimized_code) {
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<FixedArray> literals(function->literals());
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(
- shared, native_context, code, literals);
+ shared, native_context, code, literals, info->osr_ast_id());
}
}
-static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
- if (!info->IsOptimizing()) return false; // Nothing to look up.
+static bool CompileOptimizedPrologue(CompilationInfo* info) {
+ if (!Parser::Parse(info)) return false;
+ LanguageMode language_mode = info->function()->language_mode();
+ info->SetLanguageMode(language_mode);
- // Lookup non-OSR optimized code.
- if (FLAG_cache_optimized_code && !info->is_osr()) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- Handle<JSFunction> function = info->closure();
- ASSERT(!function.is_null());
- Handle<Context> native_context(function->context()->native_context());
- int index = shared->SearchOptimizedCodeMap(*native_context);
- if (index > 0) {
- if (FLAG_trace_opt) {
- PrintF("[found optimized code for ");
- function->ShortPrint();
- PrintF("]\n");
- }
- // Caching of optimized code enabled and optimized code found.
- shared->InstallFromOptimizedCodeMap(*function, index);
- return true;
- }
- }
- return false;
+ if (!Rewriter::Rewrite(info)) return false;
+ if (!Scope::Analyze(info)) return false;
+ ASSERT(info->scope() != NULL);
+ return true;
}
-bool Compiler::CompileLazy(CompilationInfo* info) {
- Isolate* isolate = info->isolate();
-
- // The VM is in the COMPILER state until exiting this function.
- VMState<COMPILER> state(isolate);
-
- PostponeInterruptsScope postpone(isolate);
-
- Handle<SharedFunctionInfo> shared = info->shared_info();
- int compiled_size = shared->end_position() - shared->start_position();
- isolate->counters()->total_compile_size()->Increment(compiled_size);
-
- if (InstallCodeFromOptimizedCodeMap(info)) return true;
-
- // Generate the AST for the lazily compiled function.
- if (Parser::Parse(info)) {
- // Measure how long it takes to do the lazy compilation; only take the
- // rest of the function into account to avoid overlap with the lazy
- // parsing statistics.
- HistogramTimerScope timer(isolate->counters()->compile_lazy());
-
- // After parsing we know the function's language mode. Remember it.
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
- shared->set_language_mode(language_mode);
-
- // Compile the code.
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) {
- isolate->StackOverflow();
- }
- } else {
- InstallCodeCommon(info);
-
- if (info->IsOptimizing()) {
- // Optimized code successfully created.
- Handle<Code> code = info->code();
- ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
- // TODO(titzer): Only replace the code if it was not an OSR compile.
- info->closure()->ReplaceCode(*code);
- InsertCodeIntoOptimizedCodeMap(info);
- return true;
- } else if (!info->is_osr()) {
- // Compilation failed. Replace with full code if not OSR compile.
- return InstallFullCode(info);
- }
- }
- }
+static bool GetOptimizedCodeNow(CompilationInfo* info) {
+ if (!CompileOptimizedPrologue(info)) return false;
- ASSERT(info->code().is_null());
- return false;
+ Logger::TimerEventScope timer(
+ info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
+
+ OptimizedCompileJob job(info);
+ if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+ if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+ if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false;
+
+ // Success!
+ ASSERT(!info->isolate()->has_pending_exception());
+ InsertCodeIntoOptimizedCodeMap(info);
+ Compiler::RecordFunctionCompilation(
+ Logger::LAZY_COMPILE_TAG, info, info->shared_info());
+ return true;
}
-bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
- uint32_t osr_pc_offset) {
- bool compiling_for_osr = (osr_pc_offset != 0);
-
- Isolate* isolate = closure->GetIsolate();
- // Here we prepare compile data for the concurrent recompilation thread, but
- // this still happens synchronously and interrupts execution.
- Logger::TimerEventScope timer(
- isolate, Logger::TimerEventScope::v8_recompile_synchronous);
-
+static bool GetOptimizedCodeLater(CompilationInfo* info) {
+ Isolate* isolate = info->isolate();
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
- closure->PrintName();
- PrintF(" on next run.\n");
+ info->closure()->PrintName();
+ PrintF(" later.\n");
}
return false;
}
- SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
- Handle<SharedFunctionInfo> shared = info->shared_info();
+ CompilationHandleScope handle_scope(info);
+ if (!CompileOptimizedPrologue(info)) return false;
+ info->SaveHandles(); // Copy handles to the compilation handle scope.
+
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_recompile_synchronous);
+
+ OptimizedCompileJob* job = new(info->zone()) OptimizedCompileJob(info);
+ OptimizedCompileJob::Status status = job->CreateGraph();
+ if (status != OptimizedCompileJob::SUCCEEDED) return false;
+ isolate->optimizing_compiler_thread()->QueueForOptimization(job);
- if (compiling_for_osr) {
- BailoutId osr_ast_id =
- shared->code()->TranslatePcOffsetToAstId(osr_pc_offset);
- ASSERT(!osr_ast_id.IsNone());
- info->SetOptimizing(osr_ast_id);
- info->set_osr_pc_offset(osr_pc_offset);
-
- if (FLAG_trace_osr) {
- PrintF("[COSR - attempt to queue ");
- closure->PrintName();
- PrintF(" at AST id %d]\n", osr_ast_id.ToInt());
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Queued ");
+ info->closure()->PrintName();
+ if (info->is_osr()) {
+ PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
+ } else {
+ PrintF(" for concurrent optimization.\n");
}
- } else {
- info->SetOptimizing(BailoutId::None());
}
+ return true;
+}
+
+Handle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
+ Handle<Code> current_code,
+ ConcurrencyMode mode,
+ BailoutId osr_ast_id) {
+ Handle<Code> cached_code = GetCodeFromOptimizedCodeMap(function, osr_ast_id);
+ if (!cached_code.is_null()) return cached_code;
+
+ SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function));
+ Isolate* isolate = info->isolate();
VMState<COMPILER> state(isolate);
+ ASSERT(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ ASSERT_NE(ScopeInfo::Empty(isolate), shared->scope_info());
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
+ current_code->set_profiler_ticks(0);
- {
- CompilationHandleScope handle_scope(*info);
+ info->SetOptimizing(osr_ast_id, current_code);
- if (!compiling_for_osr && InstallCodeFromOptimizedCodeMap(*info)) {
- return true;
+ if (mode == CONCURRENT) {
+ if (GetOptimizedCodeLater(info.get())) {
+ info.Detach(); // The background recompile job owns this now.
+ return isolate->builtins()->InOptimizationQueue();
}
+ } else {
+ if (GetOptimizedCodeNow(info.get())) return info->code();
+ }
- if (Parser::Parse(*info)) {
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
- shared->set_language_mode(language_mode);
- info->SaveHandles();
-
- if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
- RecompileJob* job = new(info->zone()) RecompileJob(*info);
- RecompileJob::Status status = job->CreateGraph();
- if (status == RecompileJob::SUCCEEDED) {
- info.Detach();
- shared->code()->set_profiler_ticks(0);
- isolate->optimizing_compiler_thread()->QueueForOptimization(job);
- ASSERT(!isolate->has_pending_exception());
- return true;
- } else if (status == RecompileJob::BAILED_OUT) {
- isolate->clear_pending_exception();
- InstallFullCode(*info);
- }
- }
- }
+ // Failed.
+ if (FLAG_trace_opt) {
+ PrintF("[failed to optimize ");
+ function->PrintName();
+ PrintF(": %s]\n", GetBailoutReason(info->bailout_reason()));
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
- return false;
+ return Handle<Code>::null();
}
-Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
+Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
+ // Take ownership of compilation info. Deleting compilation info
+ // also tears down the zone and the recompile job.
SmartPointer<CompilationInfo> info(job->info());
- // The function may have already been optimized by OSR. Simply continue.
- // Except when OSR already disabled optimization for some reason.
- if (info->shared_info()->optimization_disabled()) {
- info->AbortOptimization();
- InstallFullCode(*info);
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** aborting optimization for ");
- info->closure()->PrintName();
- PrintF(" as it has been disabled.\n");
- }
- ASSERT(!info->closure()->IsInRecompileQueue());
- return Handle<Code>::null();
- }
-
Isolate* isolate = info->isolate();
+
VMState<COMPILER> state(isolate);
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
- // If crankshaft succeeded, install the optimized code else install
- // the unoptimized code.
- RecompileJob::Status status = job->last_status();
- if (info->HasAbortedDueToDependencyChange()) {
- info->set_bailout_reason(kBailedOutDueToDependencyChange);
- status = job->AbortOptimization();
- } else if (status != RecompileJob::SUCCEEDED) {
- info->set_bailout_reason(kFailedBailedOutLastTime);
- status = job->AbortOptimization();
- } else if (isolate->DebuggerHasBreakPoints()) {
- info->set_bailout_reason(kDebuggerIsActive);
- status = job->AbortOptimization();
- } else {
- status = job->GenerateAndInstallCode();
- ASSERT(status == RecompileJob::SUCCEEDED ||
- status == RecompileJob::BAILED_OUT);
- }
- InstallCodeCommon(*info);
- if (status == RecompileJob::SUCCEEDED) {
- Handle<Code> code = info->code();
- ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
- info->closure()->ReplaceCode(*code);
- if (info->shared_info()->SearchOptimizedCodeMap(
- info->closure()->context()->native_context()) == -1) {
- InsertCodeIntoOptimizedCodeMap(*info);
- }
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Optimized code for ");
- info->closure()->PrintName();
- PrintF(" installed.\n");
- }
- } else {
- info->AbortOptimization();
- InstallFullCode(*info);
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ shared->code()->set_profiler_ticks(0);
+
+ // 1) Optimization may have failed.
+ // 2) The function may have already been optimized by OSR. Simply continue.
+ // Except when OSR already disabled optimization for some reason.
+ // 3) The code may have already been invalidated due to dependency change.
+ // 4) Debugger may have been activated.
+
+ if (job->last_status() != OptimizedCompileJob::SUCCEEDED ||
+ shared->optimization_disabled() ||
+ info->HasAbortedDueToDependencyChange() ||
+ isolate->DebuggerHasBreakPoints()) {
+ return Handle<Code>::null();
}
- // Optimized code is finally replacing unoptimized code. Reset the latter's
- // profiler ticks to prevent too soon re-opt after a deopt.
- info->shared_info()->code()->set_profiler_ticks(0);
- ASSERT(!info->closure()->IsInRecompileQueue());
- return (status == RecompileJob::SUCCEEDED) ? info->code()
- : Handle<Code>::null();
-}
-
-
-Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
- Handle<Script> script) {
- // Precondition: code has been parsed and scopes have been analyzed.
- CompilationInfoWithZone info(script);
- info.SetFunction(literal);
- info.SetScope(literal->scope());
- info.SetLanguageMode(literal->scope()->language_mode());
- Isolate* isolate = info.isolate();
- Factory* factory = isolate->factory();
- LiveEditFunctionTracker live_edit_tracker(isolate, literal);
- // Determine if the function can be lazily compiled. This is necessary to
- // allow some of our builtin JS files to be lazily compiled. These
- // builtins cannot be handled lazily by the parser, since we have to know
- // if a function uses the special natives syntax, which is something the
- // parser records.
- // If the debugger requests compilation for break points, we cannot be
- // aggressive about lazy compilation, because it might trigger compilation
- // of functions without an outer context when setting a breakpoint through
- // Debug::FindSharedFunctionInfoInScript.
- bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
- bool allow_lazy = literal->AllowsLazyCompilation() &&
- !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
-
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate));
-
- // Generate code
- if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
- Handle<Code> code = isolate->builtins()->LazyCompile();
- info.SetCode(code);
- } else if (GenerateCode(&info)) {
- ASSERT(!info.code().is_null());
- scope_info = ScopeInfo::Create(info.scope(), info.zone());
- } else {
- return Handle<SharedFunctionInfo>::null();
+ if (job->GenerateCode() != OptimizedCompileJob::SUCCEEDED) {
+ return Handle<Code>::null();
}
- // Create a shared function info object.
- Handle<SharedFunctionInfo> result =
- factory->NewSharedFunctionInfo(literal->name(),
- literal->materialized_literal_count(),
- literal->is_generator(),
- info.code(),
- scope_info);
- SetFunctionInfo(result, literal, false, script);
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
- result->set_allows_lazy_compilation(allow_lazy);
- result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
-
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(result,
- literal->expected_property_count());
- live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
- return result;
-}
+ Compiler::RecordFunctionCompilation(
+ Logger::LAZY_COMPILE_TAG, info.get(), shared);
+ if (info->shared_info()->SearchOptimizedCodeMap(
+ info->context()->native_context(), info->osr_ast_id()) == -1) {
+ InsertCodeIntoOptimizedCodeMap(info.get());
+ }
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Optimized code for ");
+ info->closure()->PrintName();
+ PrintF(" generated.\n");
+ }
-// Sets the function info on a function.
-// The start_position points to the first '(' character after the function name
-// in the full script source. When counting characters in the script source the
-// the first character is number 0 (not 1).
-void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script) {
- function_info->set_length(lit->parameter_count());
- function_info->set_formal_parameter_count(lit->parameter_count());
- function_info->set_script(*script);
- function_info->set_function_token_position(lit->function_token_position());
- function_info->set_start_position(lit->start_position());
- function_info->set_end_position(lit->end_position());
- function_info->set_is_expression(lit->is_expression());
- function_info->set_is_anonymous(lit->is_anonymous());
- function_info->set_is_toplevel(is_toplevel);
- function_info->set_inferred_name(*lit->inferred_name());
- function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
- function_info->set_allows_lazy_compilation_without_context(
- lit->AllowsLazyCompilationWithoutContext());
- function_info->set_language_mode(lit->language_mode());
- function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
- function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
- function_info->set_ast_node_count(lit->ast_node_count());
- function_info->set_is_function(lit->is_function());
- function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
- function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
- function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
- function_info->set_is_generator(lit->is_generator());
+ return Handle<Code>(*info->code());
}
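Editor's note: the code-map check in the hunk above only installs the freshly optimized code if no entry already exists for this native context and OSR entry. A minimal, self-contained sketch of that caching idea using plain C++ containers (the struct and key types here are illustrative stand-ins for V8's FixedArray-based optimized code map):

// Illustrative sketch only: a cache of optimized code keyed by
// (native context, OSR entry id), mirroring SearchOptimizedCodeMap /
// InsertCodeIntoOptimizedCodeMap in spirit, not in implementation.
#include <map>
#include <utility>

struct CodeCache {
  // Key: (context id, OSR AST id). Value: opaque pointer to optimized code.
  std::map<std::pair<int, int>, const void*> entries;

  const void* Lookup(int context_id, int osr_ast_id) const {
    std::map<std::pair<int, int>, const void*>::const_iterator it =
        entries.find(std::make_pair(context_id, osr_ast_id));
    return it == entries.end() ? 0 : it->second;  // null plays the role of "-1"
  }

  void Insert(int context_id, int osr_ast_id, const void* code) {
    // Only inserts when no entry exists yet, as in the hunk above.
    entries.insert(std::make_pair(std::make_pair(context_id, osr_ast_id), code));
  }
};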
@@ -1295,31 +1256,18 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
info->isolate()->cpu_profiler()->is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
- if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
+ if (code.is_identical_to(info->isolate()->builtins()->CompileUnoptimized()))
return;
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
int column_num =
GetScriptColumnNumber(script, shared->start_position()) + 1;
USE(line_num);
- if (script->name()->IsString()) {
- PROFILE(info->isolate(),
- CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
- *code,
- *shared,
- info,
- String::cast(script->name()),
- line_num,
- column_num));
- } else {
- PROFILE(info->isolate(),
- CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
- *code,
- *shared,
- info,
- info->isolate()->heap()->empty_string(),
- line_num,
- column_num));
- }
+ String* script_name = script->name()->IsString()
+ ? String::cast(script->name())
+ : info->isolate()->heap()->empty_string();
+ Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
+ PROFILE(info->isolate(), CodeCreateEvent(
+ log_tag, *code, *shared, info, script_name, line_num, column_num));
}
GDBJIT(AddCode(Handle<String>(shared->DebugName()),
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 2d9e52a8e3..f7ff09c57d 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -84,7 +84,7 @@ class CompilationInfo {
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
- uint32_t osr_pc_offset() const { return osr_pc_offset_; }
+ Handle<Code> unoptimized_code() const { return unoptimized_code_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_heap_slots() const;
@@ -98,6 +98,17 @@ class CompilationInfo {
ASSERT(!is_lazy());
flags_ |= IsGlobal::encode(true);
}
+ void set_parameter_count(int parameter_count) {
+ ASSERT(IsStub());
+ parameter_count_ = parameter_count;
+ }
+
+ void set_this_has_uses(bool has_no_uses) {
+ this_has_uses_ = has_no_uses;
+ }
+ bool this_has_uses() {
+ return this_has_uses_;
+ }
void SetLanguageMode(LanguageMode language_mode) {
ASSERT(this->language_mode() == CLASSIC_MODE ||
this->language_mode() == language_mode ||
@@ -164,10 +175,8 @@ class CompilationInfo {
ASSERT(function_ == NULL);
function_ = literal;
}
- void SetScope(Scope* scope) {
- ASSERT(scope_ == NULL);
- scope_ = scope;
- }
+ // When the scope is applied, we may have deferred work to do on the function.
+ void PrepareForCompilation(Scope* scope);
void SetGlobalScope(Scope* global_scope) {
ASSERT(global_scope_ == NULL);
global_scope_ = global_scope;
@@ -184,19 +193,16 @@ class CompilationInfo {
void SetContext(Handle<Context> context) {
context_ = context;
}
- void MarkCompilingForDebugging(Handle<Code> current_code) {
- ASSERT(mode_ != OPTIMIZE);
- ASSERT(current_code->kind() == Code::FUNCTION);
+
+ void MarkCompilingForDebugging() {
flags_ |= IsCompilingForDebugging::encode(true);
- if (current_code->is_compiled_optimizable()) {
- EnableDeoptimizationSupport();
- } else {
- mode_ = CompilationInfo::NONOPT;
- }
}
bool IsCompilingForDebugging() {
return IsCompilingForDebugging::decode(flags_);
}
+ void MarkNonOptimizable() {
+ SetMode(CompilationInfo::NONOPT);
+ }
bool ShouldTrapOnDeopt() const {
return (FLAG_trap_on_deopt && IsOptimizing()) ||
@@ -216,9 +222,12 @@ class CompilationInfo {
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
bool IsStub() const { return mode_ == STUB; }
- void SetOptimizing(BailoutId osr_ast_id) {
+ void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
+ ASSERT(!shared_info_.is_null());
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
+ unoptimized_code_ = unoptimized;
+ optimization_id_ = isolate()->NextOptimizationId();
}
void DisableOptimization();
@@ -234,11 +243,6 @@ class CompilationInfo {
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
- // Reset code to the unoptimized version when optimization is aborted.
- void AbortOptimization() {
- SetCode(handle(shared_info()->code()));
- }
-
void set_deferred_handles(DeferredHandles* deferred_handles) {
ASSERT(deferred_handles_ == NULL);
deferred_handles_ = deferred_handles;
@@ -261,6 +265,7 @@ class CompilationInfo {
SaveHandle(&shared_info_);
SaveHandle(&context_);
SaveHandle(&script_);
+ SaveHandle(&unoptimized_code_);
}
BailoutReason bailout_reason() const { return bailout_reason_; }
@@ -298,22 +303,20 @@ class CompilationInfo {
}
void AbortDueToDependencyChange() {
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
abort_due_to_dependency_ = true;
}
bool HasAbortedDueToDependencyChange() {
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
return abort_due_to_dependency_;
}
- void set_osr_pc_offset(uint32_t pc_offset) {
- osr_pc_offset_ = pc_offset;
+ bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
+ return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_);
}
- bool HasSameOsrEntry(Handle<JSFunction> function, uint32_t pc_offset) {
- return osr_pc_offset_ == pc_offset && function.is_identical_to(closure_);
- }
+ int optimization_id() const { return optimization_id_; }
protected:
CompilationInfo(Handle<Script> script,
@@ -409,9 +412,10 @@ class CompilationInfo {
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
BailoutId osr_ast_id_;
- // The pc_offset corresponding to osr_ast_id_ in unoptimized code.
- // We can look this up in the back edge table, but cache it for quick access.
- uint32_t osr_pc_offset_;
+ // The unoptimized code we patched for OSR may not be the shared code
+ // afterwards, since we may need to compile it again to include deoptimization
+  // data. Keep track of which code we patched.
+ Handle<Code> unoptimized_code_;
// Flag whether compilation needs to be aborted due to dependency change.
bool abort_due_to_dependency_;
@@ -442,8 +446,15 @@ class CompilationInfo {
// during graph optimization.
int opt_count_;
+ // Number of parameters used for compilation of stubs that require arguments.
+ int parameter_count_;
+
+ bool this_has_uses_;
+
Handle<Foreign> object_wrapper_;
+ int optimization_id_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -504,9 +515,9 @@ class LChunk;
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
-class RecompileJob: public ZoneObject {
+class OptimizedCompileJob: public ZoneObject {
public:
- explicit RecompileJob(CompilationInfo* info)
+ explicit OptimizedCompileJob(CompilationInfo* info)
: info_(info),
graph_builder_(NULL),
graph_(NULL),
@@ -520,14 +531,21 @@ class RecompileJob: public ZoneObject {
MUST_USE_RESULT Status CreateGraph();
MUST_USE_RESULT Status OptimizeGraph();
- MUST_USE_RESULT Status GenerateAndInstallCode();
+ MUST_USE_RESULT Status GenerateCode();
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
- MUST_USE_RESULT Status AbortOptimization() {
- info_->AbortOptimization();
+ MUST_USE_RESULT Status AbortOptimization(
+ BailoutReason reason = kNoReason) {
+ if (reason != kNoReason) info_->set_bailout_reason(reason);
+ return SetLastStatus(BAILED_OUT);
+ }
+
+ MUST_USE_RESULT Status AbortAndDisableOptimization(
+ BailoutReason reason = kNoReason) {
+ if (reason != kNoReason) info_->set_bailout_reason(reason);
info_->shared_info()->DisableOptimization(info_->bailout_reason());
return SetLastStatus(BAILED_OUT);
}
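Editor's note: the renamed OptimizedCompileJob runs as a three-phase state machine (CreateGraph, OptimizeGraph, GenerateCode), and the new AbortOptimization/AbortAndDisableOptimization split decides whether a bailout also disables future optimization. A self-contained sketch of the phase-driver pattern, with V8's internal types replaced by minimal stand-ins:

// Self-contained illustration of the three-phase job pattern; the status
// names follow the header above, everything else is a stand-in.
enum Status { FAILED, BAILED_OUT, SUCCEEDED };

class Job {
 public:
  Status CreateGraph()   { return last_ = SUCCEEDED; }  // build IR (main thread)
  Status OptimizeGraph() { return last_ = SUCCEEDED; }  // optimize (possibly off-thread)
  Status GenerateCode()  { return last_ = SUCCEEDED; }  // emit code (main thread)
  Status last_status() const { return last_; }
 private:
  Status last_;
};

bool RunToCompletion(Job* job) {
  return job->CreateGraph() == SUCCEEDED &&
         job->OptimizeGraph() == SUCCEEDED &&
         job->GenerateCode() == SUCCEEDED;
}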
@@ -557,7 +575,7 @@ class RecompileJob: public ZoneObject {
void RecordOptimizationStats();
struct Timer {
- Timer(RecompileJob* job, TimeDelta* location)
+ Timer(OptimizedCompileJob* job, TimeDelta* location)
: job_(job), location_(location) {
ASSERT(location_ != NULL);
timer_.Start();
@@ -567,7 +585,7 @@ class RecompileJob: public ZoneObject {
*location_ += timer_.Elapsed();
}
- RecompileJob* job_;
+ OptimizedCompileJob* job_;
ElapsedTimer timer_;
TimeDelta* location_;
};
@@ -587,56 +605,53 @@ class RecompileJob: public ZoneObject {
class Compiler : public AllStatic {
public:
- // Call count before primitive functions trigger their own optimization.
- static const int kCallsUntilPrimitiveOpt = 200;
+ static Handle<Code> GetUnoptimizedCode(Handle<JSFunction> function);
+ static Handle<Code> GetUnoptimizedCode(Handle<SharedFunctionInfo> shared);
+ static bool EnsureCompiled(Handle<JSFunction> function,
+ ClearExceptionFlag flag);
+ static Handle<Code> GetCodeForDebugging(Handle<JSFunction> function);
- // All routines return a SharedFunctionInfo.
- // If an error occurs an exception is raised and the return handle
- // contains NULL.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ static void CompileForLiveEdit(Handle<Script> script);
+#endif
- // Compile a String source within a context.
- static Handle<SharedFunctionInfo> Compile(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag is_natives_code);
-
- // Compile a String source within a context for Eval.
- static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
+ // Compile a String source within a context for eval.
+ static Handle<JSFunction> GetFunctionFromEval(Handle<String> source,
Handle<Context> context,
- bool is_global,
LanguageMode language_mode,
ParseRestriction restriction,
int scope_position);
- // Compile from function info (used for lazy compilation). Returns true on
- // success and false if the compilation resulted in a stack overflow.
- static bool CompileLazy(CompilationInfo* info);
-
- static bool RecompileConcurrent(Handle<JSFunction> function,
- uint32_t osr_pc_offset = 0);
-
- // Compile a shared function info object (the function is possibly lazily
- // compiled).
+ // Compile a String source within a context.
+ static Handle<SharedFunctionInfo> CompileScript(Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ bool is_shared_cross_origin,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptDataImpl* pre_data,
+ Handle<Object> script_data,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object (the code may be lazily compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
Handle<Script> script);
- // Set the function info for a newly compiled function.
- static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
+ enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
- static Handle<Code> InstallOptimizedCode(RecompileJob* job);
+ // Generate and return optimized code or start a concurrent optimization job.
+ // In the latter case, return the InOptimizationQueue builtin. On failure,
+ // return the empty handle.
+ static Handle<Code> GetOptimizedCode(
+ Handle<JSFunction> function,
+ Handle<Code> current_code,
+ ConcurrencyMode mode,
+ BailoutId osr_ast_id = BailoutId::None());
-#ifdef ENABLE_DEBUGGER_SUPPORT
- static bool MakeCodeForLiveEdit(CompilationInfo* info);
-#endif
+ // Generate and return code from previously queued optimization job.
+ // On failure, return the empty handle.
+ static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CompilationInfo* info,
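Editor's note: the new Compiler entry points replace CompileLazy/RecompileConcurrent with a single GetOptimizedCode call parameterized by ConcurrencyMode. A hedged sketch of how a caller might use the declarations above; the helper name and fallback logic are assumptions, only the Compiler calls come from the header:

// Illustrative call site (not V8 runtime code).
Handle<Code> RequestOptimization(Handle<JSFunction> function,
                                 Handle<Code> current_code,
                                 bool concurrent) {
  Handle<Code> optimized = Compiler::GetOptimizedCode(
      function, current_code,
      concurrent ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT);
  // Per the comment above: an empty handle means failure; in the concurrent
  // case a non-empty handle (the InOptimizationQueue builtin) is returned
  // while the job runs in the background.
  return optimized.is_null() ? current_code : optimized;
}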
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 189c215e63..b2e0661a34 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -131,9 +131,9 @@ enum BindingFlags {
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
- V(FLOAT_ARRAY_FUN_INDEX, JSFunction, float_array_fun) \
- V(DOUBLE_ARRAY_FUN_INDEX, JSFunction, double_array_fun) \
- V(UINT8C_ARRAY_FUN_INDEX, JSFunction, uint8c_array_fun) \
+ V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
+ V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
+ V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
@@ -166,6 +166,8 @@ enum BindingFlags {
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
+ V(RUN_MICROTASKS_INDEX, JSFunction, run_microtasks) \
+ V(ENQUEUE_EXTERNAL_MICROTASK_INDEX, JSFunction, enqueue_external_microtask) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
@@ -178,14 +180,12 @@ enum BindingFlags {
observers_begin_perform_splice) \
V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \
observers_end_perform_splice) \
- V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
strict_mode_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
generator_object_prototype_map) \
- V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map) \
- V(RANDOM_SEED_INDEX, ByteArray, random_seed)
+ V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@@ -297,9 +297,9 @@ class Context: public FixedArray {
INT16_ARRAY_FUN_INDEX,
UINT32_ARRAY_FUN_INDEX,
INT32_ARRAY_FUN_INDEX,
- FLOAT_ARRAY_FUN_INDEX,
- DOUBLE_ARRAY_FUN_INDEX,
- UINT8C_ARRAY_FUN_INDEX,
+ FLOAT32_ARRAY_FUN_INDEX,
+ FLOAT64_ARRAY_FUN_INDEX,
+ UINT8_CLAMPED_ARRAY_FUN_INDEX,
DATA_VIEW_FUN_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
@@ -318,6 +318,8 @@ class Context: public FixedArray {
EMBEDDER_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
+ RUN_MICROTASKS_INDEX,
+ ENQUEUE_EXTERNAL_MICROTASK_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
@@ -327,12 +329,10 @@ class Context: public FixedArray {
OBSERVERS_ENQUEUE_SPLICE_INDEX,
OBSERVERS_BEGIN_SPLICE_INDEX,
OBSERVERS_END_SPLICE_INDEX,
- OBSERVERS_DELIVER_CHANGES_INDEX,
GENERATOR_FUNCTION_MAP_INDEX,
STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
GENERATOR_RESULT_MAP_INDEX,
- RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
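Editor's note: the V(index, type, name) list being edited above is an X-macro that is expanded in several places (the enum below it, plus accessor definitions elsewhere). A generic, self-contained illustration of that pattern; the expansion shown here is illustrative and not V8's actual one:

#include <cstdio>

#define NATIVE_FIELDS(V) \
  V(FLOAT32_ARRAY_FUN_INDEX, "float32_array_fun") \
  V(FLOAT64_ARRAY_FUN_INDEX, "float64_array_fun")

enum FieldIndex {
#define DECLARE_INDEX(index, name) index,
  NATIVE_FIELDS(DECLARE_INDEX)
#undef DECLARE_INDEX
  FIELD_COUNT
};

int main() {
#define PRINT_FIELD(index, name) std::printf("%d -> %s\n", index, name);
  NATIVE_FIELDS(PRINT_FIELD)  // one printf per entry in the list
#undef PRINT_FIELD
  return 0;
}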
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 7ba19ba0f1..e503eb5027 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -88,7 +88,7 @@ inline unsigned int FastD2UI(double x) {
inline double DoubleToInteger(double x) {
if (std::isnan(x)) return 0;
if (!std::isfinite(x) || x == 0) return x;
- return (x >= 0) ? floor(x) : ceil(x);
+ return (x >= 0) ? std::floor(x) : std::ceil(x);
}
@@ -128,7 +128,7 @@ inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
Iterator* current,
EndMark end) {
while (*current != end) {
- if (!unicode_cache->IsWhiteSpace(**current)) return true;
+ if (!unicode_cache->IsWhiteSpaceOrLineTerminator(**current)) return true;
++*current;
}
return false;
@@ -233,7 +233,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
}
ASSERT(number != 0);
- return ldexp(static_cast<double>(negative ? -number : number), exponent);
+ return std::ldexp(static_cast<double>(negative ? -number : number), exponent);
}
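Editor's note: switching to std::floor/std::ceil keeps DoubleToInteger's ToInteger semantics: NaN maps to 0, infinities pass through, and finite values truncate toward zero (floor for positives, ceil for negatives). A self-contained check of that behavior:

#include <cassert>
#include <cmath>

static double ToInteger(double x) {
  if (std::isnan(x)) return 0;
  if (!std::isfinite(x) || x == 0) return x;
  return (x >= 0) ? std::floor(x) : std::ceil(x);
}

int main() {
  assert(ToInteger(2.9) == 2.0);
  assert(ToInteger(-2.9) == -2.0);       // ceil, not floor, for negatives
  assert(ToInteger(NAN) == 0.0);
  assert(std::isinf(ToInteger(INFINITY)));
  return 0;
}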
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 5f1219eea9..9c52d41e5d 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -394,15 +394,16 @@ char* DoubleToRadixCString(double value, int radix) {
if (is_negative) value = -value;
// Get the integer part and the decimal part.
- double integer_part = floor(value);
+ double integer_part = std::floor(value);
double decimal_part = value - integer_part;
// Convert the integer part starting from the back. Always generate
// at least one digit.
int integer_pos = kBufferSize - 2;
do {
- integer_buffer[integer_pos--] =
- chars[static_cast<int>(fmod(integer_part, radix))];
+ double remainder = std::fmod(integer_part, radix);
+ integer_buffer[integer_pos--] = chars[static_cast<int>(remainder)];
+ integer_part -= remainder;
integer_part /= radix;
} while (integer_part >= 1.0);
// Sanity check.
@@ -423,8 +424,8 @@ char* DoubleToRadixCString(double value, int radix) {
while ((decimal_part > 0.0) && (decimal_pos < kBufferSize - 1)) {
decimal_part *= radix;
decimal_buffer[decimal_pos++] =
- chars[static_cast<int>(floor(decimal_part))];
- decimal_part -= floor(decimal_part);
+ chars[static_cast<int>(std::floor(decimal_part))];
+ decimal_part -= std::floor(decimal_part);
}
decimal_buffer[decimal_pos] = '\0';
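Editor's note: the integer-digit loop now subtracts the remainder before dividing, so integer_part stays an exact multiple of the radix and repeated division does not accumulate rounding error. A self-contained sketch of just that loop (function name and return type are illustrative):

#include <cmath>
#include <cstdio>
#include <string>

std::string IntegerToRadix(double integer_part, int radix) {
  static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
  std::string digits;
  do {
    double remainder = std::fmod(integer_part, radix);
    digits.insert(digits.begin(), chars[static_cast<int>(remainder)]);
    integer_part -= remainder;   // keep the value exactly divisible by radix
    integer_part /= radix;
  } while (integer_part >= 1.0);
  return digits;
}

int main() {
  std::printf("%s\n", IntegerToRadix(255.0, 16).c_str());  // prints "ff"
  return 0;
}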
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 7aa2d3fb3a..f850f581f0 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -72,7 +72,7 @@ inline int FastD2IChecked(double x) {
// The result is unspecified if x is infinite or NaN, or if the rounded
// integer value is outside the range of type int.
inline int FastD2I(double x) {
- return static_cast<int>(x);
+ return static_cast<int32_t>(x);
}
inline unsigned int FastD2UI(double x);
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index b1af621ccc..41f3e98644 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -156,6 +156,16 @@ void ProfilerEventsProcessor::Run() {
}
+void* ProfilerEventsProcessor::operator new(size_t size) {
+ return AlignedAlloc(size, V8_ALIGNOF(ProfilerEventsProcessor));
+}
+
+
+void ProfilerEventsProcessor::operator delete(void* ptr) {
+ AlignedFree(ptr);
+}
+
+
int CpuProfiler::GetProfilesCount() {
// The count of profiles doesn't depend on a security token.
return profiles_->profiles()->length();
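Editor's note: the class-scope operator new/delete above exist because the SamplingCircularQueue member needs stronger alignment than a default heap allocation guarantees. A self-contained sketch of the same idiom; V8's AlignedAlloc/AlignedFree are internal, so posix_memalign/free stand in here (POSIX-only assumption):

#include <cstddef>
#include <cstdlib>
#include <new>

class CacheAlignedThing {
 public:
  void* operator new(std::size_t size) {
    void* ptr = 0;
    if (posix_memalign(&ptr, kAlignment, size) != 0) throw std::bad_alloc();
    return ptr;  // storage aligned to kAlignment, unlike plain ::operator new
  }
  void operator delete(void* ptr) { free(ptr); }

 private:
  static const std::size_t kAlignment = 64;  // e.g. one cache line
  char payload_[256];
};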
@@ -176,6 +186,10 @@ void CpuProfiler::DeleteAllProfiles() {
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
profiles_->RemoveProfile(profile);
delete profile;
+ if (profiles_->profiles()->is_empty() && !is_profiling_) {
+ // If this was the last profile, clean up all accessory data as well.
+ ResetProfiles();
+ }
}
@@ -376,7 +390,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate)
sampling_interval_(TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(new CpuProfilesCollection(isolate->heap())),
- next_profile_uid_(1),
generator_(NULL),
processor_(NULL),
is_profiling_(false) {
@@ -391,7 +404,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
sampling_interval_(TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
- next_profile_uid_(1),
generator_(test_generator),
processor_(test_processor),
is_profiling_(false) {
@@ -417,7 +429,7 @@ void CpuProfiler::ResetProfiles() {
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
- if (profiles_->StartProfiling(title, next_profile_uid_++, record_samples)) {
+ if (profiles_->StartProfiling(title, record_samples)) {
StartProcessorIfNotStarted();
}
processor_->AddCurrentStack(isolate_);
@@ -437,18 +449,8 @@ void CpuProfiler::StartProcessorIfNotStarted() {
logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
Sampler* sampler = logger->sampler();
-#if V8_CC_MSVC && (_MSC_VER >= 1800)
- // VS2013 reports "warning C4316: 'v8::internal::ProfilerEventsProcessor'
- // : object allocated on the heap may not be aligned 64". We need to
- // figure out if this is a legitimate warning or a compiler bug.
- #pragma warning(push)
- #pragma warning(disable:4316)
-#endif
processor_ = new ProfilerEventsProcessor(
generator_, sampler, sampling_interval_);
-#if V8_CC_MSVC && (_MSC_VER >= 1800)
- #pragma warning(pop)
-#endif
is_profiling_ = true;
// Enumerate stuff we already have in the heap.
ASSERT(isolate_->heap()->HasBeenSetUp());
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index fcb9a67ddf..a9f034f0c7 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -158,6 +158,11 @@ class ProfilerEventsProcessor : public Thread {
inline TickSample* StartTickSample();
inline void FinishTickSample();
+ // SamplingCircularQueue has stricter alignment requirements than a normal new
+ // can fulfil, so we need to provide our own new/delete here.
+ void* operator new(size_t size);
+ void operator delete(void* ptr);
+
private:
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
@@ -268,7 +273,6 @@ class CpuProfiler : public CodeEventListener {
Isolate* isolate_;
TimeDelta sampling_interval_;
CpuProfilesCollection* profiles_;
- unsigned next_profile_uid_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
bool saved_is_logging_;
diff --git a/deps/v8/src/cpu.cc b/deps/v8/src/cpu.cc
index 2bf51a7f6c..1e622495f4 100644
--- a/deps/v8/src/cpu.cc
+++ b/deps/v8/src/cpu.cc
@@ -27,19 +27,22 @@
#include "cpu.h"
-#if V8_CC_MSVC
+#if V8_LIBC_MSVCRT
#include <intrin.h> // __cpuid()
#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
#endif
+#if V8_OS_QNX
+#include <sys/syspage.h> // cpuinfo
+#endif
+#include <ctype.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
#include <algorithm>
-#include <cctype>
-#include <climits>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
#include "checks.h"
#if V8_OS_WIN
@@ -51,8 +54,8 @@ namespace internal {
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-// Define __cpuid() for non-MSVC compilers.
-#if !V8_CC_MSVC
+// Define __cpuid() for non-MSVC libraries.
+#if !V8_LIBC_MSVCRT
static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#if defined(__i386__) && defined(__pic__)
@@ -74,10 +77,12 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // defined(__i386__) && defined(__pic__)
}
-#endif // !V8_CC_MSVC
+#endif // !V8_LIBC_MSVCRT
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+#if V8_OS_LINUX
+
#if V8_HOST_ARCH_ARM
// See <uapi/asm/hwcap.h> kernel header.
@@ -249,6 +254,8 @@ static bool HasListItem(const char* list, const char* item) {
return false;
}
+#endif // V8_OS_LINUX
+
#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
CPU::CPU() : stepping_(0),
@@ -328,7 +335,11 @@ CPU::CPU() : stepping_(0),
has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
#endif
}
+
#elif V8_HOST_ARCH_ARM
+
+#if V8_OS_LINUX
+
CPUInfo cpu_info;
// Extract implementor from the "CPU implementer" field.
@@ -438,7 +449,34 @@ CPU::CPU() : stepping_(0),
// We don't support any FPUs other than VFP.
has_fpu_ = has_vfp_;
+
+#elif V8_OS_QNX
+
+ uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags;
+ if (cpu_flags & ARM_CPU_FLAG_V7) {
+ architecture_ = 7;
+ has_thumbee_ = true;
+ } else if (cpu_flags & ARM_CPU_FLAG_V6) {
+ architecture_ = 6;
+ // QNX doesn't say if ThumbEE is available.
+ // Assume false for the architectures older than ARMv7.
+ }
+ ASSERT(architecture_ >= 6);
+ has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
+ has_vfp_ = has_fpu_;
+ if (cpu_flags & ARM_CPU_FLAG_NEON) {
+ has_neon_ = true;
+ has_vfp3_ = has_vfp_;
+#ifdef ARM_CPU_FLAG_VFP_D32
+ has_vfp3_d32_ = (cpu_flags & ARM_CPU_FLAG_VFP_D32) != 0;
+#endif
+ }
+ has_idiva_ = (cpu_flags & ARM_CPU_FLAG_IDIV) != 0;
+
+#endif // V8_OS_LINUX
+
#elif V8_HOST_ARCH_MIPS
+
// Simple detection of FPU at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to MIPS (early 2010), no similar
@@ -448,6 +486,7 @@ CPU::CPU() : stepping_(0),
char* cpu_model = cpu_info.ExtractField("cpu model");
has_fpu_ = HasListItem(cpu_model, "FPU");
delete[] cpu_model;
+
#endif
}
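Editor's note: on x86 the detection above fills cpu_info[4] with EAX/EBX/ECX/EDX via __cpuid and then tests individual feature bits. A hedged, GCC/Clang-only sketch of the same probing using the compiler-provided <cpuid.h> wrapper (SSE2 is bit 26 of EDX for leaf 1); other architectures fall back to /proc/cpuinfo or the QNX syspage as shown in the diff:

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    bool has_sse2 = (edx & (1u << 26)) != 0;
    std::printf("sse2: %s\n", has_sse2 ? "yes" : "no");
  }
  return 0;
}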
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index 379631cb7c..2c909fa762 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -30,6 +30,7 @@
#include "d8.h"
#include "d8-debug.h"
#include "debug-agent.h"
+#include "platform/socket.h"
namespace v8 {
@@ -63,7 +64,8 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
TryCatch try_catch;
// Get the toJSONProtocol function on the event and get the JSON format.
- Local<String> to_json_fun_name = String::New("toJSONProtocol");
+ Local<String> to_json_fun_name =
+ String::NewFromUtf8(isolate, "toJSONProtocol");
Handle<Object> event_data = event_details.GetEventData();
Local<Function> to_json_fun =
Local<Function>::Cast(event_data->Get(to_json_fun_name));
@@ -80,7 +82,7 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
Shell::ReportException(isolate, &try_catch);
return;
}
- String::Utf8Value str(details->Get(String::New("text")));
+ String::Utf8Value str(details->Get(String::NewFromUtf8(isolate, "text")));
if (str.length() == 0) {
// Empty string is used to signal not to process this event.
return;
@@ -88,7 +90,8 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
printf("%s\n", *str);
// Get the debug command processor.
- Local<String> fun_name = String::New("debugCommandProcessor");
+ Local<String> fun_name =
+ String::NewFromUtf8(isolate, "debugCommandProcessor");
Handle<Object> exec_state = event_details.GetExecutionState();
Local<Function> fun = Local<Function>::Cast(exec_state->Get(fun_name));
Local<Object> cmd_processor =
@@ -112,8 +115,8 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
TryCatch try_catch;
// Convert the debugger command to a JSON debugger request.
- Handle<Value> request =
- Shell::DebugCommandToJSONRequest(isolate, String::New(command));
+ Handle<Value> request = Shell::DebugCommandToJSONRequest(
+ isolate, String::NewFromUtf8(isolate, command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
continue;
@@ -134,7 +137,7 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
// Invoke the JavaScript to convert the debug command line to a JSON
// request, invoke the JSON request and convert the JSON respose to a text
// representation.
- fun_name = String::New("processDebugRequest");
+ fun_name = String::NewFromUtf8(isolate, "processDebugRequest");
fun = Handle<Function>::Cast(cmd_processor->Get(fun_name));
args[0] = request;
Handle<Value> response_val = fun->Call(cmd_processor, kArgc, args);
@@ -151,12 +154,14 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
Shell::ReportException(isolate, &try_catch);
continue;
}
- String::Utf8Value text_str(response_details->Get(String::New("text")));
+ String::Utf8Value text_str(
+ response_details->Get(String::NewFromUtf8(isolate, "text")));
if (text_str.length() > 0) {
printf("%s\n", *text_str);
}
- running =
- response_details->Get(String::New("running"))->ToBoolean()->Value();
+ running = response_details->Get(String::NewFromUtf8(isolate, "running"))
+ ->ToBoolean()
+ ->Value();
}
}
@@ -273,15 +278,14 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
// Print the event details.
TryCatch try_catch;
- Handle<Object> details =
- Shell::DebugMessageDetails(isolate_,
- Handle<String>::Cast(String::New(message)));
+ Handle<Object> details = Shell::DebugMessageDetails(
+ isolate_, Handle<String>::Cast(String::NewFromUtf8(isolate_, message)));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
return;
}
- String::Utf8Value str(details->Get(String::New("text")));
+ String::Utf8Value str(details->Get(String::NewFromUtf8(isolate_, "text")));
if (str.length() == 0) {
// Empty string is used to signal not to process this event.
return;
@@ -292,7 +296,9 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
printf("???\n");
}
- bool is_running = details->Get(String::New("running"))->ToBoolean()->Value();
+ bool is_running = details->Get(String::NewFromUtf8(isolate_, "running"))
+ ->ToBoolean()
+ ->Value();
PrintPrompt(is_running);
}
@@ -303,8 +309,8 @@ void RemoteDebugger::HandleKeyboardCommand(char* command) {
// Convert the debugger command to a JSON debugger request.
TryCatch try_catch;
- Handle<Value> request =
- Shell::DebugCommandToJSONRequest(isolate_, String::New(command));
+ Handle<Value> request = Shell::DebugCommandToJSONRequest(
+ isolate_, String::NewFromUtf8(isolate_, command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
@@ -327,13 +333,13 @@ void ReceiverThread::Run() {
// Receive the connect message (with empty body).
i::SmartArrayPointer<char> message =
i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
- ASSERT(*message == NULL);
+ ASSERT(message.get() == NULL);
while (true) {
// Receive a message.
i::SmartArrayPointer<char> message =
i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
- if (*message == NULL) {
+ if (message.get() == NULL) {
remote_debugger_->ConnectionClosed();
return;
}
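Editor's note: the d8-debug changes follow the API migration in which handle-creating calls take an explicit Isolate*: String::New(...) becomes String::NewFromUtf8(isolate, ...). A hedged before/after sketch; the helper name is illustrative, the V8 calls are the ones used in the diff:

#include "v8.h"
using namespace v8;

Local<String> MakeLabel(Isolate* isolate) {
  // Old API (removed): String::New("text");
  // New API: the owning isolate is named explicitly.
  return String::NewFromUtf8(isolate, "text");
}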
diff --git a/deps/v8/src/d8-debug.h b/deps/v8/src/d8-debug.h
index 55876229a3..f753177263 100644
--- a/deps/v8/src/d8-debug.h
+++ b/deps/v8/src/d8-debug.h
@@ -135,7 +135,7 @@ class RemoteDebuggerEvent {
static const int kDisconnect = 3;
int type() { return type_; }
- char* data() { return *data_; }
+ char* data() { return data_.get(); }
private:
void set_next(RemoteDebuggerEvent* event) { next_ = event; }
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 81c15ae742..36ade48b8a 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -188,12 +188,12 @@ class ExecArgs {
ExecArgs() {
exec_args_[0] = NULL;
}
- bool Init(Handle<Value> arg0, Handle<Array> command_args) {
+ bool Init(Isolate* isolate, Handle<Value> arg0, Handle<Array> command_args) {
String::Utf8Value prog(arg0);
if (*prog == NULL) {
const char* message =
"os.system(): String conversion of program name failed";
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
int len = prog.length() + 3;
@@ -202,13 +202,13 @@ class ExecArgs {
exec_args_[0] = c_arg;
int i = 1;
for (unsigned j = 0; j < command_args->Length(); i++, j++) {
- Handle<Value> arg(command_args->Get(Integer::New(j)));
+ Handle<Value> arg(command_args->Get(Integer::New(isolate, j)));
String::Utf8Value utf8_arg(arg);
if (*utf8_arg == NULL) {
exec_args_[i] = NULL; // Consistent state for destructor.
const char* message =
"os.system(): String conversion of argument failed.";
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
int len = utf8_arg.length() + 1;
@@ -245,8 +245,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
if (args[3]->IsNumber()) {
*total_timeout = args[3]->Int32Value();
} else {
- args.GetIsolate()->ThrowException(
- String::New("system: Argument 4 must be a number"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "system: Argument 4 must be a number"));
return false;
}
}
@@ -254,8 +254,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
if (args[2]->IsNumber()) {
*read_timeout = args[2]->Int32Value();
} else {
- args.GetIsolate()->ThrowException(
- String::New("system: Argument 3 must be a number"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "system: Argument 3 must be a number"));
return false;
}
}
@@ -293,14 +293,14 @@ static void ExecSubprocess(int* exec_error_fds,
// Runs in the parent process. Checks that the child was able to exec (closing
// the file desriptor), or reports an error if it failed.
-static bool ChildLaunchedOK(int* exec_error_fds) {
+static bool ChildLaunchedOK(Isolate* isolate, int* exec_error_fds) {
int bytes_read;
int err;
do {
bytes_read = read(exec_error_fds[kReadFD], &err, sizeof(err));
} while (bytes_read == -1 && errno == EINTR);
if (bytes_read != 0) {
- ThrowException(String::New(strerror(err)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(err)));
return false;
}
return true;
@@ -309,18 +309,20 @@ static bool ChildLaunchedOK(int* exec_error_fds) {
// Accumulates the output from the child in a string handle. Returns true if it
// succeeded or false if an exception was thrown.
-static Handle<Value> GetStdout(int child_fd,
+static Handle<Value> GetStdout(Isolate* isolate,
+ int child_fd,
struct timeval& start_time,
int read_timeout,
int total_timeout) {
- Handle<String> accumulator = String::Empty();
+ Handle<String> accumulator = String::Empty(isolate);
int fullness = 0;
static const int kStdoutReadBufferSize = 4096;
char buffer[kStdoutReadBufferSize];
if (fcntl(child_fd, F_SETFL, O_NONBLOCK) != 0) {
- return ThrowException(String::New(strerror(errno)));
+ return isolate->ThrowException(
+ String::NewFromUtf8(isolate, strerror(errno)));
}
int bytes_read;
@@ -335,7 +337,8 @@ static Handle<Value> GetStdout(int child_fd,
total_timeout,
start_time) ||
(TimeIsOut(start_time, total_timeout))) {
- return ThrowException(String::New("Timed out waiting for output"));
+ return isolate->ThrowException(
+ String::NewFromUtf8(isolate, "Timed out waiting for output"));
}
continue;
} else if (errno == EINTR) {
@@ -348,7 +351,8 @@ static Handle<Value> GetStdout(int child_fd,
int length = bytes_read == 0 ?
bytes_read + fullness :
LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
- Handle<String> addition = String::New(buffer, length);
+ Handle<String> addition =
+ String::NewFromUtf8(isolate, buffer, String::kNormalString, length);
accumulator = String::Concat(accumulator, addition);
fullness = bytes_read + fullness - length;
memcpy(buffer, buffer + length, fullness);
@@ -377,7 +381,8 @@ static Handle<Value> GetStdout(int child_fd,
// Get exit status of child.
-static bool WaitForChild(int pid,
+static bool WaitForChild(Isolate* isolate,
+ int pid,
ZombieProtector& child_waiter,
struct timeval& start_time,
int read_timeout,
@@ -394,7 +399,8 @@ static bool WaitForChild(int pid,
if (useconds < 1000000) useconds <<= 1;
if ((read_timeout != -1 && useconds / 1000 > read_timeout) ||
(TimeIsOut(start_time, total_timeout))) {
- ThrowException(String::New("Timed out waiting for process to terminate"));
+ isolate->ThrowException(String::NewFromUtf8(
+ isolate, "Timed out waiting for process to terminate"));
kill(pid, SIGINT);
return false;
}
@@ -405,7 +411,7 @@ static bool WaitForChild(int pid,
sizeof(message),
"Child killed by signal %d",
child_info.si_status);
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
if (child_info.si_code == CLD_EXITED && child_info.si_status != 0) {
@@ -414,7 +420,7 @@ static bool WaitForChild(int pid,
sizeof(message),
"Child exited with status %d",
child_info.si_status);
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
@@ -429,7 +435,7 @@ static bool WaitForChild(int pid,
sizeof(message),
"Child killed by signal %d",
WTERMSIG(child_status));
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
if (WEXITSTATUS(child_status) != 0) {
@@ -439,7 +445,7 @@ static bool WaitForChild(int pid,
sizeof(message),
"Child exited with status %d",
exit_status);
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
@@ -458,22 +464,22 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
- args.GetIsolate()->ThrowException(
- String::New("system: Argument 2 must be an array"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "system: Argument 2 must be an array"));
return;
}
command_args = Handle<Array>::Cast(args[1]);
} else {
- command_args = Array::New(0);
+ command_args = Array::New(args.GetIsolate(), 0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
- args.GetIsolate()->ThrowException(
- String::New("Too many arguments to system()"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "Too many arguments to system()"));
return;
}
if (args.Length() < 1) {
- args.GetIsolate()->ThrowException(
- String::New("Too few arguments to system()"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "Too few arguments to system()"));
return;
}
@@ -481,7 +487,7 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
gettimeofday(&start_time, NULL);
ExecArgs exec_args;
- if (!exec_args.Init(args[0], command_args)) {
+ if (!exec_args.Init(args.GetIsolate(), args[0], command_args)) {
return;
}
int exec_error_fds[2];
@@ -489,12 +495,12 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (pipe(exec_error_fds) != 0) {
args.GetIsolate()->ThrowException(
- String::New("pipe syscall failed."));
+ String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed."));
return;
}
if (pipe(stdout_fds) != 0) {
args.GetIsolate()->ThrowException(
- String::New("pipe syscall failed."));
+ String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed."));
return;
}
@@ -511,9 +517,10 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
- if (!ChildLaunchedOK(exec_error_fds)) return;
+ if (!ChildLaunchedOK(args.GetIsolate(), exec_error_fds)) return;
- Handle<Value> accumulator = GetStdout(stdout_fds[kReadFD],
+ Handle<Value> accumulator = GetStdout(args.GetIsolate(),
+ stdout_fds[kReadFD],
start_time,
read_timeout,
total_timeout);
@@ -523,7 +530,8 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- if (!WaitForChild(pid,
+ if (!WaitForChild(args.GetIsolate(),
+ pid,
child_waiter,
start_time,
read_timeout,
@@ -538,17 +546,20 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "chdir() takes one argument";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.chdir(): String conversion of argument failed.";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
if (chdir(*directory) != 0) {
- args.GetIsolate()->ThrowException(String::New(strerror(errno)));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), strerror(errno)));
return;
}
}
@@ -557,7 +568,8 @@ void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "umask() takes one argument";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
if (args[0]->IsNumber()) {
@@ -567,50 +579,51 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
} else {
const char* message = "umask() argument must be numeric";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
}
-static bool CheckItsADirectory(char* directory) {
+static bool CheckItsADirectory(Isolate* isolate, char* directory) {
struct stat stat_buf;
int stat_result = stat(directory, &stat_buf);
if (stat_result != 0) {
- ThrowException(String::New(strerror(errno)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
return false;
}
if ((stat_buf.st_mode & S_IFDIR) != 0) return true;
- ThrowException(String::New(strerror(EEXIST)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(EEXIST)));
return false;
}
// Returns true for success. Creates intermediate directories as needed. No
// error if the directory exists already.
-static bool mkdirp(char* directory, mode_t mask) {
+static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
int result = mkdir(directory, mask);
if (result == 0) return true;
if (errno == EEXIST) {
- return CheckItsADirectory(directory);
+ return CheckItsADirectory(isolate, directory);
} else if (errno == ENOENT) { // Intermediate path element is missing.
char* last_slash = strrchr(directory, '/');
if (last_slash == NULL) {
- ThrowException(String::New(strerror(errno)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
return false;
}
*last_slash = 0;
- if (!mkdirp(directory, mask)) return false;
+ if (!mkdirp(isolate, directory, mask)) return false;
*last_slash = '/';
result = mkdir(directory, mask);
if (result == 0) return true;
if (errno == EEXIST) {
- return CheckItsADirectory(directory);
+ return CheckItsADirectory(isolate, directory);
}
- ThrowException(String::New(strerror(errno)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
return false;
} else {
- ThrowException(String::New(strerror(errno)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
return false;
}
}
@@ -623,34 +636,39 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mask = args[1]->Int32Value();
} else {
const char* message = "mkdirp() second argument must be numeric";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
} else if (args.Length() != 1) {
const char* message = "mkdirp() takes one or two arguments";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.mkdirp(): String conversion of argument failed.";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
- mkdirp(*directory, mask);
+ mkdirp(args.GetIsolate(), *directory, mask);
}
void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "rmdir() takes one or two arguments";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.rmdir(): String conversion of argument failed.";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
rmdir(*directory);
@@ -660,7 +678,8 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
const char* message = "setenv() takes two arguments";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value var(args[0]);
@@ -668,13 +687,15 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
if (*value == NULL) {
const char* message =
"os.setenv(): String conversion of variable contents failed.";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
setenv(*var, *value, 1);
@@ -684,29 +705,37 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value var(args[0]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
- args.GetIsolate()->ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
unsetenv(*var);
}
-void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
- os_templ->Set(String::New("system"), FunctionTemplate::New(System));
- os_templ->Set(String::New("chdir"), FunctionTemplate::New(ChangeDirectory));
- os_templ->Set(String::New("setenv"), FunctionTemplate::New(SetEnvironment));
- os_templ->Set(String::New("unsetenv"),
- FunctionTemplate::New(UnsetEnvironment));
- os_templ->Set(String::New("umask"), FunctionTemplate::New(SetUMask));
- os_templ->Set(String::New("mkdirp"), FunctionTemplate::New(MakeDirectory));
- os_templ->Set(String::New("rmdir"), FunctionTemplate::New(RemoveDirectory));
+void Shell::AddOSMethods(Isolate* isolate, Handle<ObjectTemplate> os_templ) {
+ os_templ->Set(String::NewFromUtf8(isolate, "system"),
+ FunctionTemplate::New(isolate, System));
+ os_templ->Set(String::NewFromUtf8(isolate, "chdir"),
+ FunctionTemplate::New(isolate, ChangeDirectory));
+ os_templ->Set(String::NewFromUtf8(isolate, "setenv"),
+ FunctionTemplate::New(isolate, SetEnvironment));
+ os_templ->Set(String::NewFromUtf8(isolate, "unsetenv"),
+ FunctionTemplate::New(isolate, UnsetEnvironment));
+ os_templ->Set(String::NewFromUtf8(isolate, "umask"),
+ FunctionTemplate::New(isolate, SetUMask));
+ os_templ->Set(String::NewFromUtf8(isolate, "mkdirp"),
+ FunctionTemplate::New(isolate, MakeDirectory));
+ os_templ->Set(String::NewFromUtf8(isolate, "rmdir"),
+ FunctionTemplate::New(isolate, RemoveDirectory));
}
} // namespace v8
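Editor's note: besides threading the Isolate* through for error reporting, the interesting logic in this file is mkdirp(): try mkdir, and on ENOENT create the parent first by temporarily truncating the path at the last '/'. A self-contained POSIX sketch of that recursion, with isolate->ThrowException replaced by a plain bool:

#include <cerrno>
#include <cstring>
#include <sys/stat.h>
#include <sys/types.h>

static bool MakeDirectories(char* directory, mode_t mask) {
  if (mkdir(directory, mask) == 0) return true;
  if (errno == EEXIST) {                        // already there: fine if a dir
    struct stat st;
    return stat(directory, &st) == 0 && S_ISDIR(st.st_mode);
  }
  if (errno != ENOENT) return false;            // unrecoverable error
  char* last_slash = std::strrchr(directory, '/');
  if (last_slash == 0) return false;
  *last_slash = '\0';                           // create the parent first
  bool parent_ok = MakeDirectories(directory, mask);
  *last_slash = '/';
  return parent_ok && (mkdir(directory, mask) == 0 || errno == EEXIST);
}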
diff --git a/deps/v8/src/d8-readline.cc b/deps/v8/src/d8-readline.cc
index 0226f31c0b..57b63bf4e1 100644
--- a/deps/v8/src/d8-readline.cc
+++ b/deps/v8/src/d8-readline.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <cstdio> // NOLINT
+#include <stdio.h> // NOLINT
#include <string.h> // NOLINT
#include <readline/readline.h> // NOLINT
#include <readline/history.h> // NOLINT
@@ -109,12 +109,9 @@ Handle<String> ReadLineEditor::Prompt(const char* prompt) {
Unlocker unlock(Isolate::GetCurrent());
result = readline(prompt);
}
- if (result != NULL) {
- AddHistory(result);
- } else {
- return Handle<String>();
- }
- return String::New(result);
+ if (result == NULL) return Handle<String>();
+ AddHistory(result);
+ return String::NewFromUtf8(isolate_, result);
}
@@ -153,15 +150,20 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
HandleScope scope(isolate);
Handle<Array> completions;
if (state == 0) {
- Local<String> full_text = String::New(rl_line_buffer, rl_point);
- completions = Shell::GetCompletions(isolate, String::New(text), full_text);
+ Local<String> full_text = String::NewFromUtf8(isolate,
+ rl_line_buffer,
+ String::kNormalString,
+ rl_point);
+ completions = Shell::GetCompletions(isolate,
+ String::NewFromUtf8(isolate, text),
+ full_text);
current_completions.Reset(isolate, completions);
current_index = 0;
} else {
completions = Local<Array>::New(isolate, current_completions);
}
if (current_index < completions->Length()) {
- Handle<Integer> index = Integer::New(current_index);
+ Handle<Integer> index = Integer::New(isolate, current_index);
Handle<Value> str_obj = completions->Get(index);
current_index++;
String::Utf8Value str(str_obj);
diff --git a/deps/v8/src/d8-windows.cc b/deps/v8/src/d8-windows.cc
index eeb4735bbc..edf5085d49 100644
--- a/deps/v8/src/d8-windows.cc
+++ b/deps/v8/src/d8-windows.cc
@@ -35,7 +35,7 @@
namespace v8 {
-void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
+void Shell::AddOSMethods(Isolate* isolate, Handle<ObjectTemplate> os_templ) {
}
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index aec0c7418e..76ff4f9431 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -49,7 +49,6 @@
#endif // !V8_SHARED
#ifdef V8_SHARED
-#include "../include/v8-defaults.h"
#include "../include/v8-testing.h"
#endif // V8_SHARED
@@ -62,12 +61,12 @@
#ifndef V8_SHARED
#include "api.h"
#include "checks.h"
+#include "cpu.h"
#include "d8-debug.h"
#include "debug.h"
#include "natives.h"
#include "platform.h"
#include "v8.h"
-#include "v8-defaults.h"
#endif // V8_SHARED
#if !defined(_WIN32) && !defined(_WIN64)
@@ -81,8 +80,8 @@
namespace v8 {
-static Handle<Value> Throw(const char* message) {
- return ThrowException(String::New(message));
+static Handle<Value> Throw(Isolate* isolate, const char* message) {
+ return isolate->ThrowException(String::NewFromUtf8(isolate, message));
}
@@ -91,15 +90,15 @@ class PerIsolateData {
public:
explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) {
HandleScope scope(isolate);
- isolate->SetData(this);
+ isolate->SetData(0, this);
}
~PerIsolateData() {
- isolate_->SetData(NULL); // Not really needed, just to be sure...
+ isolate_->SetData(0, NULL); // Not really needed, just to be sure...
}
inline static PerIsolateData* Get(Isolate* isolate) {
- return reinterpret_cast<PerIsolateData*>(isolate->GetData());
+ return reinterpret_cast<PerIsolateData*>(isolate->GetData(0));
}
class RealmScope {
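Editor's note: Isolate::SetData/GetData now take a slot index, so embedders can attach several pieces of per-isolate data; d8 keeps its PerIsolateData in slot 0. A minimal sketch of that pattern (the struct is illustrative, the V8 calls are the ones used above):

#include "v8.h"

struct EmbedderState {
  explicit EmbedderState(v8::Isolate* isolate) { isolate->SetData(0, this); }
  static EmbedderState* Get(v8::Isolate* isolate) {
    return static_cast<EmbedderState*>(isolate->GetData(0));
  }
};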
@@ -169,10 +168,9 @@ ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";
+#ifndef V8_SHARED
const int MB = 1024 * 1024;
-
-#ifndef V8_SHARED
bool CounterMap::Match(void* key1, void* key2) {
const char* name1 = reinterpret_cast<const char*>(key1);
const char* name2 = reinterpret_cast<const char*>(key2);
@@ -245,7 +243,8 @@ bool Shell::ExecuteString(Isolate* isolate,
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
Handle<Object> global = context->Global();
- Handle<Value> fun = global->Get(String::New("Stringify"));
+ Handle<Value> fun =
+ global->Get(String::NewFromUtf8(isolate, "Stringify"));
Handle<Value> argv[1] = { result };
Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv);
if (try_catch.HasCaught()) return true;
@@ -268,17 +267,16 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realms_ = new Persistent<Context>[1];
data_->realms_[0].Reset(data_->isolate_,
data_->isolate_->GetEnteredContext());
- data_->realm_shared_.Clear();
}
PerIsolateData::RealmScope::~RealmScope() {
// Drop realms to avoid keeping them alive.
for (int i = 0; i < data_->realm_count_; ++i)
- data_->realms_[i].Dispose();
+ data_->realms_[i].Reset();
delete[] data_->realms_;
if (!data_->realm_shared_.IsEmpty())
- data_->realm_shared_.Dispose();
+ data_->realm_shared_.Reset();
}
@@ -314,7 +312,7 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsObject()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = data->RealmFind(args[0]->ToObject()->CreationContext());
@@ -328,12 +326,12 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());
if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw("Invalid realm index");
+ Throw(args.GetIsolate(), "Invalid realm index");
return;
}
args.GetReturnValue().Set(
@@ -364,18 +362,17 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty() ||
index == 0 ||
index == data->realm_current_ || index == data->realm_switch_) {
- Throw("Invalid realm index");
+ Throw(args.GetIsolate(), "Invalid realm index");
return;
}
- data->realms_[index].Dispose();
- data->realms_[index].Clear();
+ data->realms_[index].Reset();
}
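Editor's note: the Dispose()/Clear() pair on Persistent handles is replaced throughout by a single Reset(): the no-argument form drops the current value, and Reset(isolate, handle) re-points the slot. A hedged sketch of both forms (function names illustrative):

#include "v8.h"

void Rebind(v8::Isolate* isolate,
            v8::Persistent<v8::Context>* slot,
            v8::Local<v8::Context> ctx) {
  slot->Reset(isolate, ctx);  // drops any previous value, then points at ctx
}

void Drop(v8::Persistent<v8::Context>* slot) {
  slot->Reset();              // replaces the old Dispose(); Clear(); pair
}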
@@ -384,12 +381,12 @@ void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw("Invalid realm index");
+ Throw(args.GetIsolate(), "Invalid realm index");
return;
}
data->realm_switch_ = index;
@@ -401,12 +398,12 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw("Invalid realm index");
+ Throw(args.GetIsolate(), "Invalid realm index");
return;
}
Handle<Script> script = Script::New(args[1]->ToString());
@@ -433,7 +430,6 @@ void Shell::RealmSharedSet(Local<String> property,
const PropertyCallbackInfo<void>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose();
data->realm_shared_.Reset(isolate, value);
}
@@ -473,12 +469,12 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value file(args[0]);
if (*file == NULL) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
args.GetReturnValue().Set(source);
@@ -488,7 +484,7 @@ void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
static const int kBufferSize = 256;
char buffer[kBufferSize];
- Handle<String> accumulator = String::New("");
+ Handle<String> accumulator = String::NewFromUtf8(isolate, "");
int length;
while (true) {
// Continue reading if the line ends with an escape '\\' or the line has
@@ -504,12 +500,18 @@ Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
if (length == 0) {
return accumulator;
} else if (buffer[length-1] != '\n') {
- accumulator = String::Concat(accumulator, String::New(buffer, length));
+ accumulator = String::Concat(
+ accumulator,
+ String::NewFromUtf8(isolate, buffer, String::kNormalString, length));
} else if (length > 1 && buffer[length-2] == '\\') {
buffer[length-2] = '\n';
- accumulator = String::Concat(accumulator, String::New(buffer, length-1));
+ accumulator = String::Concat(
+ accumulator, String::NewFromUtf8(isolate, buffer,
+ String::kNormalString, length - 1));
} else {
- return String::Concat(accumulator, String::New(buffer, length-1));
+ return String::Concat(
+ accumulator, String::NewFromUtf8(isolate, buffer,
+ String::kNormalString, length - 1));
}
}
}
@@ -520,20 +522,20 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope handle_scope(args.GetIsolate());
String::Utf8Value file(args[i]);
if (*file == NULL) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
if (!ExecuteString(args.GetIsolate(),
source,
- String::New(*file),
+ String::NewFromUtf8(args.GetIsolate(), *file),
false,
true)) {
- Throw("Error executing file");
+ Throw(args.GetIsolate(), "Error executing file");
return;
}
}
@@ -548,7 +550,8 @@ void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(String::New(V8::GetVersion()));
+ args.GetReturnValue().Set(
+ String::NewFromUtf8(args.GetIsolate(), V8::GetVersion()));
}
@@ -556,7 +559,7 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
Handle<Context> utility_context;
- bool enter_context = !Context::InContext();
+ bool enter_context = !isolate->InContext();
if (enter_context) {
utility_context = Local<Context>::New(isolate, utility_context_);
utility_context->Enter();
@@ -606,49 +609,52 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
Handle<Array> Shell::GetCompletions(Isolate* isolate,
Handle<String> text,
Handle<String> full) {
- HandleScope handle_scope(isolate);
+ EscapableHandleScope handle_scope(isolate);
v8::Local<v8::Context> utility_context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(utility_context);
Handle<Object> global = utility_context->Global();
- Handle<Value> fun = global->Get(String::New("GetCompletions"));
+ Local<Value> fun =
+ global->Get(String::NewFromUtf8(isolate, "GetCompletions"));
static const int kArgc = 3;
v8::Local<v8::Context> evaluation_context =
v8::Local<v8::Context>::New(isolate, evaluation_context_);
Handle<Value> argv[kArgc] = { evaluation_context->Global(), text, full };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return handle_scope.Close(Handle<Array>::Cast(val));
+ Local<Value> val = Local<Function>::Cast(fun)->Call(global, kArgc, argv);
+ return handle_scope.Escape(Local<Array>::Cast(val));
}
#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
- Handle<String> message) {
- HandleScope handle_scope(isolate);
+Local<Object> Shell::DebugMessageDetails(Isolate* isolate,
+ Handle<String> message) {
+ EscapableHandleScope handle_scope(isolate);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
Handle<Object> global = context->Global();
- Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
+ Handle<Value> fun =
+ global->Get(String::NewFromUtf8(isolate, "DebugMessageDetails"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { message };
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return Handle<Object>::Cast(val);
+ return handle_scope.Escape(Local<Object>(Handle<Object>::Cast(val)));
}
-Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
- Handle<String> command) {
- HandleScope handle_scope(isolate);
+Local<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command) {
+ EscapableHandleScope handle_scope(isolate);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
Handle<Object> global = context->Global();
- Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
+ Handle<Value> fun =
+ global->Get(String::NewFromUtf8(isolate, "DebugCommandToJSONRequest"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { command };
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return val;
+ return handle_scope.Escape(Local<Value>(val));
}
@@ -783,8 +789,8 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global_object());
- utility_context->Global()->Set(String::New("$debug"),
- Utils::ToLocal(js_debug));
+ utility_context->Global()->Set(String::NewFromUtf8(isolate, "$debug"),
+ Utils::ToLocal(js_debug));
debug->debug_context()->set_security_token(
reinterpret_cast<i::Isolate*>(isolate)->heap()->undefined_value());
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -795,10 +801,12 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
i::NativesCollection<i::D8>::GetRawScriptSource(source_index);
i::Vector<const char> shell_source_name =
i::NativesCollection<i::D8>::GetScriptName(source_index);
- Handle<String> source = String::New(shell_source.start(),
- shell_source.length());
- Handle<String> name = String::New(shell_source_name.start(),
- shell_source_name.length());
+ Handle<String> source =
+ String::NewFromUtf8(isolate, shell_source.start(), String::kNormalString,
+ shell_source.length());
+ Handle<String> name =
+ String::NewFromUtf8(isolate, shell_source_name.start(),
+ String::kNormalString, shell_source_name.length());
Handle<Script> script = Script::Compile(source, name);
script->Run();
// Mark the d8 shell script as native to avoid it showing up as normal source
@@ -850,49 +858,56 @@ class BZip2Decompressor : public v8::StartupDataDecompressor {
Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
- Handle<ObjectTemplate> global_template = ObjectTemplate::New();
- global_template->Set(String::New("print"), FunctionTemplate::New(Print));
- global_template->Set(String::New("write"), FunctionTemplate::New(Write));
- global_template->Set(String::New("read"), FunctionTemplate::New(Read));
- global_template->Set(String::New("readbuffer"),
- FunctionTemplate::New(ReadBuffer));
- global_template->Set(String::New("readline"),
- FunctionTemplate::New(ReadLine));
- global_template->Set(String::New("load"), FunctionTemplate::New(Load));
- global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
- global_template->Set(String::New("version"), FunctionTemplate::New(Version));
+ Handle<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
+ global_template->Set(String::NewFromUtf8(isolate, "print"),
+ FunctionTemplate::New(isolate, Print));
+ global_template->Set(String::NewFromUtf8(isolate, "write"),
+ FunctionTemplate::New(isolate, Write));
+ global_template->Set(String::NewFromUtf8(isolate, "read"),
+ FunctionTemplate::New(isolate, Read));
+ global_template->Set(String::NewFromUtf8(isolate, "readbuffer"),
+ FunctionTemplate::New(isolate, ReadBuffer));
+ global_template->Set(String::NewFromUtf8(isolate, "readline"),
+ FunctionTemplate::New(isolate, ReadLine));
+ global_template->Set(String::NewFromUtf8(isolate, "load"),
+ FunctionTemplate::New(isolate, Load));
+ global_template->Set(String::NewFromUtf8(isolate, "quit"),
+ FunctionTemplate::New(isolate, Quit));
+ global_template->Set(String::NewFromUtf8(isolate, "version"),
+ FunctionTemplate::New(isolate, Version));
// Bind the Realm object.
- Handle<ObjectTemplate> realm_template = ObjectTemplate::New();
- realm_template->Set(String::New("current"),
- FunctionTemplate::New(RealmCurrent));
- realm_template->Set(String::New("owner"),
- FunctionTemplate::New(RealmOwner));
- realm_template->Set(String::New("global"),
- FunctionTemplate::New(RealmGlobal));
- realm_template->Set(String::New("create"),
- FunctionTemplate::New(RealmCreate));
- realm_template->Set(String::New("dispose"),
- FunctionTemplate::New(RealmDispose));
- realm_template->Set(String::New("switch"),
- FunctionTemplate::New(RealmSwitch));
- realm_template->Set(String::New("eval"),
- FunctionTemplate::New(RealmEval));
- realm_template->SetAccessor(String::New("shared"),
+ Handle<ObjectTemplate> realm_template = ObjectTemplate::New(isolate);
+ realm_template->Set(String::NewFromUtf8(isolate, "current"),
+ FunctionTemplate::New(isolate, RealmCurrent));
+ realm_template->Set(String::NewFromUtf8(isolate, "owner"),
+ FunctionTemplate::New(isolate, RealmOwner));
+ realm_template->Set(String::NewFromUtf8(isolate, "global"),
+ FunctionTemplate::New(isolate, RealmGlobal));
+ realm_template->Set(String::NewFromUtf8(isolate, "create"),
+ FunctionTemplate::New(isolate, RealmCreate));
+ realm_template->Set(String::NewFromUtf8(isolate, "dispose"),
+ FunctionTemplate::New(isolate, RealmDispose));
+ realm_template->Set(String::NewFromUtf8(isolate, "switch"),
+ FunctionTemplate::New(isolate, RealmSwitch));
+ realm_template->Set(String::NewFromUtf8(isolate, "eval"),
+ FunctionTemplate::New(isolate, RealmEval));
+ realm_template->SetAccessor(String::NewFromUtf8(isolate, "shared"),
RealmSharedGet, RealmSharedSet);
- global_template->Set(String::New("Realm"), realm_template);
+ global_template->Set(String::NewFromUtf8(isolate, "Realm"), realm_template);
#ifndef V8_SHARED
- Handle<ObjectTemplate> performance_template = ObjectTemplate::New();
- performance_template->Set(String::New("now"),
- FunctionTemplate::New(PerformanceNow));
- global_template->Set(String::New("performance"), performance_template);
+ Handle<ObjectTemplate> performance_template = ObjectTemplate::New(isolate);
+ performance_template->Set(String::NewFromUtf8(isolate, "now"),
+ FunctionTemplate::New(isolate, PerformanceNow));
+ global_template->Set(String::NewFromUtf8(isolate, "performance"),
+ performance_template);
#endif // V8_SHARED
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
- Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
- AddOSMethods(os_templ);
- global_template->Set(String::New("os"), os_templ);
+ Handle<ObjectTemplate> os_templ = ObjectTemplate::New(isolate);
+ AddOSMethods(isolate, os_templ);
+ global_template->Set(String::NewFromUtf8(isolate, "os"), os_templ);
#endif // V8_SHARED
return global_template;
@@ -950,7 +965,7 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- HandleScope handle_scope(isolate);
+ EscapableHandleScope handle_scope(isolate);
Local<Context> context = Context::New(isolate, NULL, global_template);
ASSERT(!context.IsEmpty());
Context::Scope scope(context);
@@ -967,10 +982,10 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
}
i::Handle<i::JSArray> arguments_jsarray =
factory->NewJSArrayWithElements(arguments_array);
- context->Global()->Set(String::New("arguments"),
+ context->Global()->Set(String::NewFromUtf8(isolate, "arguments"),
Utils::ToLocal(arguments_jsarray));
#endif // V8_SHARED
- return handle_scope.Close(context);
+ return handle_scope.Escape(context);
}
@@ -1081,16 +1096,22 @@ static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
return chars;
}
-static void ReadBufferWeakCallback(v8::Isolate* isolate,
- Persistent<ArrayBuffer>* array_buffer,
- uint8_t* data) {
- size_t byte_length =
- Local<ArrayBuffer>::New(isolate, *array_buffer)->ByteLength();
- isolate->AdjustAmountOfExternalAllocatedMemory(
+
+struct DataAndPersistent {
+ uint8_t* data;
+ Persistent<ArrayBuffer> handle;
+};
+
+
+static void ReadBufferWeakCallback(
+ const v8::WeakCallbackData<ArrayBuffer, DataAndPersistent>& data) {
+ size_t byte_length = data.GetValue()->ByteLength();
+ data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
-static_cast<intptr_t>(byte_length));
- delete[] data;
- array_buffer->Dispose();
+ delete[] data.GetParameter()->data;
+ data.GetParameter()->handle.Reset();
+ delete data.GetParameter();
}
@@ -1099,21 +1120,24 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
Isolate* isolate = args.GetIsolate();
- uint8_t* data = reinterpret_cast<uint8_t*>(
+ DataAndPersistent* data = new DataAndPersistent;
+ data->data = reinterpret_cast<uint8_t*>(
ReadChars(args.GetIsolate(), *filename, &length));
- if (data == NULL) {
- Throw("Error reading file");
+ if (data->data == NULL) {
+ delete data;
+ Throw(args.GetIsolate(), "Error reading file");
return;
}
- Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(data, length);
- v8::Persistent<v8::ArrayBuffer> weak_handle(isolate, buffer);
- weak_handle.MakeWeak(data, ReadBufferWeakCallback);
- weak_handle.MarkIndependent();
+ Handle<v8::ArrayBuffer> buffer =
+ ArrayBuffer::New(isolate, data->data, length);
+ data->handle.Reset(isolate, buffer);
+ data->handle.SetWeak(data, ReadBufferWeakCallback);
+ data->handle.MarkIndependent();
isolate->AdjustAmountOfExternalAllocatedMemory(length);
args.GetReturnValue().Set(buffer);
@@ -1148,7 +1172,8 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
int size = 0;
char* chars = ReadChars(isolate, name, &size);
if (chars == NULL) return Handle<String>();
- Handle<String> result = String::New(chars, size);
+ Handle<String> result =
+ String::NewFromUtf8(isolate, chars, String::kNormalString, size);
delete[] chars;
return result;
}
@@ -1161,7 +1186,7 @@ void Shell::RunShell(Isolate* isolate) {
v8::Local<v8::Context>::New(isolate, evaluation_context_);
v8::Context::Scope context_scope(context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- Handle<String> name = String::New("(d8)");
+ Handle<String> name = String::NewFromUtf8(isolate, "(d8)");
LineEditor* console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
console->Open(isolate);
@@ -1230,7 +1255,8 @@ void ShellThread::Run() {
Shell::Exit(1);
}
- Shell::ExecuteString(isolate_, str, String::New(filename), false, false);
+ Shell::ExecuteString(
+ isolate_, str, String::NewFromUtf8(isolate_, filename), false, false);
}
ptr = next_line;
@@ -1254,8 +1280,8 @@ void SourceGroup::Execute(Isolate* isolate) {
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
HandleScope handle_scope(isolate);
- Handle<String> file_name = String::New("unnamed");
- Handle<String> source = String::New(argv_[i + 1]);
+ Handle<String> file_name = String::NewFromUtf8(isolate, "unnamed");
+ Handle<String> source = String::NewFromUtf8(isolate, argv_[i + 1]);
if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
exception_was_thrown = true;
break;
@@ -1266,7 +1292,7 @@ void SourceGroup::Execute(Isolate* isolate) {
} else {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope(isolate);
- Handle<String> file_name = String::New(arg);
+ Handle<String> file_name = String::NewFromUtf8(isolate, arg);
Handle<String> source = ReadFile(isolate, arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
@@ -1288,7 +1314,8 @@ Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
int size;
char* chars = ReadChars(isolate, name, &size);
if (chars == NULL) return Handle<String>();
- Handle<String> result = String::New(chars, size);
+ Handle<String> result =
+ String::NewFromUtf8(isolate, chars, String::kNormalString, size);
delete[] chars;
return result;
}
@@ -1364,6 +1391,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--stress-deopt") == 0) {
options.stress_deopt = true;
argv[i] = NULL;
+ } else if (strcmp(argv[i], "--mock-arraybuffer-allocator") == 0) {
+ options.mock_arraybuffer_allocator = true;
+ argv[i] = NULL;
} else if (strcmp(argv[i], "--noalways-opt") == 0) {
// No support for stressing if we can't use --always-opt.
options.stress_opt = false;
@@ -1377,43 +1407,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--send-idle-notification") == 0) {
options.send_idle_notification = true;
argv[i] = NULL;
- } else if (strcmp(argv[i], "--preemption") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- options.use_preemption = true;
- argv[i] = NULL;
-#endif // V8_SHARED
- } else if (strcmp(argv[i], "--nopreemption") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- options.use_preemption = false;
- argv[i] = NULL;
-#endif // V8_SHARED
- } else if (strcmp(argv[i], "--preemption-interval") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- if (++i < argc) {
- argv[i-1] = NULL;
- char* end = NULL;
- options.preemption_interval = strtol(argv[i], &end, 10); // NOLINT
- if (options.preemption_interval <= 0
- || *end != '\0'
- || errno == ERANGE) {
- printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
- return false;
- }
- argv[i] = NULL;
- } else {
- printf("Missing value for --preemption-interval\n");
- return false;
- }
-#endif // V8_SHARED
} else if (strcmp(argv[i], "-f") == 0) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
@@ -1552,14 +1545,6 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
V8::IdleNotification(kLongIdlePauseInMs);
}
}
-
-#ifndef V8_SHARED
- // Start preemption if threads have been created and preemption is enabled.
- if (threads.length() > 0
- && options.use_preemption) {
- Locker::StartPreemption(isolate, options.preemption_interval);
- }
-#endif // V8_SHARED
}
#ifndef V8_SHARED
@@ -1572,11 +1557,6 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
thread->Join();
delete thread;
}
-
- if (threads.length() > 0 && options.use_preemption) {
- Locker lock(isolate);
- Locker::StopPreemption(isolate);
- }
#endif // V8_SHARED
return 0;
}
@@ -1584,12 +1564,14 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#ifdef V8_SHARED
static void SetStandaloneFlagsViaCommandLine() {
- int fake_argc = 2;
- char **fake_argv = new char*[2];
+ int fake_argc = 3;
+ char **fake_argv = new char*[3];
fake_argv[0] = NULL;
fake_argv[1] = strdup("--trace-hydrogen-file=hydrogen.cfg");
+ fake_argv[2] = strdup("--redirect-code-traces-to=code.asm");
v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false);
free(fake_argv[1]);
+ free(fake_argv[2]);
delete[] fake_argv;
}
#endif
@@ -1672,19 +1654,43 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
};
+class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ virtual void* Allocate(size_t) V8_OVERRIDE {
+ return malloc(0);
+ }
+ virtual void* AllocateUninitialized(size_t length) V8_OVERRIDE {
+ return malloc(0);
+ }
+ virtual void Free(void*, size_t) V8_OVERRIDE {
+ }
+};
+
+
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU();
#ifndef V8_SHARED
i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
+ i::FLAG_redirect_code_traces_to = "code.asm";
#else
SetStandaloneFlagsViaCommandLine();
#endif
- v8::SetDefaultResourceConstraintsForCurrentPlatform();
ShellArrayBufferAllocator array_buffer_allocator;
- v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
+ MockArrayBufferAllocator mock_arraybuffer_allocator;
+ if (options.mock_arraybuffer_allocator) {
+ v8::V8::SetArrayBufferAllocator(&mock_arraybuffer_allocator);
+ } else {
+ v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
+ }
int result = 0;
Isolate* isolate = Isolate::GetCurrent();
+#ifndef V8_SHARED
+ v8::ResourceConstraints constraints;
+ constraints.ConfigureDefaults(i::OS::TotalPhysicalMemory(),
+ i::CPU::NumberOfProcessorsOnline());
+ v8::SetResourceConstraints(isolate, &constraints);
+#endif
DumbLineEditor dumb_line_editor(isolate);
{
Initialize(isolate);
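
The d8 changes above consistently move from the old context-implicit API (String::New, ObjectTemplate::New(), FunctionTemplate::New(callback), HandleScope::Close) to the isolate-explicit API (String::NewFromUtf8, ObjectTemplate::New(isolate), FunctionTemplate::New(isolate, callback), EscapableHandleScope::Escape). A minimal sketch of that pattern, assuming a v8.h of this vintage; Greeting and MakeGlobalTemplate are illustrative names, not part of the patch:

    // (assumes #include <v8.h>)
    // Return a handle that must outlive the local scope.
    static v8::Local<v8::String> Greeting(v8::Isolate* isolate) {
      v8::EscapableHandleScope handle_scope(isolate);    // was HandleScope + Close()
      v8::Local<v8::String> text =
          v8::String::NewFromUtf8(isolate, "hello");     // was String::New("hello")
      return handle_scope.Escape(text);                  // was handle_scope.Close(text)
    }

    // Build a global template with an isolate-aware function binding.
    static v8::Handle<v8::ObjectTemplate> MakeGlobalTemplate(
        v8::Isolate* isolate, v8::FunctionCallback print) {
      v8::Handle<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
      tmpl->Set(v8::String::NewFromUtf8(isolate, "print"),
                v8::FunctionTemplate::New(isolate, print));  // was FunctionTemplate::New(print)
      return tmpl;
    }
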
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index 097abc0465..0e51baaaca 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -32,6 +32,7 @@
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
'v8_enable_i18n_support%': 1,
+ 'v8_toolset_for_d8%': 'target',
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
@@ -49,6 +50,9 @@
'd8.cc',
],
'conditions': [
+ [ 'want_separate_host_toolset==1', {
+ 'toolsets': [ '<(v8_toolset_for_d8)', ],
+ }],
[ 'console=="readline"', {
'libraries': [ '-lreadline', ],
'sources': [ 'd8-readline.cc' ],
@@ -66,7 +70,8 @@
],
}],
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
- or OS=="openbsd" or OS=="solaris" or OS=="android")', {
+ or OS=="openbsd" or OS=="solaris" or OS=="android" \
+ or OS=="qnx")', {
'sources': [ 'd8-posix.cc', ]
}],
[ 'OS=="win"', {
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 411dfdda3e..db2edb93c9 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -219,8 +219,6 @@ class ShellOptions {
public:
ShellOptions() :
#ifndef V8_SHARED
- use_preemption(true),
- preemption_interval(10),
num_parallel_files(0),
parallel_files(NULL),
#endif // V8_SHARED
@@ -233,6 +231,7 @@ class ShellOptions {
test_shell(false),
dump_heap_constants(false),
expected_to_throw(false),
+ mock_arraybuffer_allocator(false),
num_isolates(1),
isolate_sources(NULL) { }
@@ -244,8 +243,6 @@ class ShellOptions {
}
#ifndef V8_SHARED
- bool use_preemption;
- int preemption_interval;
int num_parallel_files;
char** parallel_files;
#endif // V8_SHARED
@@ -258,6 +255,7 @@ class ShellOptions {
bool test_shell;
bool dump_heap_constants;
bool expected_to_throw;
+ bool mock_arraybuffer_allocator;
int num_isolates;
SourceGroup* isolate_sources;
};
@@ -296,10 +294,10 @@ class Shell : public i::AllStatic {
static void MapCounters(const char* name);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Handle<Object> DebugMessageDetails(Isolate* isolate,
- Handle<String> message);
- static Handle<Value> DebugCommandToJSONRequest(Isolate* isolate,
- Handle<String> command);
+ static Local<Object> DebugMessageDetails(Isolate* isolate,
+ Handle<String> message);
+ static Local<Value> DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command);
static void DispatchDebugMessages();
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -379,7 +377,8 @@ class Shell : public i::AllStatic {
static void MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void AddOSMethods(Handle<ObjectTemplate> os_template);
+ static void AddOSMethods(v8::Isolate* isolate,
+ Handle<ObjectTemplate> os_template);
static const char* kPrompt;
static ShellOptions options;
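
The new mock_arraybuffer_allocator option pairs with the MockArrayBufferAllocator added to d8.cc: when --mock-arraybuffer-allocator is passed, d8 installs an allocator that never backs ArrayBuffers with real memory. A sketch of how an ArrayBuffer::Allocator of this vintage is written and installed; MyAllocator is an illustrative name, and the zeroing in Allocate() follows the v8.h contract for this API version rather than anything in the patch:

    // (assumes #include <v8.h> and #include <stdlib.h>)
    class MyAllocator : public v8::ArrayBuffer::Allocator {
     public:
      // Allocate() is expected to hand back zero-initialized memory.
      virtual void* Allocate(size_t length) { return calloc(length, 1); }
      virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
      virtual void Free(void* data, size_t) { free(data); }
    };

    // Must be installed before the first ArrayBuffer is created:
    //   static MyAllocator allocator;
    //   v8::V8::SetArrayBufferAllocator(&allocator);
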
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index a377451770..4afd8dc60c 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -36,7 +36,6 @@ namespace v8 {
namespace internal {
-static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
static const int kDaysIn4Years = 4 * 365 + 1;
static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 7a2f4d9e30..f3d4af244f 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -302,8 +302,7 @@ function DateUTC(year, month, date, hours, minutes, seconds, ms) {
}
-// Mozilla-specific extension. Returns the number of milliseconds
-// elapsed since 1 January 1970 00:00:00 UTC.
+// ECMA 262 - 15.9.4.4
function DateNow() {
return %DateCurrentTime();
}
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 27584ce39e..7dc489de34 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -122,7 +122,7 @@ class DateParser : public AllStatic {
}
bool SkipWhiteSpace() {
- if (unicode_cache_->IsWhiteSpace(ch_)) {
+ if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
Next();
return true;
}
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index 51823aaf24..49790cee99 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -173,6 +173,11 @@ void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
}
+DebuggerAgentSession::~DebuggerAgentSession() {
+ delete client_;
+}
+
+
void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
// Don't do anything during termination.
if (terminate_) {
@@ -192,7 +197,7 @@ void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
void DebuggerAgentSession::Run() {
// Send the hello message.
- bool ok = DebuggerAgentUtil::SendConnectMessage(client_, *agent_->name_);
+ bool ok = DebuggerAgentUtil::SendConnectMessage(client_, agent_->name_.get());
if (!ok) return;
while (true) {
@@ -200,7 +205,7 @@ void DebuggerAgentSession::Run() {
SmartArrayPointer<char> message =
DebuggerAgentUtil::ReceiveMessage(client_);
- const char* msg = *message;
+ const char* msg = message.get();
bool is_closing_session = (msg == NULL);
if (msg == NULL) {
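
SmartArrayPointer in this tree moves from the overloaded dereference operator to an explicit get(), which is why the agent code above switches from *message to message.get(). A sketch of the new call shape, assuming the V8-internal SmartArrayPointer type seen above; the surrounding usage is illustrative:

    // Consuming a SmartArrayPointer-owned C string (V8-internal type).
    SmartArrayPointer<char> message = DebuggerAgentUtil::ReceiveMessage(client);
    const char* msg = message.get();   // was: const char* msg = *message;
    if (msg == NULL) {
      // Connection closed; the buffer is freed when 'message' goes out of scope.
    }
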
diff --git a/deps/v8/src/debug-agent.h b/deps/v8/src/debug-agent.h
index 138e51acc6..e81e4cd6aa 100644
--- a/deps/v8/src/debug-agent.h
+++ b/deps/v8/src/debug-agent.h
@@ -83,6 +83,7 @@ class DebuggerAgentSession: public Thread {
DebuggerAgentSession(DebuggerAgent* agent, Socket* client)
: Thread("v8:DbgAgntSessn"),
agent_(agent), client_(client) {}
+ ~DebuggerAgentSession();
void DebuggerMessage(Vector<uint16_t> message);
void Shutdown();
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 35970e5ee9..d46c7b1ac6 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -86,13 +86,6 @@ static void PrintLn(v8::Local<v8::Value> value) {
}
-static Handle<Code> ComputeCallDebugPrepareStepIn(Isolate* isolate,
- int argc,
- Code::Kind kind) {
- return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
-}
-
-
static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
// Isolate::context() may have been NULL when "script collected" event
@@ -413,59 +406,41 @@ bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
if (target_code->kind() == Code::STUB) {
return target_code->major_key() == CodeStub::CallFunction;
}
- return target_code->is_call_stub() || target_code->is_keyed_call_stub();
- } else {
- return false;
}
+ return false;
}
void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
+#ifdef DEBUG
HandleScope scope(isolate);
-
// Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call.
Address target = rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
- if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
- // Step in through IC call is handled by the runtime system. Therefore make
- // sure that the any current IC is cleared and the runtime system is
- // called. If the executing code has a debug break at the location change
- // the call in the original code as it is the code there that will be
- // executed in place of the debug break call.
- Handle<Code> stub = ComputeCallDebugPrepareStepIn(
- isolate, target_code->arguments_count(), target_code->kind());
- if (IsDebugBreak()) {
- original_rinfo()->set_target_address(stub->entry());
- } else {
- rinfo()->set_target_address(stub->entry());
- }
- } else {
-#ifdef DEBUG
- // All the following stuff is needed only for assertion checks so the code
- // is wrapped in ifdef.
- Handle<Code> maybe_call_function_stub = target_code;
- if (IsDebugBreak()) {
- Address original_target = original_rinfo()->target_address();
- maybe_call_function_stub =
- Handle<Code>(Code::GetCodeFromTargetAddress(original_target));
- }
- bool is_call_function_stub =
- (maybe_call_function_stub->kind() == Code::STUB &&
- maybe_call_function_stub->major_key() == CodeStub::CallFunction);
-
- // Step in through construct call requires no changes to the running code.
- // Step in through getters/setters should already be prepared as well
- // because caller of this function (Debug::PrepareStep) is expected to
- // flood the top frame's function with one shot breakpoints.
- // Step in through CallFunction stub should also be prepared by caller of
- // this function (Debug::PrepareStep) which should flood target function
- // with breakpoints.
- ASSERT(RelocInfo::IsConstructCall(rmode()) ||
- target_code->is_inline_cache_stub() ||
- is_call_function_stub);
+ // All the following stuff is needed only for assertion checks so the code
+ // is wrapped in ifdef.
+ Handle<Code> maybe_call_function_stub = target_code;
+ if (IsDebugBreak()) {
+ Address original_target = original_rinfo()->target_address();
+ maybe_call_function_stub =
+ Handle<Code>(Code::GetCodeFromTargetAddress(original_target));
+ }
+ bool is_call_function_stub =
+ (maybe_call_function_stub->kind() == Code::STUB &&
+ maybe_call_function_stub->major_key() == CodeStub::CallFunction);
+
+ // Step in through construct call requires no changes to the running code.
+ // Step in through getters/setters should already be prepared as well
+ // because caller of this function (Debug::PrepareStep) is expected to
+ // flood the top frame's function with one shot breakpoints.
+ // Step in through CallFunction stub should also be prepared by caller of
+ // this function (Debug::PrepareStep) which should flood target function
+ // with breakpoints.
+ ASSERT(RelocInfo::IsConstructCall(rmode()) ||
+ target_code->is_inline_cache_stub() ||
+ is_call_function_stub);
#endif
- }
}
@@ -646,11 +621,10 @@ void ScriptCache::Add(Handle<Script> script) {
// Globalize the script object, make it weak and use the location of the
// global handle as the value in the hash map.
Handle<Script> script_ =
- Handle<Script>::cast(
- (global_handles->Create(*script)));
- global_handles->MakeWeak(reinterpret_cast<Object**>(script_.location()),
- this,
- ScriptCache::HandleWeakScript);
+ Handle<Script>::cast(global_handles->Create(*script));
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
+ this,
+ ScriptCache::HandleWeakScript);
entry->value = script_.location();
}
@@ -680,36 +654,37 @@ void ScriptCache::ProcessCollectedScripts() {
void ScriptCache::Clear() {
- GlobalHandles* global_handles = isolate_->global_handles();
// Iterate the script cache to get rid of all the weak handles.
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry != NULL);
Object** location = reinterpret_cast<Object**>(entry->value);
ASSERT((*location)->IsScript());
- global_handles->ClearWeakness(location);
- global_handles->Destroy(location);
+ GlobalHandles::ClearWeakness(location);
+ GlobalHandles::Destroy(location);
}
// Clear the content of the hash map.
HashMap::Clear();
}
-void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data) {
- ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
- // Find the location of the global handle.
- Script** location =
- reinterpret_cast<Script**>(Utils::OpenPersistent(*obj).location());
- ASSERT((*location)->IsScript());
+void ScriptCache::HandleWeakScript(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ // Retrieve the script identifier.
+ Handle<Object> object = Utils::OpenHandle(*data.GetValue());
+ int id = Handle<Script>::cast(object)->id()->value();
+ void* key = reinterpret_cast<void*>(id);
+ uint32_t hash = Hash(id);
- // Remove the entry from the cache.
- int id = (*location)->id()->value();
- script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
+ // Remove the corresponding entry from the cache.
+ ScriptCache* script_cache =
+ reinterpret_cast<ScriptCache*>(data.GetParameter());
+ HashMap::Entry* entry = script_cache->Lookup(key, hash, false);
+ Object** location = reinterpret_cast<Object**>(entry->value);
+ script_cache->Remove(key, hash);
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
- obj->Dispose();
+ GlobalHandles::Destroy(location);
}
@@ -728,11 +703,11 @@ void Debug::SetUp(bool create_heap_objects) {
}
-void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data) {
- Debug* debug = reinterpret_cast<Isolate*>(isolate)->debug();
- DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
+void Debug::HandleWeakDebugInfo(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ Debug* debug = reinterpret_cast<Isolate*>(data.GetIsolate())->debug();
+ DebugInfoListNode* node =
+ reinterpret_cast<DebugInfoListNode*>(data.GetParameter());
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
// the function will live in the heap until next gc, and can be found by
@@ -741,29 +716,27 @@ void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
it.ClearAllDebugBreak();
debug->RemoveDebugInfo(node->debug_info());
#ifdef DEBUG
- node = debug->debug_info_list_;
- while (node != NULL) {
- ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
- node = node->next();
+ for (DebugInfoListNode* n = debug->debug_info_list_;
+ n != NULL;
+ n = n->next()) {
+ ASSERT(n != node);
}
#endif
}
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
- GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
// Globalize the request debug info object and make it weak.
- debug_info_ = Handle<DebugInfo>::cast(
- (global_handles->Create(debug_info)));
- global_handles->MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
- this,
- Debug::HandleWeakDebugInfo);
+ GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
+ debug_info_ = Handle<DebugInfo>::cast(global_handles->Create(debug_info));
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
+ this,
+ Debug::HandleWeakDebugInfo);
}
DebugInfoListNode::~DebugInfoListNode() {
- debug_info_->GetIsolate()->global_handles()->Destroy(
- reinterpret_cast<Object**>(debug_info_.location()));
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_.location()));
}
@@ -785,14 +758,13 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
// Compile the script.
Handle<SharedFunctionInfo> function_info;
- function_info = Compiler::Compile(source_code,
- script_name,
- 0, 0,
- false,
- context,
- NULL, NULL,
- Handle<String>::null(),
- NATIVES_CODE);
+ function_info = Compiler::CompileScript(source_code,
+ script_name, 0, 0,
+ false,
+ context,
+ NULL, NULL,
+ Handle<String>::null(),
+ NATIVES_CODE);
// Silently ignore stack overflows during compilation.
if (function_info.is_null()) {
@@ -820,7 +792,7 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
isolate, "error_loading_debugger", &computed_location,
- Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
+ Vector<Handle<Object> >::empty(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
@@ -857,11 +829,12 @@ bool Debug::Load() {
// Create the debugger context.
HandleScope scope(isolate_);
+ ExtensionConfiguration no_extensions;
Handle<Context> context =
isolate_->bootstrapper()->CreateEnvironment(
Handle<Object>::null(),
v8::Handle<ObjectTemplate>(),
- NULL);
+ &no_extensions);
// Fail if no context could be created.
if (context.is_null()) return false;
@@ -921,8 +894,7 @@ void Debug::Unload() {
DestroyScriptCache();
// Clear debugger context global handle.
- isolate_->global_handles()->Destroy(
- reinterpret_cast<Object**>(debug_context_.location()));
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_context_.location()));
debug_context_ = Handle<Context>();
}
@@ -1453,9 +1425,6 @@ void Debug::PrepareStep(StepAction step_action,
bool is_call_target = false;
Address target = it.rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
- is_call_target = true;
- }
if (code->is_inline_cache_stub()) {
is_inline_cache_stub = true;
is_load_or_store = !is_call_target;
@@ -1559,6 +1528,20 @@ void Debug::PrepareStep(StepAction step_action,
ASSERT(expressions_count - 2 - call_function_arg_count >= 0);
Object* fun = frame->GetExpression(
expressions_count - 2 - call_function_arg_count);
+
+ // Flood the actual target of call/apply.
+ if (fun->IsJSFunction()) {
+ Isolate* isolate = JSFunction::cast(fun)->GetIsolate();
+ Code* apply = isolate->builtins()->builtin(Builtins::kFunctionApply);
+ Code* call = isolate->builtins()->builtin(Builtins::kFunctionCall);
+ while (fun->IsJSFunction()) {
+ Code* code = JSFunction::cast(fun)->shared()->code();
+ if (code != apply && code != call) break;
+ fun = frame->GetExpression(
+ expressions_count - 1 - call_function_arg_count);
+ }
+ }
+
if (fun->IsJSFunction()) {
Handle<JSFunction> js_function(JSFunction::cast(fun));
if (js_function->shared()->bound()) {
@@ -1660,11 +1643,6 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
// used by the call site.
if (code->is_inline_cache_stub()) {
switch (code->kind()) {
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- return isolate->stub_cache()->ComputeCallDebugBreak(
- code->arguments_count(), code->kind());
-
case Code::LOAD_IC:
return isolate->builtins()->LoadIC_DebugBreak();
@@ -1871,41 +1849,6 @@ void Debug::ClearStepNext() {
}
-// Helper function to compile full code for debugging. This code will
-// have debug break slots and deoptimization information. Deoptimization
-// information is required in case that an optimized version of this
-// function is still activated on the stack. It will also make sure that
-// the full code is compiled with the same flags as the previous version,
-// that is flags which can change the code generated. The current method
-// of mapping from already compiled full code without debug break slots
-// to full code with debug break slots depends on the generated code is
-// otherwise exactly the same.
-static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
- Handle<Code> current_code) {
- ASSERT(!current_code->has_debug_break_slots());
-
- CompilationInfoWithZone info(function);
- info.MarkCompilingForDebugging(current_code);
- ASSERT(!info.shared_info()->is_compiled());
- ASSERT(!info.isolate()->has_pending_exception());
-
- // Use compile lazy which will end up compiling the full code in the
- // configuration configured above.
- bool result = Compiler::CompileLazy(&info);
- ASSERT(result != info.isolate()->has_pending_exception());
- info.isolate()->clear_pending_exception();
-#if DEBUG
- if (result) {
- Handle<Code> new_code(function->shared()->code());
- ASSERT(new_code->has_debug_break_slots());
- ASSERT(current_code->is_compiled_optimizable() ==
- new_code->is_compiled_optimizable());
- }
-#endif
- return result;
-}
-
-
static void CollectActiveFunctionsFromThread(
Isolate* isolate,
ThreadLocalTop* top,
@@ -2056,14 +1999,13 @@ void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
- if (FLAG_concurrent_recompilation) {
+ if (isolate_->concurrent_recompilation_enabled()) {
isolate_->optimizing_compiler_thread()->Flush();
}
Deoptimizer::DeoptimizeAll(isolate_);
- Handle<Code> lazy_compile =
- Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
+ Handle<Code> lazy_compile = isolate_->builtins()->CompileUnoptimized();
// There will be at least one break point when we are done.
has_break_points_ = true;
@@ -2115,9 +2057,9 @@ void Debug::PrepareForBreakPoints() {
function->set_code(*lazy_compile);
function->shared()->set_code(*lazy_compile);
} else if (kind == Code::BUILTIN &&
- (function->IsInRecompileQueue() ||
- function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForConcurrentRecompilation())) {
+ (function->IsInOptimizationQueue() ||
+ function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization())) {
// Abort in-flight compilation.
Code* shared_code = function->shared()->code();
if (shared_code->kind() == Code::FUNCTION &&
@@ -2162,19 +2104,13 @@ void Debug::PrepareForBreakPoints() {
if (!shared->code()->has_debug_break_slots()) {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
- Handle<Code> current_code(function->shared()->code());
- shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
- ASSERT(current_code->kind() == Code::FUNCTION);
- CompileFullCodeForDebugging(function, current_code);
+ Handle<Code> code = Compiler::GetCodeForDebugging(function);
+ function->ReplaceCode(*code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
- if (!shared->is_compiled()) {
- shared->set_code(*current_code);
- continue;
- }
}
// Keep function code in sync with shared function info.
@@ -2287,11 +2223,10 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// will compile all inner functions that cannot be compiled without a
// context, because Compiler::BuildFunctionInfo checks whether the
// debugger is active.
- if (target_function.is_null()) {
- SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
- } else {
- JSFunction::CompileLazy(target_function, KEEP_EXCEPTION);
- }
+ Handle<Code> result = target_function.is_null()
+ ? Compiler::GetUnoptimizedCode(target)
+ : Compiler::GetUnoptimizedCode(target_function);
+ if (result.is_null()) return isolate_->heap()->undefined_value();
}
} // End while loop.
@@ -2315,7 +2250,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
// Ensure function is compiled. Return false if this failed.
if (!function.is_null() &&
- !JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION)) {
+ !Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
@@ -2601,6 +2536,21 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
}
+void Debug::RecordEvalCaller(Handle<Script> script) {
+ script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
+ // For eval scripts add information on the function from which eval was
+ // called.
+ StackTraceFrameIterator it(script->GetIsolate());
+ if (!it.done()) {
+ script->set_eval_from_shared(it.frame()->function()->shared());
+ Code* code = it.frame()->LookupCode();
+ int offset = static_cast<int>(
+ it.frame()->pc() - code->instruction_start());
+ script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+ }
+}
+
+
void Debug::AfterGarbageCollection() {
// Generate events for collected scripts.
if (script_cache_ != NULL) {
@@ -3071,6 +3021,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
bool auto_continue) {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
HandleScope scope(isolate_);
if (!isolate_->debug()->Load()) return;
@@ -3131,12 +3082,12 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
{
v8::Local<v8::Object> api_exec_state =
v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
- v8::Local<v8::String> fun_name =
- v8::String::New("debugCommandProcessor");
+ v8::Local<v8::String> fun_name = v8::String::NewFromUtf8(
+ isolate, "debugCommandProcessor");
v8::Local<v8::Function> fun =
v8::Local<v8::Function>::Cast(api_exec_state->Get(fun_name));
- v8::Handle<v8::Boolean> running = v8::Boolean::New(auto_continue);
+ v8::Handle<v8::Boolean> running = v8::Boolean::New(isolate, auto_continue);
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { running };
cmd_processor = v8::Local<v8::Object>::Cast(
@@ -3179,11 +3130,12 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::Local<v8::Function> fun;
v8::Local<v8::Value> request;
v8::TryCatch try_catch;
- fun_name = v8::String::New("processDebugRequest");
+ fun_name = v8::String::NewFromUtf8(isolate, "processDebugRequest");
fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
- request = v8::String::New(command.text().start(),
- command.text().length());
+ request = v8::String::NewFromTwoByte(isolate, command.text().start(),
+ v8::String::kNormalString,
+ command.text().length());
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { request };
v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv);
@@ -3195,7 +3147,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
if (!response_val->IsUndefined()) {
response = v8::Local<v8::String>::Cast(response_val);
} else {
- response = v8::String::New("");
+ response = v8::String::NewFromUtf8(isolate, "");
}
// Log the JSON request/response.
@@ -3205,7 +3157,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
}
// Get the running state.
- fun_name = v8::String::New("isRunning");
+ fun_name = v8::String::NewFromUtf8(isolate, "isRunning");
fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { response };
@@ -3247,12 +3199,12 @@ void Debugger::SetEventListener(Handle<Object> callback,
// Clear the global handles for the event listener and the event listener data
// object.
if (!event_listener_.is_null()) {
- global_handles->Destroy(
+ GlobalHandles::Destroy(
reinterpret_cast<Object**>(event_listener_.location()));
event_listener_ = Handle<Object>();
}
if (!event_listener_data_.is_null()) {
- global_handles->Destroy(
+ GlobalHandles::Destroy(
reinterpret_cast<Object**>(event_listener_data_.location()));
event_listener_data_ = Handle<Object>();
}
@@ -3638,7 +3590,7 @@ v8::Handle<v8::Object> MessageImpl::GetEventData() const {
v8::Handle<v8::String> MessageImpl::GetJSON() const {
- v8::HandleScope scope(
+ v8::EscapableHandleScope scope(
reinterpret_cast<v8::Isolate*>(event_data_->GetIsolate()));
if (IsEvent()) {
@@ -3654,7 +3606,7 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
if (caught_exception || !json->IsString()) {
return v8::Handle<v8::String>();
}
- return scope.Close(v8::Utils::ToLocal(Handle<String>::cast(json)));
+ return scope.Escape(v8::Utils::ToLocal(Handle<String>::cast(json)));
} else {
return v8::Utils::ToLocal(response_json_);
}
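
The weak-handle callbacks above now receive a v8::WeakCallbackData<...> instead of the old (isolate, Persistent*, void*) triple, and handles are released with Reset() rather than Dispose()/Clear(). On the public API side (as in d8.cc's ReadBufferWeakCallback) registration goes through Persistent::SetWeak. A minimal sketch of the new shape, assuming this API vintage; Wrapper and OnWeak are illustrative names:

    struct Wrapper {                       // owns the weak handle and its payload
      uint8_t* payload;
      v8::Persistent<v8::Object> handle;
    };

    static void OnWeak(const v8::WeakCallbackData<v8::Object, Wrapper>& data) {
      Wrapper* wrapper = data.GetParameter();   // was the trailing void* argument
      delete[] wrapper->payload;
      wrapper->handle.Reset();                  // was Dispose() followed by Clear()
      delete wrapper;
    }

    // Registration:
    //   wrapper->handle.Reset(isolate, object);
    //   wrapper->handle.SetWeak(wrapper, OnWeak);
    //   wrapper->handle.MarkIndependent();
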
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 8e71ea6705..564f9e8854 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -38,7 +38,6 @@
#include "frames-inl.h"
#include "hashmap.h"
#include "platform.h"
-#include "platform/socket.h"
#include "string-stream.h"
#include "v8threads.h"
@@ -201,9 +200,8 @@ class ScriptCache : private HashMap {
void Clear();
// Weak handle callback for scripts in the cache.
- static void HandleWeakScript(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data);
+ static void HandleWeakScript(
+ const v8::WeakCallbackData<v8::Value, void>& data);
Isolate* isolate_;
// List used during GC to temporarily store id's of collected scripts.
@@ -403,9 +401,8 @@ class Debug {
static const int kEstimatedNofBreakPointsInFunction = 16;
// Passed to MakeWeak.
- static void HandleWeakDebugInfo(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data);
+ static void HandleWeakDebugInfo(
+ const v8::WeakCallbackData<v8::Value, void>& data);
friend class Debugger;
friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
@@ -426,6 +423,9 @@ class Debug {
void AddScriptToScriptCache(Handle<Script> script);
Handle<FixedArray> GetLoadedScripts();
+ // Record function from which eval was called.
+ static void RecordEvalCaller(Handle<Script> script);
+
// Garbage collection notifications.
void AfterGarbageCollection();
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index c7311b3cc9..68b10d9612 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -181,7 +181,8 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// Always use the actual stack slots when calculating the fp to sp
// delta adding two for the function and context.
unsigned stack_slots = code->stack_slots();
- unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
+ unsigned fp_to_sp_delta = (stack_slots * kPointerSize) +
+ StandardFrameConstants::kFixedFrameSizeFromFp;
Deoptimizer* deoptimizer = new Deoptimizer(isolate,
function,
@@ -344,9 +345,11 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
if (FLAG_trace_deopt) {
- PrintF("[deoptimizer unlinked: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimizer unlinked: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(),
+ " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
}
}
};
@@ -409,7 +412,8 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
if (FLAG_trace_deopt) {
- PrintF("[deoptimize all code in all contexts]\n");
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
}
DisallowHeapAllocation no_allocation;
// For all contexts, mark all code, then deoptimize.
@@ -425,7 +429,8 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
if (FLAG_trace_deopt) {
- PrintF("[deoptimize marked code in all contexts]\n");
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
}
DisallowHeapAllocation no_allocation;
// For all contexts, deoptimize code already marked.
@@ -440,7 +445,8 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
if (FLAG_trace_deopt) {
- PrintF("[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
+ CodeTracer::Scope scope(object->GetHeap()->isolate()->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
reinterpret_cast<intptr_t>(object));
}
if (object->IsJSGlobalProxy()) {
@@ -541,7 +547,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
materialized_objects_(NULL),
materialization_value_index_(0),
materialization_object_index_(0),
- trace_(false) {
+ trace_scope_(NULL) {
// For COMPILED_STUBs called from builtins, the function pointer is a SMI
// indicating an internal frame.
if (function->IsSmi()) {
@@ -571,7 +577,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
StackFrame::Type frame_type = function == NULL
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
- trace_ = TraceEnabledFor(type, frame_type);
+ trace_scope_ = TraceEnabledFor(type, frame_type) ?
+ new CodeTracer::Scope(isolate->GetCodeTracer()) : NULL;
#ifdef DEBUG
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
@@ -604,9 +611,10 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
void Deoptimizer::PrintFunctionName() {
if (function_->IsJSFunction()) {
- function_->PrintName();
+ function_->PrintName(trace_scope_->file());
} else {
- PrintF("%s", Code::Kind2String(compiled_code_->kind()));
+ PrintF(trace_scope_->file(),
+ "%s", Code::Kind2String(compiled_code_->kind()));
}
}
@@ -614,6 +622,7 @@ void Deoptimizer::PrintFunctionName() {
Deoptimizer::~Deoptimizer() {
ASSERT(input_ == NULL && output_ == NULL);
ASSERT(disallow_heap_allocation_ == NULL);
+ delete trace_scope_;
}
@@ -681,13 +690,13 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
return data->PcAndState(i)->value();
}
}
- PrintF("[couldn't find pc offset for node=%d]\n", id.ToInt());
- PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
+ PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt());
+ PrintF(stderr, "[method: %s]\n", shared->DebugName()->ToCString().get());
// Print the source code if available.
HeapStringAllocator string_allocator;
StringStream stream(&string_allocator);
shared->SourceCodePrint(&stream, -1);
- PrintF("[source:\n%s\n]", *stream.ToCString());
+ PrintF(stderr, "[source:\n%s\n]", stream.ToCString().get());
FATAL("unable to find pc offset during deoptimization");
return -1;
@@ -722,22 +731,29 @@ void Deoptimizer::DoComputeOutputFrames() {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
ElapsedTimer timer;
- if (trace_) {
+
+ // Determine basic deoptimization information. The optimized frame is
+ // described by the input data.
+ DeoptimizationInputData* input_data =
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
+
+ if (trace_scope_ != NULL) {
timer.Start();
- PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
+ PrintF(trace_scope_->file(),
+ "[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
- PrintF(" @%d, FP to SP delta: %d]\n", bailout_id_, fp_to_sp_delta_);
+ PrintF(trace_scope_->file(),
+ " (opt #%d) @%d, FP to SP delta: %d]\n",
+ input_data->OptimizationId()->value(),
+ bailout_id_,
+ fp_to_sp_delta_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
- compiled_code_->PrintDeoptLocation(bailout_id_);
+ compiled_code_->PrintDeoptLocation(trace_scope_->file(), bailout_id_);
}
}
- // Determine basic deoptimization information. The optimized frame is
- // described by the input data.
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@@ -760,6 +776,11 @@ void Deoptimizer::DoComputeOutputFrames() {
}
output_count_ = count;
+ Register fp_reg = JavaScriptFrame::fp_register();
+ stack_fp_ = reinterpret_cast<Address>(
+ input_->GetRegister(fp_reg.code()) +
+ has_alignment_padding_ * kPointerSize);
+
// Translate each output frame.
for (int i = 0; i < count; ++i) {
// Read the ast node id, function, and frame height for this output frame.
@@ -803,15 +824,17 @@ void Deoptimizer::DoComputeOutputFrames() {
}
// Print some helpful diagnostic information.
- if (trace_) {
+ if (trace_scope_ != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
JSFunction* function = output_[index]->GetFunction();
- PrintF("[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
+ PrintF(trace_scope_->file(),
+ "[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function));
PrintFunctionName();
- PrintF(" @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
+ PrintF(trace_scope_->file(),
+ " @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
" took %0.3f ms]\n",
bailout_id_,
node_id.ToInt(),
@@ -839,10 +862,11 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(), " translating ");
+ function->PrintName(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
@@ -875,7 +899,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// If the optimized frame had alignment padding, adjust the frame pointer
// to point to the new position of the old frame pointer after padding
// is removed. Subtract StandardFrameConstants::kFixedFrameSizeFromFp for the
// context and function slots (and the constant pool slot when
// FLAG_enable_ool_constant_pool is set).
- top_address = input_->GetRegister(fp_reg.code()) - (2 * kPointerSize) -
+ top_address = input_->GetRegister(fp_reg.code()) -
+ StandardFrameConstants::kFixedFrameSizeFromFp -
height_in_bytes + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
@@ -909,8 +934,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetCallerPc(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
@@ -932,14 +958,39 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
has_alignment_padding_ * kPointerSize) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
}
ASSERT(!is_bottommost || !has_alignment_padding_ ||
(fp_value & kPointerSize) != 0);
+ if (FLAG_enable_ool_constant_pool) {
+ // For the bottommost output frame the constant pool pointer can be gotten
+ // from the input frame. For subsequent output frames, it can be gotten from
+ // the function's code.
+ Register constant_pool_reg =
+ JavaScriptFrame::constant_pool_pointer_register();
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(
+ function->shared()->code()->constant_pool());
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetConstantPool(value);
+ if (is_topmost) output_frame->SetRegister(constant_pool_reg.code(), value);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; constant_pool\n",
+ top_address + output_offset, output_offset, value);
+ }
+ }
+
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
@@ -954,8 +1005,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR "; context\n",
top_address + output_offset, output_offset, value);
}
@@ -968,8 +1020,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR "; function\n",
top_address + output_offset, output_offset, value);
}
@@ -1017,8 +1070,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " translating arguments adaptor => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
@@ -1052,8 +1106,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
@@ -1064,19 +1119,34 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
}
+ if (FLAG_enable_ool_constant_pool) {
+ // A marker value is used in place of the constant pool.
+ output_offset -= kPointerSize;
+ intptr_t constant_pool = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, constant_pool);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; constant_pool (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, constant_pool);
+ }
+ }
+
// A marker value is used in place of the context.
output_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; context (adaptor sentinel)\n",
top_address + output_offset, output_offset, context);
}
@@ -1085,8 +1155,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; function\n",
top_address + output_offset, output_offset, value);
}
@@ -1095,8 +1166,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
@@ -1120,8 +1192,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " translating construct stub => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = ConstructFrameConstants::kFrameSize;
@@ -1163,8 +1236,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
@@ -1175,18 +1249,32 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
}
+ if (FLAG_enable_ool_constant_pool) {
+ // The constant pool pointer can be gotten from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetConstantPool();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; constant pool\n",
+ top_address + output_offset, output_offset, value);
+ }
+ }
+
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -1195,8 +1283,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
}
@@ -1205,8 +1294,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(construct_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; code object\n",
top_address + output_offset, output_offset, value);
}
@@ -1215,8 +1305,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
@@ -1227,8 +1318,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; constructor function\n",
top_address + output_offset, output_offset, value);
}
@@ -1239,8 +1331,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; allocated receiver\n",
top_address + output_offset, output_offset, value);
}
@@ -1264,18 +1357,19 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
unsigned height = 0;
unsigned height_in_bytes = height * kPointerSize;
const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (trace_) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 1 stack entry for the return address + 4 stack entries from
- // StackFrame::INTERNAL (FP, context, frame type, code object, see
- // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
- // entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = (kPCOnStackSize / kPointerSize) +
- (kFPOnStackSize / kPointerSize) + 3 +
- (is_setter_stub_frame ? 1 : 0);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 1 stack entry for the return address and enough entries for the
+ // StackFrame::INTERNAL (FP, context, frame type, code object and constant
+ // pool (if FLAG_enable_ool_constant_pool) - see MacroAssembler::EnterFrame).
+ // For a setter stub frame we need one additional entry for the implicit
+ // return value, see StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries =
+ (StandardFrameConstants::kFixedFrameSize / kPointerSize) + 1 +
+ (is_setter_stub_frame ? 1 : 0);
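+ // For example, assuming no out-of-line constant pool and
+ // kPCOnStackSize == kFPOnStackSize == kPointerSize, kFixedFrameSize is
+ // 4 * kPointerSize (caller pc, caller fp, context, frame-type marker),
+ // giving 5 fixed entries for a getter frame and 6 for a setter frame.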
unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
@@ -1300,8 +1394,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
@@ -1312,18 +1407,32 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; caller's fp\n",
fp_value, output_offset, value);
}
+ if (FLAG_enable_ool_constant_pool) {
+ // The constant pool pointer can be gotten from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetConstantPool();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; constant pool\n",
+ top_address + output_offset, output_offset, value);
+ }
+ }
+
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -1332,8 +1441,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; function (%s sentinel)\n",
top_address + output_offset, output_offset, value, kind);
}
@@ -1346,8 +1456,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
Code* accessor_stub = isolate_->builtins()->builtin(name);
value = reinterpret_cast<intptr_t>(accessor_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; code object\n",
top_address + output_offset, output_offset, value);
}
@@ -1385,6 +1496,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// +-------------------------+ +-------------------------+
// | | saved frame (FP) | | saved frame (FP) |
// | +=========================+<-fpreg +=========================+<-fpreg
+ // | |constant pool (if ool_cp)| |constant pool (if ool_cp)|
+ // | +-------------------------+ +-------------------------|
// | | JSFunction context | | JSFunction context |
// v +-------------------------+ +-------------------------|
// | COMPILED_STUB marker | | STUB_FAILURE marker |
@@ -1423,8 +1536,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
int input_frame_size = input_->GetFrameSize();
int output_frame_size = height_in_bytes + fixed_frame_size;
- if (trace_) {
- PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " translating %s => StubFailureTrampolineStub, height=%d\n",
CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
height_in_bytes);
}
@@ -1441,7 +1555,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// context and function slots.
Register fp_reg = StubFailureTrampolineFrame::fp_register();
intptr_t top_address = input_->GetRegister(fp_reg.code()) -
- (2 * kPointerSize) - height_in_bytes;
+ StandardFrameConstants::kFixedFrameSizeFromFp - height_in_bytes;
output_frame->SetTop(top_address);
// Read caller's PC (JSFunction continuation) from the input frame.
@@ -1449,8 +1563,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
intptr_t value = input_->GetFrameSlot(input_frame_offset);
output_frame->SetCallerPc(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_frame_offset, output_frame_offset, value);
}
@@ -1463,12 +1578,29 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
output_frame->SetRegister(fp_reg.code(), frame_ptr);
output_frame->SetFp(frame_ptr);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
top_address + output_frame_offset, output_frame_offset, value);
}
+ if (FLAG_enable_ool_constant_pool) {
+ // The constant pool pointer can be gotten from the input frame.
+ Register constant_pool_pointer_register =
+ StubFailureTrampolineFrame::constant_pool_pointer_register();
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetRegister(constant_pool_pointer_register.code(), value);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; constant_pool_pointer\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+ }
+
// The context can be gotten from the input frame.
Register context_reg = StubFailureTrampolineFrame::context_register();
input_frame_offset -= kPointerSize;
@@ -1476,8 +1608,10 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
output_frame->SetRegister(context_reg.code(), value);
output_frame_offset -= kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ ASSERT(reinterpret_cast<Object*>(value)->IsContext());
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; context\n",
top_address + output_frame_offset, output_frame_offset, value);
}
@@ -1487,8 +1621,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
value = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; function (stub failure sentinel)\n",
top_address + output_frame_offset, output_frame_offset, value);
}
@@ -1509,8 +1644,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(args_arguments_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args.arguments %s\n",
top_address + args_arguments_offset, args_arguments_offset, value,
arg_count_known ? "" : "(the hole)");
@@ -1520,8 +1656,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
int length_frame_offset = output_frame_offset;
value = arg_count_known ? caller_arg_count : the_hole;
output_frame->SetFrameSlot(length_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args.length %s\n",
top_address + length_frame_offset, length_frame_offset, value,
arg_count_known ? "" : "(the hole)");
@@ -1531,34 +1668,54 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
(output_frame_size - output_frame_offset) + kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args*\n",
top_address + output_frame_offset, output_frame_offset, value);
}
// Copy the register parameters to the failure frame.
+ int arguments_length_offset = -1;
for (int i = 0; i < descriptor->register_param_count_; ++i) {
output_frame_offset -= kPointerSize;
DoTranslateCommand(iterator, 0, output_frame_offset);
+
+ if (!arg_count_known && descriptor->IsParameterCountRegister(i)) {
+ arguments_length_offset = output_frame_offset;
+ }
}
+ ASSERT(0 == output_frame_offset);
+
if (!arg_count_known) {
- DoTranslateCommand(iterator, 0, length_frame_offset,
- TRANSLATED_VALUE_IS_NATIVE);
- caller_arg_count = output_frame->GetFrameSlot(length_frame_offset);
+ ASSERT(arguments_length_offset >= 0);
+ // We know it's a smi because 1) the code stub guarantees the stack
+ // parameter count is in smi range, and 2) the DoTranslateCommand in the
+ // parameter loop above translated that to a tagged value.
+ Smi* smi_caller_arg_count = reinterpret_cast<Smi*>(
+ output_frame->GetFrameSlot(arguments_length_offset));
+ caller_arg_count = smi_caller_arg_count->value();
+ output_frame->SetFrameSlot(length_frame_offset, caller_arg_count);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; args.length\n",
+ top_address + length_frame_offset, length_frame_offset,
+ caller_arg_count);
+ }
value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
(caller_arg_count - 1) * kPointerSize;
output_frame->SetFrameSlot(args_arguments_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args.arguments\n",
- top_address + args_arguments_offset, args_arguments_offset, value);
+ top_address + args_arguments_offset, args_arguments_offset,
+ value);
}
}
- ASSERT(0 == output_frame_offset);
-
// Copy the double registers from the input into the output frame.
CopyDoubleRegisters(output_frame);
@@ -1619,17 +1776,27 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged());
switch (map->instance_type()) {
case HEAP_NUMBER_TYPE: {
- Handle<HeapNumber> object = isolate_->factory()->NewHeapNumber(0.0);
- materialized_objects_->Add(object);
- Handle<Object> number = MaterializeNextValue();
- object->set_value(number->Number());
+ // Reuse the HeapNumber value directly as it is already properly
+ // tagged and skip materializing the HeapNumber explicitly.
+ Handle<Object> object = MaterializeNextValue();
+ if (object_index < prev_materialized_count_) {
+ materialized_objects_->Add(Handle<Object>(
+ previously_materialized_objects_->get(object_index), isolate_));
+ } else {
+ materialized_objects_->Add(object);
+ }
materialization_value_index_ += kDoubleSize / kPointerSize - 1;
break;
}
case JS_OBJECT_TYPE: {
Handle<JSObject> object =
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
- materialized_objects_->Add(object);
+ if (object_index < prev_materialized_count_) {
+ materialized_objects_->Add(Handle<Object>(
+ previously_materialized_objects_->get(object_index), isolate_));
+ } else {
+ materialized_objects_->Add(object);
+ }
Handle<Object> properties = MaterializeNextValue();
Handle<Object> elements = MaterializeNextValue();
object->set_properties(FixedArray::cast(*properties));
@@ -1643,7 +1810,12 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
case JS_ARRAY_TYPE: {
Handle<JSArray> object =
isolate_->factory()->NewJSArray(0, map->elements_kind());
- materialized_objects_->Add(object);
+ if (object_index < prev_materialized_count_) {
+ materialized_objects_->Add(Handle<Object>(
+ previously_materialized_objects_->get(object_index), isolate_));
+ } else {
+ materialized_objects_->Add(object);
+ }
Handle<Object> properties = MaterializeNextValue();
Handle<Object> elements = MaterializeNextValue();
Handle<Object> length = MaterializeNextValue();
@@ -1653,7 +1825,8 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
break;
}
default:
- PrintF("[couldn't handle instance type %d]\n", map->instance_type());
+ PrintF(stderr,
+ "[couldn't handle instance type %d]\n", map->instance_type());
UNREACHABLE();
}
}
@@ -1675,6 +1848,12 @@ Handle<Object> Deoptimizer::MaterializeNextValue() {
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
ASSERT_NE(DEBUGGER, bailout_type_);
+ MaterializedObjectStore* materialized_store =
+ isolate_->materialized_object_store();
+ previously_materialized_objects_ = materialized_store->Get(stack_fp_);
+ prev_materialized_count_ = previously_materialized_objects_.is_null() ?
+ 0 : previously_materialized_objects_->length();
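+ // Objects already recorded in the store for this frame (keyed by stack_fp_)
+ // are reused by MaterializeNextHeapObject() below instead of being
+ // allocated again.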
+
// Walk all JavaScript output frames with the given frame iterator.
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
@@ -1698,8 +1877,9 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
- if (trace_) {
- PrintF("Materialized a new heap number %p [%e] in slot %p\n",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new heap number %p [%e] in slot %p\n",
reinterpret_cast<void*>(*num),
d.value(),
d.destination());
@@ -1712,8 +1892,9 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
HeapNumberMaterializationDescriptor<int> d =
deferred_objects_double_values_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
- if (trace_) {
- PrintF("Materialized a new heap number %p [%e] for object at %d\n",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new heap number %p [%e] for object at %d\n",
reinterpret_cast<void*>(*num),
d.value(),
d.destination());
@@ -1741,25 +1922,31 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
// the materialized object into the frame slot.
Handle<Object> object = MaterializeNextHeapObject();
Memory::Object_at(descriptor.slot_address()) = *object;
- if (trace_) {
+ if (trace_scope_ != NULL) {
if (descriptor.is_arguments()) {
- PrintF("Materialized %sarguments object of length %d for %p: ",
+ PrintF(trace_scope_->file(),
+ "Materialized %sarguments object of length %d for %p: ",
ArgumentsObjectIsAdapted(object_index) ? "(adapted) " : "",
Handle<JSObject>::cast(object)->elements()->length(),
reinterpret_cast<void*>(descriptor.slot_address()));
} else {
- PrintF("Materialized captured object of size %d for %p: ",
+ PrintF(trace_scope_->file(),
+ "Materialized captured object of size %d for %p: ",
Handle<HeapObject>::cast(object)->Size(),
reinterpret_cast<void*>(descriptor.slot_address()));
}
- object->ShortPrint();
- PrintF("\n");
+ object->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(), "\n");
}
}
ASSERT(materialization_object_index_ == materialized_objects_->length());
ASSERT(materialization_value_index_ == materialized_values_->length());
}
+
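+ // Any existing store entry for this frame is dropped once everything has
+ // been materialized; it is keyed by stack_fp_, so presumably this keeps a
+ // later deoptimization at the same frame pointer from picking up stale
+ // objects.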
+ if (prev_materialized_count_ > 0) {
+ materialized_store->Remove(stack_fp_);
+ }
}
@@ -1785,8 +1972,9 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
int index = (info->parameters_count() - 1) -
static_cast<int>(slot - parameters_top) / kPointerSize;
- if (trace_) {
- PrintF("Materializing a new heap number %p [%e] in slot %p"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materializing a new heap number %p [%e] in slot %p"
"for parameter slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
@@ -1801,8 +1989,9 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
int index = info->expression_count() - 1 -
static_cast<int>(slot - expressions_top) / kPointerSize;
- if (trace_) {
- PrintF("Materializing a new heap number %p [%e] in slot %p"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materializing a new heap number %p [%e] in slot %p"
"for expression slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
@@ -1817,10 +2006,8 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
#endif
-static const char* TraceValueType(bool is_smi, bool is_native = false) {
- if (is_native) {
- return "native";
- } else if (is_smi) {
+static const char* TraceValueType(bool is_smi) {
+ if (is_smi) {
return "smi";
}
@@ -1851,14 +2038,18 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::REGISTER: {
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("0x%08" V8PRIxPTR " ; %s ", input_value,
+ PrintF(trace_scope_->file(),
+ "0x%08" V8PRIxPTR " ; %s ", input_value,
converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
+ reinterpret_cast<Object*>(input_value)->ShortPrint(
+ trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ "\n");
}
AddObjectTaggedValue(input_value);
return;
@@ -1868,11 +2059,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int input_reg = iterator->Next();
intptr_t value = input_->GetRegister(input_reg);
bool is_smi = Smi::IsValid(value);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%" V8PRIdPTR " ; %s (%s)\n", value,
+ PrintF(trace_scope_->file(),
+ "%" V8PRIdPTR " ; %s (%s)\n", value,
converter.NameOfCPURegister(input_reg),
TraceValueType(is_smi));
}
@@ -1891,11 +2084,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int input_reg = iterator->Next();
uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%" V8PRIdPTR " ; uint %s (%s)\n", value,
+ PrintF(trace_scope_->file(),
+ "%" V8PRIdPTR " ; uint %s (%s)\n", value,
converter.NameOfCPURegister(input_reg),
TraceValueType(is_smi));
}
@@ -1913,11 +2108,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%e ; %s\n", value,
+ PrintF(trace_scope_->file(),
+ "%e ; %s\n", value,
DoubleRegister::AllocationIndexToString(input_reg));
}
AddObjectDoubleValue(value);
@@ -1928,13 +2125,17 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("0x%08" V8PRIxPTR " ; [sp + %d] ", input_value, input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
+ PrintF(trace_scope_->file(),
+ "0x%08" V8PRIxPTR " ; [sp + %d] ", input_value, input_offset);
+ reinterpret_cast<Object*>(input_value)->ShortPrint(
+ trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ "\n");
}
AddObjectTaggedValue(input_value);
return;
@@ -1945,11 +2146,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
bool is_smi = Smi::IsValid(value);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%" V8PRIdPTR " ; [sp + %d] (%s)\n",
+ PrintF(trace_scope_->file(),
+ "%" V8PRIdPTR " ; [sp + %d] (%s)\n",
value, input_offset, TraceValueType(is_smi));
}
if (is_smi) {
@@ -1969,11 +2172,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
uintptr_t value =
static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%" V8PRIdPTR " ; [sp + %d] (uint %s)\n",
+ PrintF(trace_scope_->file(),
+ "%" V8PRIdPTR " ; [sp + %d] (uint %s)\n",
value, input_offset, TraceValueType(is_smi));
}
if (is_smi) {
@@ -1991,11 +2196,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%e ; [sp + %d]\n", value, input_offset);
+ PrintF(trace_scope_->file(),
+ "%e ; [sp + %d]\n", value, input_offset);
}
AddObjectDoubleValue(value);
return;
@@ -2003,12 +2210,14 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- literal->ShortPrint();
- PrintF(" ; literal\n");
+ literal->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; literal\n");
}
intptr_t value = reinterpret_cast<intptr_t>(literal);
AddObjectTaggedValue(value);
@@ -2017,12 +2226,14 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::DUPLICATED_OBJECT: {
int object_index = iterator->Next();
- if (trace_) {
- PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; duplicate of object #%d\n", object_index);
+ isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; duplicate of object #%d\n", object_index);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
@@ -2037,12 +2248,14 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::CAPTURED_OBJECT: {
int length = iterator->Next();
bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
- if (trace_) {
- PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
+ isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; object (length = %d, is_args = %d)\n", length, is_args);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
@@ -2063,13 +2276,11 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset,
- DeoptimizerTranslatedValueType value_type) {
+ int frame_index,
+ unsigned output_offset) {
disasm::NameConverter converter;
// A GC-safe temporary placeholder that we can put in the output frame.
const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
- bool is_native = value_type == TRANSLATED_VALUE_IS_NATIVE;
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
@@ -2088,15 +2299,17 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::REGISTER: {
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
- if (trace_) {
+ if (trace_scope_ != NULL) {
PrintF(
+ trace_scope_->file(),
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
output_[frame_index]->GetTop() + output_offset,
output_offset,
input_value,
converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
+ reinterpret_cast<Object*>(input_value)->ShortPrint(
+ trace_scope_->file());
+ PrintF(trace_scope_->file(), "\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -2105,27 +2318,24 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_REGISTER: {
int input_reg = iterator->Next();
intptr_t value = input_->GetRegister(input_reg);
- bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
- Smi::IsValid(value);
- if (trace_) {
+ bool is_smi = Smi::IsValid(value);
+ if (trace_scope_ != NULL) {
PrintF(
+ trace_scope_->file(),
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
converter.NameOfCPURegister(input_reg),
- TraceValueType(is_smi, is_native));
+ TraceValueType(is_smi));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
- output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2136,28 +2346,25 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_REGISTER: {
int input_reg = iterator->Next();
uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
- bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
- (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
+ bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
+ if (trace_scope_ != NULL) {
PrintF(
+ trace_scope_->file(),
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
" ; uint %s (%s)\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
converter.NameOfCPURegister(input_reg),
- TraceValueType(is_smi, is_native));
+ TraceValueType(is_smi));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
- output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2168,8 +2375,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
@@ -2186,15 +2394,18 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
+ PrintF(trace_scope_->file(),
+ "[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
input_value,
input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
+ reinterpret_cast<Object*>(input_value)->ShortPrint(
+ trace_scope_->file());
+ PrintF(trace_scope_->file(), "\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -2204,27 +2415,25 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
- bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
- Smi::IsValid(value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
+ bool is_smi = Smi::IsValid(value);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
+ PrintF(trace_scope_->file(),
+ "[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
output_offset,
value,
input_offset,
- TraceValueType(is_smi, is_native));
+ TraceValueType(is_smi));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
- output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2237,27 +2446,25 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
uintptr_t value =
static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
- bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
- (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
+ bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
+ PrintF(trace_scope_->file(),
+ "[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
output_offset,
value,
input_offset,
- TraceValueType(is_smi, is_native));
+ TraceValueType(is_smi));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
- output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2269,8 +2476,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
@@ -2285,12 +2493,13 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
- literal->ShortPrint();
- PrintF(" ; literal\n");
+ literal->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(), " ; literal\n");
}
intptr_t value = reinterpret_cast<intptr_t>(literal);
output_[frame_index]->SetFrameSlot(output_offset, value);
@@ -2299,12 +2508,14 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DUPLICATED_OBJECT: {
int object_index = iterator->Next();
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; duplicate of object #%d\n", object_index);
+ isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; duplicate of object #%d\n", object_index);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
@@ -2320,12 +2531,14 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::CAPTURED_OBJECT: {
int length = iterator->Next();
bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
+ isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; object (length = %d, is_args = %d)\n", length, is_args);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
@@ -2348,9 +2561,10 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned fixed_size = ComputeFixedSize(function_);
- // The fp-to-sp delta already takes the context and the function
- // into account so we have to avoid double counting them (-2).
- unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
+ // The fp-to-sp delta already takes the context, constant pool pointer and the
+ // function into account so we have to avoid double counting them.
+ unsigned result = fixed_size + fp_to_sp_delta_ -
+ StandardFrameConstants::kFixedFrameSizeFromFp;
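+ // For example, without an out-of-line constant pool kFixedFrameSizeFromFp
+ // is 2 * kPointerSize (the context and function slots); with one it grows
+ // by an extra slot for the constant pool pointer.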
#ifdef DEBUG
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
@@ -2473,9 +2687,13 @@ FrameDescription::FrameDescription(uint32_t frame_size,
top_(kZapUint32),
pc_(kZapUint32),
fp_(kZapUint32),
- context_(kZapUint32) {
+ context_(kZapUint32),
+ constant_pool_(kZapUint32) {
// Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) {
+ // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
+ // isn't used before the next safepoint, the GC will try to scan it as a
+ // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
SetRegister(r, kZapUint32);
}
@@ -2760,12 +2978,11 @@ const char* Translation::StringFor(Opcode opcode) {
// We can't intermix stack decoding and allocations because
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
-SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame) {
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
-
+SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
+ Translation::Opcode opcode,
+ TranslationIterator* iterator,
+ DeoptimizationInputData* data,
+ JavaScriptFrame* frame) {
switch (opcode) {
case Translation::BEGIN:
case Translation::JS_FRAME:
@@ -2776,12 +2993,18 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
// Peeled off before getting here.
break;
- case Translation::DUPLICATED_OBJECT:
+ case Translation::DUPLICATED_OBJECT: {
+ return SlotRef::NewDuplicateObject(iterator->Next());
+ }
+
case Translation::ARGUMENTS_OBJECT:
- case Translation::CAPTURED_OBJECT:
// This can be only emitted for local slots not for argument slots.
break;
+ case Translation::CAPTURED_OBJECT: {
+ return SlotRef::NewDeferredObject(iterator->Next());
+ }
+
case Translation::REGISTER:
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
@@ -2831,28 +3054,12 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
}
-void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
- TranslationIterator* it,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame) {
- // Process the translation commands for the arguments.
-
- // Skip the translation command for the receiver.
- it->Skip(Translation::NumberOfOperandsFor(
- static_cast<Translation::Opcode>(it->Next())));
-
- // Compute slots for arguments.
- for (int i = 0; i < args_slots->length(); ++i) {
- (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
- }
-}
-
-
-Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
- JavaScriptFrame* frame,
- int inlined_jsframe_index,
- int formal_parameter_count) {
+SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
+ int inlined_jsframe_index,
+ int formal_parameter_count)
+ : current_slot_(0), args_length_(-1), first_slot_index_(-1) {
DisallowHeapAllocation no_gc;
+
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
@@ -2861,12 +3068,18 @@ Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
it.Next(); // Drop frame count.
+
+ stack_frame_id_ = frame->fp();
+
int jsframe_count = it.Next();
USE(jsframe_count);
ASSERT(jsframe_count > inlined_jsframe_index);
int jsframes_to_skip = inlined_jsframe_index;
- while (true) {
+ int number_of_slots = -1; // Number of slots inside our frame (yet unknown)
+ bool should_deopt = false;
+ while (number_of_slots != 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
+ bool processed = false;
if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
if (jsframes_to_skip == 0) {
ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
@@ -2874,36 +3087,339 @@ Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
it.Skip(1); // literal id
int height = it.Next();
+ // Skip the translation command for the receiver.
+ it.Skip(Translation::NumberOfOperandsFor(
+ static_cast<Translation::Opcode>(it.Next())));
+
// We reached the arguments adaptor frame corresponding to the
// inlined function in question. Number of arguments is height - 1.
- Vector<SlotRef> args_slots =
- Vector<SlotRef>::New(height - 1); // Minus receiver.
- ComputeSlotsForArguments(&args_slots, &it, data, frame);
- return args_slots;
+ first_slot_index_ = slot_refs_.length();
+ args_length_ = height - 1;
+ number_of_slots = height - 1;
+ processed = true;
}
} else if (opcode == Translation::JS_FRAME) {
if (jsframes_to_skip == 0) {
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
+ // Skip the translation command for the receiver.
+ it.Skip(Translation::NumberOfOperandsFor(
+ static_cast<Translation::Opcode>(it.Next())));
+
// We reached the frame corresponding to the inlined function
// in question. Process the translation commands for the
// arguments. Number of arguments is equal to the number of
// format parameter count.
- Vector<SlotRef> args_slots =
- Vector<SlotRef>::New(formal_parameter_count);
- ComputeSlotsForArguments(&args_slots, &it, data, frame);
- return args_slots;
+ first_slot_index_ = slot_refs_.length();
+ args_length_ = formal_parameter_count;
+ number_of_slots = formal_parameter_count;
+ processed = true;
}
jsframes_to_skip--;
+ } else if (opcode != Translation::BEGIN &&
+ opcode != Translation::CONSTRUCT_STUB_FRAME &&
+ opcode != Translation::GETTER_STUB_FRAME &&
+ opcode != Translation::SETTER_STUB_FRAME &&
+ opcode != Translation::COMPILED_STUB_FRAME) {
+ slot_refs_.Add(ComputeSlotForNextArgument(opcode, &it, data, frame));
+
+ if (first_slot_index_ >= 0) {
+        // We have found the beginning of our frame, so make sure we count
+        // the nested slots of captured objects.
+ number_of_slots--;
+ SlotRef& slot = slot_refs_.last();
+ if (slot.Representation() == SlotRef::DEFERRED_OBJECT) {
+ number_of_slots += slot.DeferredObjectLength();
+ }
+ if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
+ slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
+ should_deopt = true;
+ }
+ }
+
+ processed = true;
}
+ if (!processed) {
+ // Skip over operands to advance to the next opcode.
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ }
+ }
+ if (should_deopt) {
+ List<JSFunction*> functions(2);
+ frame->GetFunctions(&functions);
+ Deoptimizer::DeoptimizeFunction(functions[0]);
+ }
+}
+
+
+Handle<Object> SlotRef::GetValue(Isolate* isolate) {
+ switch (representation_) {
+ case TAGGED:
+ return Handle<Object>(Memory::Object_at(addr_), isolate);
+
+ case INT32: {
+ int value = Memory::int32_at(addr_);
+ if (Smi::IsValid(value)) {
+ return Handle<Object>(Smi::FromInt(value), isolate);
+ } else {
+ return isolate->factory()->NewNumberFromInt(value);
+ }
+ }
+
+ case UINT32: {
+ uint32_t value = Memory::uint32_at(addr_);
+ if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
+ return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
+ } else {
+ return isolate->factory()->NewNumber(static_cast<double>(value));
+ }
+ }
+
+ case DOUBLE: {
+ double value = read_double_value(addr_);
+ return isolate->factory()->NewNumber(value);
+ }
+
+ case LITERAL:
+ return literal_;
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
+ default:
+ UNREACHABLE();
+ return Handle<Object>::null();
+ }
+}
+
+
+void SlotRefValueBuilder::Prepare(Isolate* isolate) {
+ MaterializedObjectStore* materialized_store =
+ isolate->materialized_object_store();
+ previously_materialized_objects_ = materialized_store->Get(stack_frame_id_);
+ prev_materialized_count_ = previously_materialized_objects_.is_null()
+ ? 0 : previously_materialized_objects_->length();
+
+ // Skip any materialized objects of the inlined "parent" frames.
+  // (Note that we still need to materialize them because they might be
+  // referenced by later DUPLICATE_OBJECT slots.)
+ while (current_slot_ < first_slot_index_) {
+ GetNext(isolate, 0);
+ }
+ ASSERT(current_slot_ == first_slot_index_);
+}
+
+
+Handle<Object> SlotRefValueBuilder::GetPreviouslyMaterialized(
+ Isolate* isolate, int length) {
+ int object_index = materialized_objects_.length();
+ Handle<Object> return_value = Handle<Object>(
+ previously_materialized_objects_->get(object_index), isolate);
+ materialized_objects_.Add(return_value);
+
+  // Now we need to skip all nested objects (and possibly read them from
+  // the materialization store, too).
+ for (int i = 0; i < length; i++) {
+ SlotRef& slot = slot_refs_[current_slot_];
+ current_slot_++;
+
+    // For nested deferred objects, we need to read their properties.
+ if (slot.Representation() == SlotRef::DEFERRED_OBJECT) {
+ length += slot.DeferredObjectLength();
+ }
+
+ // For nested deferred and duplicate objects, we need to put them into
+ // our materialization array
+ if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
+ slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
+ int nested_object_index = materialized_objects_.length();
+ Handle<Object> nested_object = Handle<Object>(
+ previously_materialized_objects_->get(nested_object_index),
+ isolate);
+ materialized_objects_.Add(nested_object);
+ }
+ }
+
+ return return_value;
+}
+
+
+Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
+ SlotRef& slot = slot_refs_[current_slot_];
+ current_slot_++;
+ switch (slot.Representation()) {
+ case SlotRef::TAGGED:
+ case SlotRef::INT32:
+ case SlotRef::UINT32:
+ case SlotRef::DOUBLE:
+ case SlotRef::LITERAL: {
+ return slot.GetValue(isolate);
+ }
+ case SlotRef::DEFERRED_OBJECT: {
+ int length = slot.DeferredObjectLength();
+ ASSERT(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL ||
+ slot_refs_[current_slot_].Representation() == SlotRef::TAGGED);
+
+ int object_index = materialized_objects_.length();
+ if (object_index < prev_materialized_count_) {
+ return GetPreviouslyMaterialized(isolate, length);
+ }
+
+ Handle<Object> map_object = slot_refs_[current_slot_].GetValue(isolate);
+ Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
+ Handle<Map>::cast(map_object), Representation::Tagged());
+ current_slot_++;
+ // TODO(jarin) this should be unified with the code in
+ // Deoptimizer::MaterializeNextHeapObject()
+ switch (map->instance_type()) {
+ case HEAP_NUMBER_TYPE: {
+ // Reuse the HeapNumber value directly as it is already properly
+ // tagged and skip materializing the HeapNumber explicitly.
+ Handle<Object> object = GetNext(isolate, lvl + 1);
+ materialized_objects_.Add(object);
+ return object;
+ }
+ case JS_OBJECT_TYPE: {
+ Handle<JSObject> object =
+ isolate->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
+ materialized_objects_.Add(object);
+ Handle<Object> properties = GetNext(isolate, lvl + 1);
+ Handle<Object> elements = GetNext(isolate, lvl + 1);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ for (int i = 0; i < length - 3; ++i) {
+ Handle<Object> value = GetNext(isolate, lvl + 1);
+ object->FastPropertyAtPut(i, *value);
+ }
+ return object;
+ }
+ case JS_ARRAY_TYPE: {
+ Handle<JSArray> object =
+ isolate->factory()->NewJSArray(0, map->elements_kind());
+ materialized_objects_.Add(object);
+ Handle<Object> properties = GetNext(isolate, lvl + 1);
+ Handle<Object> elements = GetNext(isolate, lvl + 1);
+ Handle<Object> length = GetNext(isolate, lvl + 1);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_length(*length);
+ return object;
+ }
+ default:
+ PrintF(stderr,
+ "[couldn't handle instance type %d]\n", map->instance_type());
+ UNREACHABLE();
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ case SlotRef::DUPLICATE_OBJECT: {
+ int object_index = slot.DuplicateObjectId();
+ Handle<Object> object = materialized_objects_[object_index];
+ materialized_objects_.Add(object);
+ return object;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
UNREACHABLE();
- return Vector<SlotRef>();
+ return Handle<Object>::null();
+}
+
+
+void SlotRefValueBuilder::Finish(Isolate* isolate) {
+  // We should have processed all slots.
+ ASSERT(slot_refs_.length() == current_slot_);
+
+ if (materialized_objects_.length() > prev_materialized_count_) {
+ // We have materialized some new objects, so we have to store them
+ // to prevent duplicate materialization
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(
+ materialized_objects_.length());
+ for (int i = 0; i < materialized_objects_.length(); i++) {
+ array->set(i, *(materialized_objects_.at(i)));
+ }
+ isolate->materialized_object_store()->Set(stack_frame_id_, array);
+ }
+}
+
+
+Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
+ int index = StackIdToIndex(fp);
+ if (index == -1) {
+ return Handle<FixedArray>::null();
+ }
+ Handle<FixedArray> array = GetStackEntries();
+ ASSERT(array->length() > index);
+ return Handle<FixedArray>::cast(Handle<Object>(array->get(index),
+ isolate()));
+}
+
+
+void MaterializedObjectStore::Set(Address fp,
+ Handle<FixedArray> materialized_objects) {
+ int index = StackIdToIndex(fp);
+ if (index == -1) {
+ index = frame_fps_.length();
+ frame_fps_.Add(fp);
+ }
+
+ Handle<FixedArray> array = EnsureStackEntries(index + 1);
+ array->set(index, *materialized_objects);
+}
+
+
+void MaterializedObjectStore::Remove(Address fp) {
+ int index = StackIdToIndex(fp);
+ ASSERT(index >= 0);
+
+ frame_fps_.Remove(index);
+ Handle<FixedArray> array = GetStackEntries();
+ ASSERT(array->length() > index);
+ for (int i = index; i < frame_fps_.length(); i++) {
+ array->set(i, array->get(i + 1));
+ }
+ array->set(frame_fps_.length(), isolate()->heap()->undefined_value());
+}
+
+
+int MaterializedObjectStore::StackIdToIndex(Address fp) {
+ for (int i = 0; i < frame_fps_.length(); i++) {
+ if (frame_fps_[i] == fp) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+
+Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
+ return Handle<FixedArray>(isolate()->heap()->materialized_objects());
+}
+
+
+Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
+ Handle<FixedArray> array = GetStackEntries();
+ if (array->length() >= length) {
+ return array;
+ }
+
+ int new_length = length > 10 ? length : 10;
+ if (new_length < 2 * array->length()) {
+ new_length = 2 * array->length();
+ }
+
+ Handle<FixedArray> new_array =
+ isolate()->factory()->NewFixedArray(new_length, TENURED);
+ for (int i = 0; i < array->length(); i++) {
+ new_array->set(i, array->get(i));
+ }
+ for (int i = array->length(); i < length; i++) {
+ new_array->set(i, isolate()->heap()->undefined_value());
+ }
+ isolate()->heap()->public_set_materialized_objects(*new_array);
+ return new_array;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
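
The builder introduced above is driven in three phases: construct it for the chosen inlined frame, call Prepare() to skip the slots of enclosing frames, read each argument with GetNext(), and call Finish() so that newly materialized objects are remembered for later deopts. A minimal caller sketch (the helper name and the output list are illustrative, not part of this patch):

    // Sketch only: collect the argument values of an inlined frame.
    static void CollectInlinedArguments(Isolate* isolate,
                                        JavaScriptFrame* frame,
                                        int inlined_jsframe_index,
                                        int formal_parameter_count,
                                        List<Handle<Object> >* out) {
      SlotRefValueBuilder slot_refs(frame,
                                    inlined_jsframe_index,
                                    formal_parameter_count);
      slot_refs.Prepare(isolate);  // Skips slots of the enclosing frames.
      for (int i = 0; i < slot_refs.args_length(); i++) {
        // GetNext() consumes any nested slots of captured objects itself.
        out->Add(slot_refs.GetNext(isolate, 0));
      }
      slot_refs.Finish(isolate);  // Stores newly materialized objects.
    }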
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 8576ffc20d..806433c6f3 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -333,15 +333,9 @@ class Deoptimizer : public Malloced {
int object_index,
int field_index);
- enum DeoptimizerTranslatedValueType {
- TRANSLATED_VALUE_IS_NATIVE,
- TRANSLATED_VALUE_IS_TAGGED
- };
-
void DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset,
- DeoptimizerTranslatedValueType value_type = TRANSLATED_VALUE_IS_TAGGED);
+ int frame_index,
+ unsigned output_offset);
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
@@ -441,6 +435,11 @@ class Deoptimizer : public Malloced {
List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
+ // Key for lookup of previously materialized objects
+ Address stack_fp_;
+ Handle<FixedArray> previously_materialized_objects_;
+ int prev_materialized_count_;
+
// Output frame information. Only used during heap object materialization.
List<Handle<JSFunction> > jsframe_functions_;
List<bool> jsframe_has_adapted_arguments_;
@@ -455,7 +454,7 @@ class Deoptimizer : public Malloced {
DisallowHeapAllocation* disallow_heap_allocation_;
#endif // DEBUG
- bool trace_;
+ CodeTracer::Scope* trace_scope_;
static const int table_entry_size_;
@@ -549,6 +548,11 @@ class FrameDescription {
intptr_t GetContext() const { return context_; }
void SetContext(intptr_t context) { context_ = context; }
+ intptr_t GetConstantPool() const { return constant_pool_; }
+ void SetConstantPool(intptr_t constant_pool) {
+ constant_pool_ = constant_pool;
+ }
+
Smi* GetState() const { return state_; }
void SetState(Smi* state) { state_ = state; }
@@ -611,6 +615,7 @@ class FrameDescription {
intptr_t pc_;
intptr_t fp_;
intptr_t context_;
+ intptr_t constant_pool_;
StackFrame::Type type_;
Smi* state_;
@@ -783,7 +788,13 @@ class SlotRef BASE_EMBEDDED {
INT32,
UINT32,
DOUBLE,
- LITERAL
+ LITERAL,
+ DEFERRED_OBJECT, // Object captured by the escape analysis.
+ // The number of nested objects can be obtained
+ // with the DeferredObjectLength() method
+ // (the SlotRefs of the nested objects follow
+                    // this SlotRef in depth-first order.)
+ DUPLICATE_OBJECT // Duplicated object of a deferred object.
};
SlotRef()
@@ -795,52 +806,66 @@ class SlotRef BASE_EMBEDDED {
SlotRef(Isolate* isolate, Object* literal)
: literal_(literal, isolate), representation_(LITERAL) { }
- Handle<Object> GetValue(Isolate* isolate) {
- switch (representation_) {
- case TAGGED:
- return Handle<Object>(Memory::Object_at(addr_), isolate);
-
- case INT32: {
- int value = Memory::int32_at(addr_);
- if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value), isolate);
- } else {
- return isolate->factory()->NewNumberFromInt(value);
- }
- }
-
- case UINT32: {
- uint32_t value = Memory::uint32_at(addr_);
- if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
- } else {
- return isolate->factory()->NewNumber(static_cast<double>(value));
- }
- }
-
- case DOUBLE: {
- double value = read_double_value(addr_);
- return isolate->factory()->NewNumber(value);
- }
-
- case LITERAL:
- return literal_;
-
- default:
- UNREACHABLE();
- return Handle<Object>::null();
- }
+ static SlotRef NewDeferredObject(int length) {
+ SlotRef slot;
+ slot.representation_ = DEFERRED_OBJECT;
+ slot.deferred_object_length_ = length;
+ return slot;
}
- static Vector<SlotRef> ComputeSlotMappingForArguments(
- JavaScriptFrame* frame,
- int inlined_frame_index,
- int formal_parameter_count);
+ SlotRepresentation Representation() { return representation_; }
+
+ static SlotRef NewDuplicateObject(int id) {
+ SlotRef slot;
+ slot.representation_ = DUPLICATE_OBJECT;
+ slot.duplicate_object_id_ = id;
+ return slot;
+ }
+
+ int DeferredObjectLength() { return deferred_object_length_; }
+
+ int DuplicateObjectId() { return duplicate_object_id_; }
+
+ Handle<Object> GetValue(Isolate* isolate);
private:
Address addr_;
Handle<Object> literal_;
SlotRepresentation representation_;
+ int deferred_object_length_;
+ int duplicate_object_id_;
+};
+
+class SlotRefValueBuilder BASE_EMBEDDED {
+ public:
+ SlotRefValueBuilder(
+ JavaScriptFrame* frame,
+ int inlined_frame_index,
+ int formal_parameter_count);
+
+ void Prepare(Isolate* isolate);
+ Handle<Object> GetNext(Isolate* isolate, int level);
+ void Finish(Isolate* isolate);
+
+ int args_length() { return args_length_; }
+
+ private:
+ List<Handle<Object> > materialized_objects_;
+ Handle<FixedArray> previously_materialized_objects_;
+ int prev_materialized_count_;
+ Address stack_frame_id_;
+ List<SlotRef> slot_refs_;
+ int current_slot_;
+ int args_length_;
+ int first_slot_index_;
+
+ static SlotRef ComputeSlotForNextArgument(
+ Translation::Opcode opcode,
+ TranslationIterator* iterator,
+ DeoptimizationInputData* data,
+ JavaScriptFrame* frame);
+
+ Handle<Object> GetPreviouslyMaterialized(Isolate* isolate, int length);
static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
if (slot_index >= 0) {
@@ -852,15 +877,27 @@ class SlotRef BASE_EMBEDDED {
}
}
- static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
+ Handle<Object> GetDeferredObject(Isolate* isolate);
+};
- static void ComputeSlotsForArguments(
- Vector<SlotRef>* args_slots,
- TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
+class MaterializedObjectStore {
+ public:
+ explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {
+ }
+
+ Handle<FixedArray> Get(Address fp);
+ void Set(Address fp, Handle<FixedArray> materialized_objects);
+ void Remove(Address fp);
+
+ private:
+ Isolate* isolate() { return isolate_; }
+ Handle<FixedArray> GetStackEntries();
+ Handle<FixedArray> EnsureStackEntries(int size);
+
+ int StackIdToIndex(Address fp);
+
+ Isolate* isolate_;
+ List<Address> frame_fps_;
};
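
MaterializedObjectStore is a small frame-pointer-keyed map over a heap-allocated FixedArray. A hedged usage sketch (the locals frame_fp and new_objects are assumed to exist in the caller):

    MaterializedObjectStore* store = isolate->materialized_object_store();
    Handle<FixedArray> objects = store->Get(frame_fp);  // Null handle if absent.
    if (!objects.is_null()) {
      // Reuse the objects materialized for this frame earlier, then drop the
      // entry once the frame is gone (Remove() expects an existing entry).
      store->Remove(frame_fp);
    } else {
      store->Set(frame_fp, new_objects);  // Record freshly materialized objects.
    }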
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index d7898ddcd9..f02d43ad8a 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -224,10 +224,10 @@ static int DecodeIt(Isolate* isolate,
StringStream accumulator(&allocator);
relocinfo.target_object()->ShortPrint(&accumulator);
SmartArrayPointer<const char> obj_name = accumulator.ToCString();
- out.AddFormatted(" ;; object: %s", *obj_name);
+ out.AddFormatted(" ;; object: %s", obj_name.get());
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
const char* reference_name =
- ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
+ ref_encoder.NameOfAddress(relocinfo.target_reference());
out.AddFormatted(" ;; external reference (%s)", reference_name);
} else if (RelocInfo::IsCodeTarget(rmode)) {
out.AddFormatted(" ;; code:");
@@ -237,7 +237,8 @@ static int DecodeIt(Isolate* isolate,
Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
Code::Kind kind = code->kind();
if (code->is_inline_cache_stub()) {
- if (rmode == RelocInfo::CODE_TARGET_CONTEXT) {
+ if (kind == Code::LOAD_IC &&
+ LoadIC::GetContextualMode(code->extra_ic_state()) == CONTEXTUAL) {
out.AddFormatted(" contextual,");
}
InlineCacheState ic_state = code->ic_state();
@@ -247,9 +248,6 @@ static int DecodeIt(Isolate* isolate,
Code::StubType type = code->type();
out.AddFormatted(", %s", Code::StubType2String(type));
}
- if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
- out.AddFormatted(", argc = %d", code->arguments_count());
- }
} else if (kind == Code::STUB || kind == Code::HANDLER) {
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
diff --git a/deps/v8/src/effects.h b/deps/v8/src/effects.h
index afb8f9e54b..5d980f54ed 100644
--- a/deps/v8/src/effects.h
+++ b/deps/v8/src/effects.h
@@ -59,24 +59,24 @@ struct Effect {
Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
// The unknown effect.
- static Effect Unknown(Isolate* isolate) {
- return Effect(Bounds::Unbounded(isolate), POSSIBLE);
+ static Effect Unknown(Zone* zone) {
+ return Effect(Bounds::Unbounded(zone), POSSIBLE);
}
- static Effect Forget(Isolate* isolate) {
- return Effect(Bounds::Unbounded(isolate), DEFINITE);
+ static Effect Forget(Zone* zone) {
+ return Effect(Bounds::Unbounded(zone), DEFINITE);
}
// Sequential composition, as in 'e1; e2'.
- static Effect Seq(Effect e1, Effect e2, Isolate* isolate) {
+ static Effect Seq(Effect e1, Effect e2, Zone* zone) {
if (e2.modality == DEFINITE) return e2;
- return Effect(Bounds::Either(e1.bounds, e2.bounds, isolate), e1.modality);
+ return Effect(Bounds::Either(e1.bounds, e2.bounds, zone), e1.modality);
}
// Alternative composition, as in 'cond ? e1 : e2'.
- static Effect Alt(Effect e1, Effect e2, Isolate* isolate) {
+ static Effect Alt(Effect e1, Effect e2, Zone* zone) {
return Effect(
- Bounds::Either(e1.bounds, e2.bounds, isolate),
+ Bounds::Either(e1.bounds, e2.bounds, zone),
e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
}
};
@@ -106,20 +106,20 @@ class EffectsMixin: public Base {
Effect Lookup(Var var) {
Locator locator;
return this->Find(var, &locator)
- ? locator.value() : Effect::Unknown(Base::isolate());
+ ? locator.value() : Effect::Unknown(Base::zone());
}
Bounds LookupBounds(Var var) {
Effect effect = Lookup(var);
return effect.modality == Effect::DEFINITE
- ? effect.bounds : Bounds::Unbounded(Base::isolate());
+ ? effect.bounds : Bounds::Unbounded(Base::zone());
}
// Sequential composition.
void Seq(Var var, Effect effect) {
Locator locator;
if (!this->Insert(var, &locator)) {
- effect = Effect::Seq(locator.value(), effect, Base::isolate());
+ effect = Effect::Seq(locator.value(), effect, Base::zone());
}
locator.set_value(effect);
}
@@ -133,7 +133,7 @@ class EffectsMixin: public Base {
void Alt(Var var, Effect effect) {
Locator locator;
if (!this->Insert(var, &locator)) {
- effect = Effect::Alt(locator.value(), effect, Base::isolate());
+ effect = Effect::Alt(locator.value(), effect, Base::zone());
}
locator.set_value(effect);
}
@@ -148,7 +148,7 @@ class EffectsMixin: public Base {
// Invalidation.
void Forget() {
Overrider override = {
- Effect::Forget(Base::isolate()), Effects(Base::zone()) };
+ Effect::Forget(Base::zone()), Effects(Base::zone()) };
this->ForEach(&override);
Seq(override.effects);
}
@@ -206,7 +206,6 @@ class EffectsBase {
EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >;
Zone* zone() { return map_->allocator().zone(); }
- Isolate* isolate() { return zone()->isolate(); }
struct SplayTreeConfig {
typedef Var Key;
@@ -277,7 +276,6 @@ class NestedEffectsBase {
typedef typename EffectsBase<Var, kNoVar>::Locator Locator;
Zone* zone() { return node_->zone; }
- Isolate* isolate() { return zone()->isolate(); }
void push() { node_ = new(node_->zone) Node(node_->zone, node_); }
void pop() { node_ = node_->previous; }
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 8129051a62..d2abb0442a 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -37,20 +37,29 @@ namespace internal {
int ElementsKindToShiftSize(ElementsKind elements_kind) {
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
return 0;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
return 1;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case FLOAT32_ELEMENTS:
return 2;
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FLOAT64_ELEMENTS:
return 3;
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
@@ -98,6 +107,14 @@ struct InitializeFastElementsKindSequence {
fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[4] = FAST_ELEMENTS;
fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
+
+ // Verify that kFastElementsKindPackedToHoley is correct.
+ STATIC_ASSERT(FAST_SMI_ELEMENTS + kFastElementsKindPackedToHoley ==
+ FAST_HOLEY_SMI_ELEMENTS);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS + kFastElementsKindPackedToHoley ==
+ FAST_HOLEY_DOUBLE_ELEMENTS);
+ STATIC_ASSERT(FAST_ELEMENTS + kFastElementsKindPackedToHoley ==
+ FAST_HOLEY_ELEMENTS);
}
};
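
ElementsKindToShiftSize() returns the log2 of the per-element byte width, so the element size of the new typed-array kinds follows directly; a one-line illustrative helper (the name is made up here):

    // Sketch: byte width of one element, e.g. FLOAT64_ELEMENTS -> 1 << 3 = 8.
    static int ElementsKindToByteSize(ElementsKind kind) {
      return 1 << ElementsKindToShiftSize(kind);
    }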
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 69b4057818..5a3f00dcce 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -53,23 +53,36 @@ enum ElementsKind {
DICTIONARY_ELEMENTS,
NON_STRICT_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
- EXTERNAL_BYTE_ELEMENTS,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
- EXTERNAL_SHORT_ELEMENTS,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
- EXTERNAL_INT_ELEMENTS,
- EXTERNAL_UNSIGNED_INT_ELEMENTS,
- EXTERNAL_FLOAT_ELEMENTS,
- EXTERNAL_DOUBLE_ELEMENTS,
- EXTERNAL_PIXEL_ELEMENTS,
+ EXTERNAL_INT8_ELEMENTS,
+ EXTERNAL_UINT8_ELEMENTS,
+ EXTERNAL_INT16_ELEMENTS,
+ EXTERNAL_UINT16_ELEMENTS,
+ EXTERNAL_INT32_ELEMENTS,
+ EXTERNAL_UINT32_ELEMENTS,
+ EXTERNAL_FLOAT32_ELEMENTS,
+ EXTERNAL_FLOAT64_ELEMENTS,
+ EXTERNAL_UINT8_CLAMPED_ELEMENTS,
+
+ // Fixed typed arrays
+ UINT8_ELEMENTS,
+ INT8_ELEMENTS,
+ UINT16_ELEMENTS,
+ INT16_ELEMENTS,
+ UINT32_ELEMENTS,
+ INT32_ELEMENTS,
+ FLOAT32_ELEMENTS,
+ FLOAT64_ELEMENTS,
+ UINT8_CLAMPED_ELEMENTS,
// Derived constants from ElementsKind
FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
- LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+ LAST_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
- FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
- LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+ FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_INT8_ELEMENTS,
+ LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_UINT8_CLAMPED_ELEMENTS,
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
};
@@ -77,6 +90,10 @@ const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
+// The number to add to a packed elements kind to reach a holey elements kind
+const int kFastElementsKindPackedToHoley =
+ FAST_HOLEY_SMI_ELEMENTS - FAST_SMI_ELEMENTS;
+
int ElementsKindToShiftSize(ElementsKind elements_kind);
const char* ElementsKindToString(ElementsKind kind);
void PrintElementsKind(FILE* out, ElementsKind kind);
@@ -99,6 +116,12 @@ inline bool IsExternalArrayElementsKind(ElementsKind kind) {
}
+inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
+ return kind >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
+ kind <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
+}
+
+
inline bool IsFastElementsKind(ElementsKind kind) {
ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
@@ -112,14 +135,20 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
inline bool IsExternalFloatOrDoubleElementsKind(ElementsKind kind) {
- return kind == EXTERNAL_DOUBLE_ELEMENTS ||
- kind == EXTERNAL_FLOAT_ELEMENTS;
+ return kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ kind == EXTERNAL_FLOAT32_ELEMENTS;
+}
+
+
+inline bool IsFixedFloatElementsKind(ElementsKind kind) {
+ return kind == FLOAT32_ELEMENTS || kind == FLOAT64_ELEMENTS;
}
inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
return IsFastDoubleElementsKind(kind) ||
- IsExternalFloatOrDoubleElementsKind(kind);
+ IsExternalFloatOrDoubleElementsKind(kind) ||
+ IsFixedFloatElementsKind(kind);
}
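
With kFastElementsKindPackedToHoley, mapping a packed fast kind to its holey counterpart becomes a plain addition, as the STATIC_ASSERTs in elements-kind.cc verify. A hypothetical helper (name illustrative; assumes the argument is one of the packed fast kinds):

    static ElementsKind PackedToHoley(ElementsKind packed_kind) {
      // FAST_SMI_ELEMENTS -> FAST_HOLEY_SMI_ELEMENTS, etc.
      return static_cast<ElementsKind>(packed_kind +
                                       kFastElementsKindPackedToHoley);
    }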
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 0b745c4505..2e4667d4a0 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -48,16 +48,25 @@
// - FastDoubleElementsAccessor
// - FastPackedDoubleElementsAccessor
// - FastHoleyDoubleElementsAccessor
-// - ExternalElementsAccessor (abstract)
-// - ExternalByteElementsAccessor
-// - ExternalUnsignedByteElementsAccessor
-// - ExternalShortElementsAccessor
-// - ExternalUnsignedShortElementsAccessor
-// - ExternalIntElementsAccessor
-// - ExternalUnsignedIntElementsAccessor
-// - ExternalFloatElementsAccessor
-// - ExternalDoubleElementsAccessor
-// - PixelElementsAccessor
+// - TypedElementsAccessor: template, with instantiations:
+// - ExternalInt8ElementsAccessor
+// - ExternalUint8ElementsAccessor
+// - ExternalInt16ElementsAccessor
+// - ExternalUint16ElementsAccessor
+// - ExternalInt32ElementsAccessor
+// - ExternalUint32ElementsAccessor
+// - ExternalFloat32ElementsAccessor
+// - ExternalFloat64ElementsAccessor
+// - ExternalUint8ClampedElementsAccessor
+// - FixedUint8ElementsAccessor
+// - FixedInt8ElementsAccessor
+// - FixedUint16ElementsAccessor
+// - FixedInt16ElementsAccessor
+// - FixedUint32ElementsAccessor
+// - FixedInt32ElementsAccessor
+// - FixedFloat32ElementsAccessor
+// - FixedFloat64ElementsAccessor
+// - FixedUint8ClampedElementsAccessor
// - DictionaryElementsAccessor
// - NonStrictArgumentsElementsAccessor
@@ -88,23 +97,35 @@ static const int kPackedSizeNotKnown = -1;
SeededNumberDictionary) \
V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
FixedArray) \
- V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS, \
- ExternalByteArray) \
- V(ExternalUnsignedByteElementsAccessor, \
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS, ExternalUnsignedByteArray) \
- V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS, \
- ExternalShortArray) \
- V(ExternalUnsignedShortElementsAccessor, \
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS, ExternalUnsignedShortArray) \
- V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS, \
- ExternalIntArray) \
- V(ExternalUnsignedIntElementsAccessor, \
- EXTERNAL_UNSIGNED_INT_ELEMENTS, ExternalUnsignedIntArray) \
- V(ExternalFloatElementsAccessor, \
- EXTERNAL_FLOAT_ELEMENTS, ExternalFloatArray) \
- V(ExternalDoubleElementsAccessor, \
- EXTERNAL_DOUBLE_ELEMENTS, ExternalDoubleArray) \
- V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS, ExternalPixelArray)
+ V(ExternalInt8ElementsAccessor, EXTERNAL_INT8_ELEMENTS, \
+ ExternalInt8Array) \
+ V(ExternalUint8ElementsAccessor, \
+ EXTERNAL_UINT8_ELEMENTS, ExternalUint8Array) \
+ V(ExternalInt16ElementsAccessor, EXTERNAL_INT16_ELEMENTS, \
+ ExternalInt16Array) \
+ V(ExternalUint16ElementsAccessor, \
+ EXTERNAL_UINT16_ELEMENTS, ExternalUint16Array) \
+ V(ExternalInt32ElementsAccessor, EXTERNAL_INT32_ELEMENTS, \
+ ExternalInt32Array) \
+ V(ExternalUint32ElementsAccessor, \
+ EXTERNAL_UINT32_ELEMENTS, ExternalUint32Array) \
+ V(ExternalFloat32ElementsAccessor, \
+ EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array) \
+ V(ExternalFloat64ElementsAccessor, \
+ EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array) \
+ V(ExternalUint8ClampedElementsAccessor, \
+ EXTERNAL_UINT8_CLAMPED_ELEMENTS, \
+ ExternalUint8ClampedArray) \
+ V(FixedUint8ElementsAccessor, UINT8_ELEMENTS, FixedUint8Array) \
+ V(FixedInt8ElementsAccessor, INT8_ELEMENTS, FixedInt8Array) \
+ V(FixedUint16ElementsAccessor, UINT16_ELEMENTS, FixedUint16Array) \
+ V(FixedInt16ElementsAccessor, INT16_ELEMENTS, FixedInt16Array) \
+ V(FixedUint32ElementsAccessor, UINT32_ELEMENTS, FixedUint32Array) \
+ V(FixedInt32ElementsAccessor, INT32_ELEMENTS, FixedInt32Array) \
+ V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
+ V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
+ V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
+ FixedUint8ClampedArray)
template<ElementsKind Kind> class ElementsKindTraits {
@@ -1078,24 +1099,16 @@ static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) {
}
case FIXED_DOUBLE_ARRAY_TYPE:
return FAST_HOLEY_DOUBLE_ELEMENTS;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return EXTERNAL_BYTE_ELEMENTS;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return EXTERNAL_SHORT_ELEMENTS;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
- case EXTERNAL_INT_ARRAY_TYPE:
- return EXTERNAL_INT_ELEMENTS;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_INT_ELEMENTS;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return EXTERNAL_FLOAT_ELEMENTS;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return EXTERNAL_DOUBLE_ELEMENTS;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return EXTERNAL_PIXEL_ELEMENTS;
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ return EXTERNAL_##TYPE##_ELEMENTS; \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ return TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
default:
UNREACHABLE();
}
@@ -1149,16 +1162,12 @@ class FastSmiOrObjectElementsAccessor
return CopyElementsImpl(arguments, from_start, to, from_kind,
to_start, packed_size, copy_size);
}
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
UNREACHABLE();
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
}
return NULL;
}
@@ -1274,16 +1283,14 @@ class FastDoubleElementsAccessor
from, from_start, to, to_start, copy_size);
break;
case NON_STRICT_ARGUMENTS_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
UNREACHABLE();
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+ UNREACHABLE();
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
}
return to->GetHeap()->undefined_value();
}
@@ -1320,20 +1327,20 @@ class FastHoleyDoubleElementsAccessor
// Super class for all external element arrays.
-template<typename ExternalElementsAccessorSubclass,
- ElementsKind Kind>
-class ExternalElementsAccessor
- : public ElementsAccessorBase<ExternalElementsAccessorSubclass,
+template<ElementsKind Kind>
+class TypedElementsAccessor
+ : public ElementsAccessorBase<TypedElementsAccessor<Kind>,
ElementsKindTraits<Kind> > {
public:
- explicit ExternalElementsAccessor(const char* name)
- : ElementsAccessorBase<ExternalElementsAccessorSubclass,
+ explicit TypedElementsAccessor(const char* name)
+ : ElementsAccessorBase<AccessorClass,
ElementsKindTraits<Kind> >(name) {}
protected:
typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
+ typedef TypedElementsAccessor<Kind> AccessorClass;
- friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
+ friend class ElementsAccessorBase<AccessorClass,
ElementsKindTraits<Kind> >;
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
@@ -1341,7 +1348,7 @@ class ExternalElementsAccessor
uint32_t key,
FixedArrayBase* backing_store) {
return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ key < AccessorClass::GetCapacityImpl(backing_store)
? BackingStore::cast(backing_store)->get(key)
: backing_store->GetHeap()->undefined_value();
}
@@ -1352,7 +1359,7 @@ class ExternalElementsAccessor
uint32_t key,
FixedArrayBase* backing_store) {
return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ key < AccessorClass::GetCapacityImpl(backing_store)
? NONE : ABSENT;
}
@@ -1362,7 +1369,7 @@ class ExternalElementsAccessor
uint32_t key,
FixedArrayBase* backing_store) {
return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ key < AccessorClass::GetCapacityImpl(backing_store)
? FIELD : NONEXISTENT;
}
@@ -1387,100 +1394,27 @@ class ExternalElementsAccessor
uint32_t key,
FixedArrayBase* backing_store) {
uint32_t capacity =
- ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
+ AccessorClass::GetCapacityImpl(backing_store);
return key < capacity;
}
};
-class ExternalByteElementsAccessor
- : public ExternalElementsAccessor<ExternalByteElementsAccessor,
- EXTERNAL_BYTE_ELEMENTS> {
- public:
- explicit ExternalByteElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalByteElementsAccessor,
- EXTERNAL_BYTE_ELEMENTS>(name) {}
-};
-
-
-class ExternalUnsignedByteElementsAccessor
- : public ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS> {
- public:
- explicit ExternalUnsignedByteElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS>(name) {}
-};
-
-
-class ExternalShortElementsAccessor
- : public ExternalElementsAccessor<ExternalShortElementsAccessor,
- EXTERNAL_SHORT_ELEMENTS> {
- public:
- explicit ExternalShortElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalShortElementsAccessor,
- EXTERNAL_SHORT_ELEMENTS>(name) {}
-};
-
-
-class ExternalUnsignedShortElementsAccessor
- : public ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS> {
- public:
- explicit ExternalUnsignedShortElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS>(name) {}
-};
+#define EXTERNAL_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
+ typedef TypedElementsAccessor<EXTERNAL_##TYPE##_ELEMENTS> \
+ External##Type##ElementsAccessor;
-class ExternalIntElementsAccessor
- : public ExternalElementsAccessor<ExternalIntElementsAccessor,
- EXTERNAL_INT_ELEMENTS> {
- public:
- explicit ExternalIntElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalIntElementsAccessor,
- EXTERNAL_INT_ELEMENTS>(name) {}
-};
+TYPED_ARRAYS(EXTERNAL_ELEMENTS_ACCESSOR)
+#undef EXTERNAL_ELEMENTS_ACCESSOR
+#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
+ typedef TypedElementsAccessor<TYPE##_ELEMENTS > \
+ Fixed##Type##ElementsAccessor;
-class ExternalUnsignedIntElementsAccessor
- : public ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
- EXTERNAL_UNSIGNED_INT_ELEMENTS> {
- public:
- explicit ExternalUnsignedIntElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
- EXTERNAL_UNSIGNED_INT_ELEMENTS>(name) {}
-};
-
+TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
+#undef FIXED_ELEMENTS_ACCESSOR
-class ExternalFloatElementsAccessor
- : public ExternalElementsAccessor<ExternalFloatElementsAccessor,
- EXTERNAL_FLOAT_ELEMENTS> {
- public:
- explicit ExternalFloatElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalFloatElementsAccessor,
- EXTERNAL_FLOAT_ELEMENTS>(name) {}
-};
-
-
-class ExternalDoubleElementsAccessor
- : public ExternalElementsAccessor<ExternalDoubleElementsAccessor,
- EXTERNAL_DOUBLE_ELEMENTS> {
- public:
- explicit ExternalDoubleElementsAccessor(const char* name)
- : ExternalElementsAccessor<ExternalDoubleElementsAccessor,
- EXTERNAL_DOUBLE_ELEMENTS>(name) {}
-};
-
-
-class PixelElementsAccessor
- : public ExternalElementsAccessor<PixelElementsAccessor,
- EXTERNAL_PIXEL_ELEMENTS> {
- public:
- explicit PixelElementsAccessor(const char* name)
- : ExternalElementsAccessor<PixelElementsAccessor,
- EXTERNAL_PIXEL_ELEMENTS>(name) {}
-};
class DictionaryElementsAccessor
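
To see what the two X-macro passes above generate, the Uint8 entry of TYPED_ARRAYS expands roughly as follows (sketch; the exact entry arguments live in the TYPED_ARRAYS definition elsewhere):

    typedef TypedElementsAccessor<EXTERNAL_UINT8_ELEMENTS>
        ExternalUint8ElementsAccessor;
    typedef TypedElementsAccessor<UINT8_ELEMENTS >
        FixedUint8ElementsAccessor;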
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 979641a9de..690a4e3f4e 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -354,6 +354,34 @@ Handle<Object> Execution::TryGetConstructorDelegate(
}
+void Execution::RunMicrotasks(Isolate* isolate) {
+ ASSERT(isolate->microtask_pending());
+ bool threw = false;
+ Execution::Call(
+ isolate,
+ isolate->run_microtasks(),
+ isolate->factory()->undefined_value(),
+ 0,
+ NULL,
+ &threw);
+ ASSERT(!threw);
+}
+
+
+void Execution::EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask) {
+ bool threw = false;
+ Handle<Object> args[] = { microtask };
+ Execution::Call(
+ isolate,
+ isolate->enqueue_external_microtask(),
+ isolate->factory()->undefined_value(),
+ 1,
+ args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
bool StackGuard::IsStackOverflow() {
ExecutionAccess access(isolate_);
return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -488,6 +516,19 @@ void StackGuard::FullDeopt() {
}
+bool StackGuard::IsDeoptMarkedAllocationSites() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & DEOPT_MARKED_ALLOCATION_SITES) != 0;
+}
+
+
+void StackGuard::DeoptMarkedAllocationSites() {
+ ExecutionAccess access(isolate_);
+ thread_local_.interrupt_flags_ |= DEOPT_MARKED_ALLOCATION_SITES;
+ set_interrupt_limits(access);
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
ExecutionAccess access(isolate_);
@@ -526,6 +567,48 @@ void StackGuard::Continue(InterruptFlag after_what) {
}
+void StackGuard::RequestInterrupt(InterruptCallback callback, void* data) {
+ ExecutionAccess access(isolate_);
+ thread_local_.interrupt_flags_ |= API_INTERRUPT;
+ thread_local_.interrupt_callback_ = callback;
+ thread_local_.interrupt_callback_data_ = data;
+ set_interrupt_limits(access);
+}
+
+
+void StackGuard::ClearInterrupt() {
+ thread_local_.interrupt_callback_ = 0;
+ thread_local_.interrupt_callback_data_ = 0;
+ Continue(API_INTERRUPT);
+}
+
+
+bool StackGuard::IsAPIInterrupt() {
+ ExecutionAccess access(isolate_);
+ return thread_local_.interrupt_flags_ & API_INTERRUPT;
+}
+
+
+void StackGuard::InvokeInterruptCallback() {
+ InterruptCallback callback = 0;
+ void* data = 0;
+
+ {
+ ExecutionAccess access(isolate_);
+ callback = thread_local_.interrupt_callback_;
+ data = thread_local_.interrupt_callback_data_;
+ thread_local_.interrupt_callback_ = NULL;
+ thread_local_.interrupt_callback_data_ = NULL;
+ }
+
+ if (callback != NULL) {
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ callback(reinterpret_cast<v8::Isolate*>(isolate_), data);
+ }
+}
+
+
char* StackGuard::ArchiveStackGuard(char* to) {
ExecutionAccess access(isolate_);
OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
@@ -567,6 +650,8 @@ void StackGuard::ThreadLocal::Clear() {
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
interrupt_flags_ = 0;
+ interrupt_callback_ = NULL;
+ interrupt_callback_data_ = NULL;
}
@@ -587,6 +672,8 @@ bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
interrupt_flags_ = 0;
+ interrupt_callback_ = NULL;
+ interrupt_callback_data_ = NULL;
return should_set_stack_limits;
}
@@ -814,8 +901,6 @@ static Object* RuntimePreempt(Isolate* isolate) {
// Clear the preempt request flag.
isolate->stack_guard()->Continue(PREEMPT);
- ContextSwitcher::PreemptionReceived();
-
#ifdef ENABLE_DEBUGGER_SUPPORT
if (isolate->debug()->InDebugger()) {
// If currently in the debugger don't do any actual preemption but record
@@ -924,6 +1009,11 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
return isolate->heap()->undefined_value();
}
+ if (stack_guard->IsAPIInterrupt()) {
+ stack_guard->InvokeInterruptCallback();
+ stack_guard->Continue(API_INTERRUPT);
+ }
+
if (stack_guard->IsGCRequest()) {
isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
"StackGuard GC request");
@@ -950,8 +1040,12 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate);
}
+ if (stack_guard->IsDeoptMarkedAllocationSites()) {
+ stack_guard->Continue(DEOPT_MARKED_ALLOCATION_SITES);
+ isolate->heap()->DeoptMarkedAllocationSites();
+ }
if (stack_guard->IsInstallCodeRequest()) {
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(isolate->concurrent_recompilation_enabled());
stack_guard->Continue(INSTALL_CODE);
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
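
The two new Execution helpers simply trampoline into JS builtins via Execution::Call; a hedged sketch of the calling side (microtask_function is an assumed Handle<Object> callable):

    Execution::EnqueueMicrotask(isolate, microtask_function);
    if (isolate->microtask_pending()) {
      Execution::RunMicrotasks(isolate);  // Asserts a microtask is pending.
    }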
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 371ea309d6..b53a83358c 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -43,7 +43,9 @@ enum InterruptFlag {
TERMINATE = 1 << 4,
GC_REQUEST = 1 << 5,
FULL_DEOPT = 1 << 6,
- INSTALL_CODE = 1 << 7
+ INSTALL_CODE = 1 << 7,
+ API_INTERRUPT = 1 << 8,
+ DEOPT_MARKED_ALLOCATION_SITES = 1 << 9
};
@@ -171,6 +173,9 @@ class Execution : public AllStatic {
static Handle<Object> TryGetConstructorDelegate(Isolate* isolate,
Handle<Object> object,
bool* has_pending_exception);
+
+ static void RunMicrotasks(Isolate* isolate);
+ static void EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask);
};
@@ -218,8 +223,15 @@ class StackGuard {
void RequestInstallCode();
bool IsFullDeopt();
void FullDeopt();
+ bool IsDeoptMarkedAllocationSites();
+ void DeoptMarkedAllocationSites();
void Continue(InterruptFlag after_what);
+ void RequestInterrupt(InterruptCallback callback, void* data);
+ void ClearInterrupt();
+ bool IsAPIInterrupt();
+ void InvokeInterruptCallback();
+
// This provides an asynchronous read of the stack limits for the current
// thread. There are no locks protecting this, but it is assumed that you
// have the global V8 lock if you are using multiple V8 threads.
@@ -270,7 +282,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_A64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
@@ -305,6 +317,9 @@ class StackGuard {
int nesting_;
int postpone_interrupts_nesting_;
int interrupt_flags_;
+
+ InterruptCallback interrupt_callback_;
+ void* interrupt_callback_data_;
};
// TODO(isolates): Technically this could be calculated directly from a
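
The API_INTERRUPT plumbing pairs a new flag bit with a stored callback and data pointer. A hedged sketch of how a hook could be wired through it (the callback body and the was_interrupted flag are illustrative):

    // Matches the signature InvokeInterruptCallback() invokes:
    // void (*)(v8::Isolate*, void*).
    static void OnApiInterrupt(v8::Isolate* isolate, void* data) {
      *static_cast<bool*>(data) = true;  // Record that execution was interrupted.
    }

    // From code with access to the internal isolate:
    //   bool was_interrupted = false;
    //   isolate->stack_guard()->RequestInterrupt(OnApiInterrupt, &was_interrupted);
    // HandleStackGuardInterrupt() later sees API_INTERRUPT, runs the callback
    // through InvokeInterruptCallback(), and Continue(API_INTERRUPT) clears it.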
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 9fdb194e42..d372cf0125 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -60,14 +60,16 @@ const char* const ExternalizeStringExtension::kSource =
"native function externalizeString();"
"native function isAsciiString();";
-
-v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
- v8::Handle<v8::String> str) {
+v8::Handle<v8::FunctionTemplate>
+ExternalizeStringExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Handle<v8::String> str) {
if (strcmp(*v8::String::Utf8Value(str), "externalizeString") == 0) {
- return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
+ return v8::FunctionTemplate::New(isolate,
+ ExternalizeStringExtension::Externalize);
} else {
ASSERT(strcmp(*v8::String::Utf8Value(str), "isAsciiString") == 0);
- return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
+ return v8::FunctionTemplate::New(isolate,
+ ExternalizeStringExtension::IsAscii);
}
}
@@ -75,7 +77,8 @@ v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
void ExternalizeStringExtension::Externalize(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(),
"First parameter to externalizeString() must be a string."));
return;
}
@@ -84,7 +87,8 @@ void ExternalizeStringExtension::Externalize(
if (args[1]->IsBoolean()) {
force_two_byte = args[1]->BooleanValue();
} else {
- args.GetIsolate()->ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(),
"Second parameter to externalizeString() must be a boolean."));
return;
}
@@ -92,7 +96,8 @@ void ExternalizeStringExtension::Externalize(
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
if (string->IsExternalString()) {
- args.GetIsolate()->ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(),
"externalizeString() can't externalize twice."));
return;
}
@@ -120,8 +125,8 @@ void ExternalizeStringExtension::Externalize(
if (!result) delete resource;
}
if (!result) {
- args.GetIsolate()->ThrowException(
- v8::String::New("externalizeString() failed."));
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(), "externalizeString() failed."));
return;
}
}
@@ -130,7 +135,8 @@ void ExternalizeStringExtension::Externalize(
void ExternalizeStringExtension::IsAscii(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(),
"isAsciiString() requires a single string argument."));
return;
}
@@ -139,10 +145,4 @@ void ExternalizeStringExtension::IsAscii(
args.GetReturnValue().Set(is_one_byte);
}
-
-void ExternalizeStringExtension::Register() {
- static ExternalizeStringExtension externalize_extension;
- static v8::DeclareExtension declaration(&externalize_extension);
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index ecbc1cf447..d0cb5e47d3 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -36,11 +36,12 @@ namespace internal {
class ExternalizeStringExtension : public v8::Extension {
public:
ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
static void IsAscii(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Register();
+
private:
static const char* const kSource;
};
diff --git a/deps/v8/src/extensions/free-buffer-extension.cc b/deps/v8/src/extensions/free-buffer-extension.cc
new file mode 100644
index 0000000000..b4abaafe23
--- /dev/null
+++ b/deps/v8/src/extensions/free-buffer-extension.cc
@@ -0,0 +1,50 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "free-buffer-extension.h"
+#include "platform.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+v8::Handle<v8::FunctionTemplate> FreeBufferExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> str) {
+ return v8::FunctionTemplate::New(isolate, FreeBufferExtension::FreeBuffer);
+}
+
+
+void FreeBufferExtension::FreeBuffer(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Handle<v8::ArrayBuffer> arrayBuffer = args[0].As<v8::ArrayBuffer>();
+ v8::ArrayBuffer::Contents contents = arrayBuffer->Externalize();
+ V8::ArrayBufferAllocator()->Free(contents.Data(), contents.ByteLength());
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/extensions/free-buffer-extension.h b/deps/v8/src/extensions/free-buffer-extension.h
new file mode 100644
index 0000000000..26ff7d1bb0
--- /dev/null
+++ b/deps/v8/src/extensions/free-buffer-extension.h
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
+#define V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class FreeBufferExtension : public v8::Extension {
+ public:
+ FreeBufferExtension()
+ : v8::Extension("v8/free-buffer", "native function freeBuffer();") {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> name);
+ static void FreeBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
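
A hedged sketch of how such an extension is registered and enabled for a context, using the standard v8::Extension machinery (the isolate local is assumed to exist; nothing here is added by this patch):

    v8::RegisterExtension(new v8::internal::FreeBufferExtension());
    const char* extension_names[] = { "v8/free-buffer" };
    v8::ExtensionConfiguration extensions(1, extension_names);
    v8::Handle<v8::Context> context = v8::Context::New(isolate, &extensions);
    // Script running in this context can now call freeBuffer(arrayBuffer).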
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 308879115f..1d4873de73 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -32,33 +32,17 @@ namespace v8 {
namespace internal {
-v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
+v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> str) {
- return v8::FunctionTemplate::New(GCExtension::GC);
+ return v8::FunctionTemplate::New(isolate, GCExtension::GC);
}
void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- if (args[0]->BooleanValue()) {
- isolate->heap()->CollectGarbage(NEW_SPACE, "gc extension");
- } else {
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
- }
-}
-
-
-void GCExtension::Register() {
- static char buffer[50];
- Vector<char> temp_vector(buffer, sizeof(buffer));
- if (FLAG_expose_gc_as != NULL && strlen(FLAG_expose_gc_as) != 0) {
- OS::SNPrintF(temp_vector, "native function %s();", FLAG_expose_gc_as);
- } else {
- OS::SNPrintF(temp_vector, "native function gc();");
- }
-
- static GCExtension gc_extension(buffer);
- static v8::DeclareExtension declaration(&gc_extension);
+ args.GetIsolate()->RequestGarbageCollectionForTesting(
+ args[0]->BooleanValue() ? v8::Isolate::kMinorGarbageCollection
+ : v8::Isolate::kFullGarbageCollection);
}
} } // namespace v8::internal
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index e412b92a4d..105c5ad5b7 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -35,11 +35,22 @@ namespace internal {
class GCExtension : public v8::Extension {
public:
- explicit GCExtension(const char* source) : v8::Extension("v8/gc", source) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ explicit GCExtension(const char* fun_name)
+ : v8::Extension("v8/gc",
+ BuildSource(buffer_, sizeof(buffer_), fun_name)) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void GC(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Register();
+
+ private:
+ static const char* BuildSource(char* buf, size_t size, const char* fun_name) {
+ OS::SNPrintF(Vector<char>(buf, static_cast<int>(size)),
+ "native function %s();", fun_name);
+ return buf;
+ }
+
+ char buffer_[50];
};
} } // namespace v8::internal
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 651d99d452..e10dddfcdb 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -34,27 +34,39 @@ const char* const StatisticsExtension::kSource =
"native function getV8Statistics();";
-v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
+v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> str) {
ASSERT(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0);
- return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
+ return v8::FunctionTemplate::New(isolate, StatisticsExtension::GetCounters);
}
-static void AddCounter(v8::Local<v8::Object> object,
+static void AddCounter(v8::Isolate* isolate,
+ v8::Local<v8::Object> object,
StatsCounter* counter,
const char* name) {
if (counter->Enabled()) {
- object->Set(v8::String::New(name),
- v8::Number::New(*counter->GetInternalPointer()));
+ object->Set(v8::String::NewFromUtf8(isolate, name),
+ v8::Number::New(isolate, *counter->GetInternalPointer()));
}
}
-static void AddNumber(v8::Local<v8::Object> object,
+static void AddNumber(v8::Isolate* isolate,
+ v8::Local<v8::Object> object,
intptr_t value,
const char* name) {
- object->Set(v8::String::New(name),
- v8::Number::New(static_cast<double>(value)));
+ object->Set(v8::String::NewFromUtf8(isolate, name),
+ v8::Number::New(isolate, static_cast<double>(value)));
+}
+
+
+static void AddNumber64(v8::Isolate* isolate,
+ v8::Local<v8::Object> object,
+ int64_t value,
+ const char* name) {
+ object->Set(v8::String::NewFromUtf8(isolate, name),
+ v8::Number::New(isolate, static_cast<double>(value)));
}
@@ -70,90 +82,92 @@ void StatisticsExtension::GetCounters(
}
Counters* counters = isolate->counters();
- v8::Local<v8::Object> result = v8::Object::New();
+ v8::Local<v8::Object> result = v8::Object::New(args.GetIsolate());
#define ADD_COUNTER(name, caption) \
- AddCounter(result, counters->name(), #name);
+ AddCounter(args.GetIsolate(), result, counters->name(), #name);
STATS_COUNTER_LIST_1(ADD_COUNTER)
STATS_COUNTER_LIST_2(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(result, counters->count_of_##name(), "count_of_" #name); \
- AddCounter(result, counters->size_of_##name(), "size_of_" #name);
+#define ADD_COUNTER(name) \
+ AddCounter(args.GetIsolate(), result, counters->count_of_##name(), \
+ "count_of_" #name); \
+ AddCounter(args.GetIsolate(), result, counters->size_of_##name(), \
+ "size_of_" #name);
INSTANCE_TYPE_LIST(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(result, counters->count_of_CODE_TYPE_##name(), \
- "count_of_CODE_TYPE_" #name); \
- AddCounter(result, counters->size_of_CODE_TYPE_##name(), \
+#define ADD_COUNTER(name) \
+ AddCounter(args.GetIsolate(), result, counters->count_of_CODE_TYPE_##name(), \
+ "count_of_CODE_TYPE_" #name); \
+ AddCounter(args.GetIsolate(), result, counters->size_of_CODE_TYPE_##name(), \
"size_of_CODE_TYPE_" #name);
CODE_KIND_LIST(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(result, counters->count_of_FIXED_ARRAY_##name(), \
- "count_of_FIXED_ARRAY_" #name); \
- AddCounter(result, counters->size_of_FIXED_ARRAY_##name(), \
+#define ADD_COUNTER(name) \
+ AddCounter(args.GetIsolate(), result, \
+ counters->count_of_FIXED_ARRAY_##name(), \
+ "count_of_FIXED_ARRAY_" #name); \
+ AddCounter(args.GetIsolate(), result, \
+ counters->size_of_FIXED_ARRAY_##name(), \
"size_of_FIXED_ARRAY_" #name);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
#undef ADD_COUNTER
- AddNumber(result, isolate->memory_allocator()->Size(),
+ AddNumber(args.GetIsolate(), result, isolate->memory_allocator()->Size(),
"total_committed_bytes");
- AddNumber(result, heap->new_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->new_space()->Size(),
"new_space_live_bytes");
- AddNumber(result, heap->new_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->new_space()->Available(),
"new_space_available_bytes");
- AddNumber(result, heap->new_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result, heap->new_space()->CommittedMemory(),
"new_space_commited_bytes");
- AddNumber(result, heap->old_pointer_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->old_pointer_space()->Size(),
"old_pointer_space_live_bytes");
- AddNumber(result, heap->old_pointer_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->old_pointer_space()->Available(),
"old_pointer_space_available_bytes");
- AddNumber(result, heap->old_pointer_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result,
+ heap->old_pointer_space()->CommittedMemory(),
"old_pointer_space_commited_bytes");
- AddNumber(result, heap->old_data_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->old_data_space()->Size(),
"old_data_space_live_bytes");
- AddNumber(result, heap->old_data_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->old_data_space()->Available(),
"old_data_space_available_bytes");
- AddNumber(result, heap->old_data_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result,
+ heap->old_data_space()->CommittedMemory(),
"old_data_space_commited_bytes");
- AddNumber(result, heap->code_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->code_space()->Size(),
"code_space_live_bytes");
- AddNumber(result, heap->code_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->code_space()->Available(),
"code_space_available_bytes");
- AddNumber(result, heap->code_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result, heap->code_space()->CommittedMemory(),
"code_space_commited_bytes");
- AddNumber(result, heap->cell_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->cell_space()->Size(),
"cell_space_live_bytes");
- AddNumber(result, heap->cell_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->cell_space()->Available(),
"cell_space_available_bytes");
- AddNumber(result, heap->cell_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result, heap->cell_space()->CommittedMemory(),
"cell_space_commited_bytes");
- AddNumber(result, heap->property_cell_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->property_cell_space()->Size(),
"property_cell_space_live_bytes");
- AddNumber(result, heap->property_cell_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->property_cell_space()->Available(),
"property_cell_space_available_bytes");
- AddNumber(result, heap->property_cell_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result,
+ heap->property_cell_space()->CommittedMemory(),
"property_cell_space_commited_bytes");
- AddNumber(result, heap->lo_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->lo_space()->Size(),
"lo_space_live_bytes");
- AddNumber(result, heap->lo_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->lo_space()->Available(),
"lo_space_available_bytes");
- AddNumber(result, heap->lo_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result, heap->lo_space()->CommittedMemory(),
"lo_space_commited_bytes");
- AddNumber(result, heap->amount_of_external_allocated_memory(),
- "amount_of_external_allocated_memory");
+ AddNumber64(args.GetIsolate(), result,
+ heap->amount_of_external_allocated_memory(),
+ "amount_of_external_allocated_memory");
args.GetReturnValue().Set(result);
}
-
-void StatisticsExtension::Register() {
- static StatisticsExtension statistics_extension;
- static v8::DeclareExtension declaration(&statistics_extension);
-}
-
} } // namespace v8::internal
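
The bulk of this hunk is mechanical: v8::Object::New(), v8::String::NewFromUtf8() and v8::Number::New() now all take the isolate, so the AddCounter/AddNumber helpers (and every macro expansion that calls them) get it threaded through explicitly. A small hedged sketch of the resulting helper shape, with an invented name and a double-only signature:

#include "v8.h"

// Isolate-explicit property helper in the style of AddNumber() above;
// illustrative only, not part of the patch.
static void AddNamedNumber(v8::Isolate* isolate,
                           v8::Local<v8::Object> object,
                           const char* name,
                           double value) {
  object->Set(v8::String::NewFromUtf8(isolate, name),
              v8::Number::New(isolate, value));
}
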
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
index bfd9c4134e..9e97b45dd6 100644
--- a/deps/v8/src/extensions/statistics-extension.h
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -36,10 +36,11 @@ namespace internal {
class StatisticsExtension : public v8::Extension {
public:
StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void GetCounters(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Register();
+
private:
static const char* const kSource;
};
diff --git a/deps/v8/src/extensions/trigger-failure-extension.cc b/deps/v8/src/extensions/trigger-failure-extension.cc
new file mode 100644
index 0000000000..83894b922c
--- /dev/null
+++ b/deps/v8/src/extensions/trigger-failure-extension.cc
@@ -0,0 +1,79 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "trigger-failure-extension.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+const char* const TriggerFailureExtension::kSource =
+ "native function triggerCheckFalse();"
+ "native function triggerAssertFalse();"
+ "native function triggerSlowAssertFalse();";
+
+
+v8::Handle<v8::FunctionTemplate>
+TriggerFailureExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> str) {
+ if (strcmp(*v8::String::Utf8Value(str), "triggerCheckFalse") == 0) {
+ return v8::FunctionTemplate::New(
+ isolate,
+ TriggerFailureExtension::TriggerCheckFalse);
+ } else if (strcmp(*v8::String::Utf8Value(str), "triggerAssertFalse") == 0) {
+ return v8::FunctionTemplate::New(
+ isolate,
+ TriggerFailureExtension::TriggerAssertFalse);
+ } else {
+ CHECK_EQ(0, strcmp(*v8::String::Utf8Value(str), "triggerSlowAssertFalse"));
+ return v8::FunctionTemplate::New(
+ isolate,
+ TriggerFailureExtension::TriggerSlowAssertFalse);
+ }
+}
+
+
+void TriggerFailureExtension::TriggerCheckFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(false);
+}
+
+
+void TriggerFailureExtension::TriggerAssertFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ ASSERT(false);
+}
+
+
+void TriggerFailureExtension::TriggerSlowAssertFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ SLOW_ASSERT(false);
+}
+
+} } // namespace v8::internal

diff --git a/deps/v8/src/extensions/trigger-failure-extension.h b/deps/v8/src/extensions/trigger-failure-extension.h
new file mode 100644
index 0000000000..467b7d28be
--- /dev/null
+++ b/deps/v8/src/extensions/trigger-failure-extension.h
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
+#define V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class TriggerFailureExtension : public v8::Extension {
+ public:
+ TriggerFailureExtension() : v8::Extension("v8/trigger-failure", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> name);
+ static void TriggerCheckFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void TriggerAssertFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void TriggerSlowAssertFalse(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ private:
+ static const char* const kSource;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
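
This extension only becomes reachable when the matching --expose-trigger-failure flag (added later in this diff, in flag-definitions.h) is turned on; the bootstrapper wiring is outside this section, so the following is only a hedged sketch of flipping such a flag from embedder code before any context is created.

#include <cstring>
#include "v8.h"

// Assumed usage: enable the flag before creating isolates/contexts so the
// trigger-failure natives can be installed. Whether the flag actually gates
// this extension is established elsewhere in the change, not in this section.
static void EnableTriggerFailureExtension() {
  const char flags[] = "--expose-trigger-failure";
  v8::V8::SetFlagsFromString(flags, static_cast<int>(std::strlen(flags)));
}
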
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 1dd246fc48..6bce5d3a6a 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -132,11 +132,14 @@ Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
}
-Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
+Handle<ObjectHashTable> Factory::NewObjectHashTable(
+ int at_least_space_for,
+ MinimumCapacity capacity_option) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
ObjectHashTable::Allocate(isolate()->heap(),
- at_least_space_for),
+ at_least_space_for,
+ capacity_option),
ObjectHashTable);
}
@@ -147,7 +150,7 @@ Handle<WeakHashTable> Factory::NewWeakHashTable(int at_least_space_for) {
isolate(),
WeakHashTable::Allocate(isolate()->heap(),
at_least_space_for,
- WeakHashTable::USE_DEFAULT_MINIMUM_CAPACITY,
+ USE_DEFAULT_MINIMUM_CAPACITY,
TENURED),
WeakHashTable);
}
@@ -203,9 +206,8 @@ Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
// Internalized strings are created in the old generation (data space).
Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeUtf8String(string),
- String);
+ Utf8StringKey key(string, isolate()->heap()->HashSeed());
+ return InternalizeStringWithKey(&key);
}
@@ -218,28 +220,38 @@ Handle<String> Factory::InternalizeString(Handle<String> string) {
Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeOneByteString(string),
- String);
+ OneByteStringKey key(string, isolate()->heap()->HashSeed());
+ return InternalizeStringWithKey(&key);
}
Handle<String> Factory::InternalizeOneByteString(
Handle<SeqOneByteString> string, int from, int length) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeOneByteString(
- string, from, length),
- String);
+ SubStringKey<uint8_t> key(string, from, length);
+ return InternalizeStringWithKey(&key);
}
Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
+ TwoByteStringKey key(string, isolate()->heap()->HashSeed());
+ return InternalizeStringWithKey(&key);
+}
+
+
+template<class StringTableKey>
+Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeTwoByteString(string),
+ isolate()->heap()->InternalizeStringWithKey(key),
String);
}
+template Handle<String> Factory::InternalizeStringWithKey<
+ SubStringKey<uint8_t> > (SubStringKey<uint8_t>* key);
+template Handle<String> Factory::InternalizeStringWithKey<
+ SubStringKey<uint16_t> > (SubStringKey<uint16_t>* key);
+
+
Handle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
@@ -284,11 +296,43 @@ Handle<SeqTwoByteString> Factory::NewRawTwoByteString(int length,
}
-Handle<String> Factory::NewConsString(Handle<String> first,
- Handle<String> second) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateConsString(*first, *second),
- String);
+// Returns true for a character in a range. Both limits are inclusive.
+static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
+  // This makes use of the unsigned wraparound.
+ return character - from <= to - from;
+}
+
+
+static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
+ uint16_t c1,
+ uint16_t c2) {
+ // Numeric strings have a different hash algorithm not known by
+ // LookupTwoCharsStringIfExists, so we skip this step for such strings.
+ if (!Between(c1, '0', '9') || !Between(c2, '0', '9')) {
+ String* result;
+ StringTable* table = isolate->heap()->string_table();
+ if (table->LookupTwoCharsStringIfExists(c1, c2, &result)) {
+ return handle(result);
+ }
+ }
+
+ // Now we know the length is 2, we might as well make use of that fact
+ // when building the new string.
+ if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
+ // We can do this.
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
+ Handle<SeqOneByteString> str = isolate->factory()->NewRawOneByteString(2);
+ uint8_t* dest = str->GetChars();
+ dest[0] = static_cast<uint8_t>(c1);
+ dest[1] = static_cast<uint8_t>(c2);
+ return str;
+ } else {
+ Handle<SeqTwoByteString> str = isolate->factory()->NewRawTwoByteString(2);
+ uc16* dest = str->GetChars();
+ dest[0] = c1;
+ dest[1] = c2;
+ return str;
+ }
}
@@ -304,11 +348,103 @@ Handle<String> ConcatStringContent(Handle<StringType> result,
}
+Handle<ConsString> Factory::NewRawConsString(String::Encoding encoding) {
+ Handle<Map> map = (encoding == String::ONE_BYTE_ENCODING)
+ ? cons_ascii_string_map() : cons_string_map();
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->Allocate(*map, NEW_SPACE),
+ ConsString);
+}
+
+
+Handle<String> Factory::NewConsString(Handle<String> left,
+ Handle<String> right) {
+ int left_length = left->length();
+ if (left_length == 0) return right;
+ int right_length = right->length();
+ if (right_length == 0) return left;
+
+ int length = left_length + right_length;
+
+ if (length == 2) {
+ uint16_t c1 = left->Get(0);
+ uint16_t c2 = right->Get(0);
+ return MakeOrFindTwoCharacterString(isolate(), c1, c2);
+ }
+
+ // Make sure that an out of memory exception is thrown if the length
+ // of the new cons string is too large.
+ if (length > String::kMaxLength || length < 0) {
+ isolate()->context()->mark_out_of_memory();
+ V8::FatalProcessOutOfMemory("String concatenation result too large.");
+ UNREACHABLE();
+ return Handle<String>::null();
+ }
+
+ bool left_is_one_byte = left->IsOneByteRepresentation();
+ bool right_is_one_byte = right->IsOneByteRepresentation();
+ bool is_one_byte = left_is_one_byte && right_is_one_byte;
+ bool is_one_byte_data_in_two_byte_string = false;
+ if (!is_one_byte) {
+ // At least one of the strings uses two-byte representation so we
+ // can't use the fast case code for short ASCII strings below, but
+ // we can try to save memory if all chars actually fit in ASCII.
+ is_one_byte_data_in_two_byte_string =
+ left->HasOnlyOneByteChars() && right->HasOnlyOneByteChars();
+ if (is_one_byte_data_in_two_byte_string) {
+ isolate()->counters()->string_add_runtime_ext_to_ascii()->Increment();
+ }
+ }
+
+ // If the resulting string is small make a flat string.
+ if (length < ConsString::kMinLength) {
+ // Note that neither of the two inputs can be a slice because:
+ STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
+ ASSERT(left->IsFlat());
+ ASSERT(right->IsFlat());
+
+ if (is_one_byte) {
+ Handle<SeqOneByteString> result = NewRawOneByteString(length);
+ DisallowHeapAllocation no_gc;
+ uint8_t* dest = result->GetChars();
+ // Copy left part.
+ const uint8_t* src = left->IsExternalString()
+ ? Handle<ExternalAsciiString>::cast(left)->GetChars()
+ : Handle<SeqOneByteString>::cast(left)->GetChars();
+ for (int i = 0; i < left_length; i++) *dest++ = src[i];
+ // Copy right part.
+ src = right->IsExternalString()
+ ? Handle<ExternalAsciiString>::cast(right)->GetChars()
+ : Handle<SeqOneByteString>::cast(right)->GetChars();
+ for (int i = 0; i < right_length; i++) *dest++ = src[i];
+ return result;
+ }
+
+ return (is_one_byte_data_in_two_byte_string)
+ ? ConcatStringContent<uint8_t>(NewRawOneByteString(length), left, right)
+ : ConcatStringContent<uc16>(NewRawTwoByteString(length), left, right);
+ }
+
+ Handle<ConsString> result = NewRawConsString(
+ (is_one_byte || is_one_byte_data_in_two_byte_string)
+ ? String::ONE_BYTE_ENCODING
+ : String::TWO_BYTE_ENCODING);
+
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+
+ result->set_hash_field(String::kEmptyHashField);
+ result->set_length(length);
+ result->set_first(*left, mode);
+ result->set_second(*right, mode);
+ return result;
+}
+
+
Handle<String> Factory::NewFlatConcatString(Handle<String> first,
Handle<String> second) {
int total_length = first->length() + second->length();
- if (first->IsOneByteRepresentationUnderneath() &&
- second->IsOneByteRepresentationUnderneath()) {
+ if (first->IsOneByteRepresentation() && second->IsOneByteRepresentation()) {
return ConcatStringContent<uint8_t>(
NewRawOneByteString(total_length), first, second);
} else {
@@ -318,22 +454,89 @@ Handle<String> Factory::NewFlatConcatString(Handle<String> first,
}
-Handle<String> Factory::NewSubString(Handle<String> str,
- int begin,
- int end) {
+Handle<SlicedString> Factory::NewRawSlicedString(String::Encoding encoding) {
+ Handle<Map> map = (encoding == String::ONE_BYTE_ENCODING)
+ ? sliced_ascii_string_map() : sliced_string_map();
CALL_HEAP_FUNCTION(isolate(),
- str->SubString(begin, end),
- String);
+ isolate()->heap()->Allocate(*map, NEW_SPACE),
+ SlicedString);
}
Handle<String> Factory::NewProperSubString(Handle<String> str,
int begin,
int end) {
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) str->StringVerify();
+#endif
ASSERT(begin > 0 || end < str->length());
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateSubString(*str, begin, end),
- String);
+
+ int length = end - begin;
+ if (length <= 0) return empty_string();
+ if (length == 1) {
+ return LookupSingleCharacterStringFromCode(isolate(), str->Get(begin));
+ }
+ if (length == 2) {
+ // Optimization for 2-byte strings often used as keys in a decompression
+ // dictionary. Check whether we already have the string in the string
+ // table to prevent creation of many unnecessary strings.
+ uint16_t c1 = str->Get(begin);
+ uint16_t c2 = str->Get(begin + 1);
+ return MakeOrFindTwoCharacterString(isolate(), c1, c2);
+ }
+
+ if (!FLAG_string_slices || length < SlicedString::kMinLength) {
+ if (str->IsOneByteRepresentation()) {
+ Handle<SeqOneByteString> result = NewRawOneByteString(length);
+ uint8_t* dest = result->GetChars();
+ DisallowHeapAllocation no_gc;
+ String::WriteToFlat(*str, dest, begin, end);
+ return result;
+ } else {
+ Handle<SeqTwoByteString> result = NewRawTwoByteString(length);
+ uc16* dest = result->GetChars();
+ DisallowHeapAllocation no_gc;
+ String::WriteToFlat(*str, dest, begin, end);
+ return result;
+ }
+ }
+
+ int offset = begin;
+
+ while (str->IsConsString()) {
+ Handle<ConsString> cons = Handle<ConsString>::cast(str);
+ int split = cons->first()->length();
+ if (split <= offset) {
+ // Slice is fully contained in the second part.
+ str = Handle<String>(cons->second(), isolate());
+ offset -= split; // Adjust for offset.
+ continue;
+ } else if (offset + length <= split) {
+ // Slice is fully contained in the first part.
+ str = Handle<String>(cons->first(), isolate());
+ continue;
+ }
+ break;
+ }
+
+ if (str->IsSlicedString()) {
+ Handle<SlicedString> slice = Handle<SlicedString>::cast(str);
+ str = Handle<String>(slice->parent(), isolate());
+ offset += slice->offset();
+ } else {
+ str = FlattenGetString(str);
+ }
+
+ ASSERT(str->IsSeqString() || str->IsExternalString());
+ Handle<SlicedString> slice = NewRawSlicedString(
+ str->IsOneByteRepresentation() ? String::ONE_BYTE_ENCODING
+ : String::TWO_BYTE_ENCODING);
+
+ slice->set_hash_field(String::kEmptyHashField);
+ slice->set_length(length);
+ slice->set_parent(*str);
+ slice->set_offset(offset);
+ return slice;
}
@@ -363,6 +566,14 @@ Handle<Symbol> Factory::NewSymbol() {
}
+Handle<Symbol> Factory::NewPrivateSymbol() {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocatePrivateSymbol(),
+ Symbol);
+}
+
+
Handle<Context> Factory::NewNativeContext() {
CALL_HEAP_FUNCTION(
isolate(),
@@ -441,6 +652,15 @@ Handle<Struct> Factory::NewStruct(InstanceType type) {
}
+Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
+ int aliased_context_slot) {
+ Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
+ NewStruct(ALIASED_ARGUMENTS_ENTRY_TYPE));
+ entry->set_aliased_context_slot(aliased_context_slot);
+ return entry;
+}
+
+
Handle<DeclaredAccessorDescriptor> Factory::NewDeclaredAccessorDescriptor() {
return Handle<DeclaredAccessorDescriptor>::cast(
NewStruct(DECLARED_ACCESSOR_DESCRIPTOR_TYPE));
@@ -518,7 +738,7 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
PretenureFlag pretenure) {
- ASSERT(0 <= length);
+ ASSERT(0 <= length && length <= Smi::kMaxValue);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalArray(length,
@@ -529,6 +749,20 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
}
+Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
+ int length,
+ ExternalArrayType array_type,
+ PretenureFlag pretenure) {
+ ASSERT(0 <= length && length <= Smi::kMaxValue);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFixedTypedArray(length,
+ array_type,
+ pretenure),
+ FixedTypedArrayBase);
+}
+
+
Handle<Cell> Factory::NewCell(Handle<Object> value) {
AllowDeferredHandleDereference convert_to_cell;
CALL_HEAP_FUNCTION(
@@ -573,10 +807,32 @@ Handle<Map> Factory::NewMap(InstanceType type,
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunctionPrototype(*function),
- JSObject);
+ // Make sure to use globals from the function's context, since the function
+ // can be from a different context.
+ Handle<Context> native_context(function->context()->native_context());
+ Handle<Map> new_map;
+ if (function->shared()->is_generator()) {
+ // Generator prototypes can share maps since they don't have "constructor"
+ // properties.
+ new_map = handle(native_context->generator_object_prototype_map());
+ } else {
+ // Each function prototype gets a fresh map to avoid unwanted sharing of
+ // maps between prototypes of different constructors.
+ Handle<JSFunction> object_function(native_context->object_function());
+ ASSERT(object_function->has_initial_map());
+ new_map = Map::Copy(handle(object_function->initial_map()));
+ }
+
+ Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
+
+ if (!function->shared()->is_generator()) {
+ JSObject::SetLocalPropertyIgnoreAttributes(prototype,
+ constructor_string(),
+ function,
+ DONT_ENUM);
+ }
+
+ return prototype;
}
@@ -594,11 +850,12 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
int instance_size_delta = extra_inobject_properties * kPointerSize;
int max_instance_size_delta =
JSObject::kMaxInstanceSize - copy->instance_size();
- if (instance_size_delta > max_instance_size_delta) {
+ int max_extra_properties = max_instance_size_delta >> kPointerSizeLog2;
+ if (extra_inobject_properties > max_extra_properties) {
// If the instance size overflows, we allocate as many properties
// as we can as inobject properties.
instance_size_delta = max_instance_size_delta;
- extra_inobject_properties = max_instance_size_delta >> kPointerSizeLog2;
+ extra_inobject_properties = max_extra_properties;
}
// Adjust the map with the extra inobject properties.
int inobject_properties =
@@ -690,7 +947,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
result->set_context(*context);
- int index = function_info->SearchOptimizedCodeMap(context->native_context());
+ int index = function_info->SearchOptimizedCodeMap(context->native_context(),
+ BailoutId::None());
if (!function_info->bound() && index < 0) {
int number_of_literals = function_info->num_literals();
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
@@ -706,7 +964,10 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
if (index > 0) {
// Caching of optimized code enabled and optimized code found.
- function_info->InstallFromOptimizedCodeMap(*result, index);
+ FixedArray* literals =
+ function_info->GetLiteralsFromOptimizedCodeMap(index);
+ if (literals != NULL) result->set_literals(literals);
+ result->ReplaceCode(function_info->GetCodeFromOptimizedCodeMap(index));
return result;
}
@@ -717,7 +978,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
function_info->allows_lazy_compilation() &&
!function_info->optimization_disabled() &&
!isolate()->DebuggerHasBreakPoints()) {
- result->MarkForLazyRecompilation();
+ result->MarkForOptimization();
}
return result;
}
@@ -812,7 +1073,7 @@ Handle<Object> Factory::NewError(const char* maker,
const char* message,
Vector< Handle<Object> > args) {
// Instantiate a closeable HandleScope for EscapeFrom.
- v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
+ v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
Handle<FixedArray> array = NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
array->set(i, *args[i]);
@@ -854,10 +1115,10 @@ Handle<String> Factory::EmergencyNewError(const char* message,
if (space > 0) {
MaybeObject* maybe_arg = args->GetElement(isolate(), i);
Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
- const char* arg = *arg_str->ToCString();
+ SmartArrayPointer<char> arg = arg_str->ToCString();
Vector<char> v2(p, static_cast<int>(space));
- OS::StrNCpy(v2, arg, space);
- space -= Min(space, strlen(arg));
+ OS::StrNCpy(v2, arg.get(), space);
+ space -= Min(space, strlen(arg.get()));
p = &buffer[kBufferSize] - space;
}
}
@@ -1039,14 +1300,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
-Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeString(*value), String);
-}
-
-
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
+ JSFunction::EnsureHasInitialMap(constructor);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSObject(*constructor, pretenure), JSObject);
@@ -1193,6 +1449,19 @@ void Factory::SetContent(Handle<JSArray> array,
}
+Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
+ Handle<JSFunction> function) {
+ ASSERT(function->shared()->is_generator());
+ JSFunction::EnsureHasInitialMap(function);
+ Handle<Map> map(function->initial_map());
+ ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObjectFromMap(*map),
+ JSGeneratorObject);
+}
+
+
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
Handle<JSFunction> array_buffer_fun(
isolate()->context()->native_context()->array_buffer_fun());
@@ -1217,32 +1486,12 @@ static JSFunction* GetTypedArrayFun(ExternalArrayType type,
Isolate* isolate) {
Context* native_context = isolate->context()->native_context();
switch (type) {
- case kExternalUnsignedByteArray:
- return native_context->uint8_array_fun();
-
- case kExternalByteArray:
- return native_context->int8_array_fun();
-
- case kExternalUnsignedShortArray:
- return native_context->uint16_array_fun();
-
- case kExternalShortArray:
- return native_context->int16_array_fun();
-
- case kExternalUnsignedIntArray:
- return native_context->uint32_array_fun();
-
- case kExternalIntArray:
- return native_context->int32_array_fun();
-
- case kExternalFloatArray:
- return native_context->float_array_fun();
-
- case kExternalDoubleArray:
- return native_context->double_array_fun();
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return native_context->type##_array_fun();
- case kExternalPixelArray:
- return native_context->uint8c_array_fun();
+ TYPED_ARRAYS(TYPED_ARRAY_FUN)
+#undef TYPED_ARRAY_FUN
default:
UNREACHABLE();
@@ -1317,7 +1566,6 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int start_position,
int end_position,
Handle<Object> script,
- Handle<Object> stack_trace,
Handle<Object> stack_frames) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSMessageObject(*type,
@@ -1325,7 +1573,6 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
start_position,
end_position,
*script,
- *stack_trace,
*stack_frames),
JSMessageObject);
}
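
One detail of the new MakeOrFindTwoCharacterString path that is easy to misread is Between(): it folds the two bound checks into a single unsigned comparison by letting character - from wrap around when character is below the lower bound. A standalone illustration in plain C++ (not V8 code) follows.

#include <cassert>
#include <cstdint>

// Same trick as Between() in the hunk above: if character < from, the
// subtraction wraps to a huge unsigned value and the single <= test fails,
// so both bounds are covered by one comparison.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  return character - from <= to - from;
}

int main() {
  assert(Between('5', '0', '9'));   // inside the range
  assert(!Between('a', '0', '9'));  // above the range
  assert(!Between('/', '0', '9'));  // below the range: wraps and fails
  return 0;
}
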
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index ee25bf23d8..00ae587d64 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -74,7 +74,9 @@ class Factory {
Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
- Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
+ Handle<ObjectHashTable> NewObjectHashTable(
+ int at_least_space_for,
+ MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
Handle<WeakHashTable> NewWeakHashTable(int at_least_space_for);
@@ -97,11 +99,14 @@ class Factory {
}
Handle<String> InternalizeString(Handle<String> str);
Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
- Handle<String> InternalizeOneByteString(Handle<SeqOneByteString>,
- int from,
- int length);
+ Handle<String> InternalizeOneByteString(
+ Handle<SeqOneByteString>, int from, int length);
+
Handle<String> InternalizeTwoByteString(Vector<const uc16> str);
+ template<class StringTableKey>
+ Handle<String> InternalizeStringWithKey(StringTableKey* key);
+
// String creation functions. Most of the string creation functions take
// a Heap::PretenureFlag argument to optionally request that they be
@@ -156,23 +161,28 @@ class Factory {
PretenureFlag pretenure = NOT_TENURED);
// Create a new cons string object which consists of a pair of strings.
- Handle<String> NewConsString(Handle<String> first,
- Handle<String> second);
+ Handle<String> NewConsString(Handle<String> left,
+ Handle<String> right);
+
+ Handle<ConsString> NewRawConsString(String::Encoding encoding);
// Create a new sequential string containing the concatenation of the inputs.
Handle<String> NewFlatConcatString(Handle<String> first,
Handle<String> second);
- // Create a new string object which holds a substring of a string.
- Handle<String> NewSubString(Handle<String> str,
- int begin,
- int end);
-
// Create a new string object which holds a proper substring of a string.
Handle<String> NewProperSubString(Handle<String> str,
int begin,
int end);
+ // Create a new string object which holds a substring of a string.
+ Handle<String> NewSubString(Handle<String> str, int begin, int end) {
+ if (begin == 0 && end == str->length()) return str;
+ return NewProperSubString(str, begin, end);
+ }
+
+ Handle<SlicedString> NewRawSlicedString(String::Encoding encoding);
+
// Creates a new external String object. There are two String encodings
// in the system: ASCII and two byte. Unlike other String types, it does
// not make sense to have a UTF-8 factory function for external strings,
@@ -184,6 +194,7 @@ class Factory {
// Create a symbol.
Handle<Symbol> NewSymbol();
+ Handle<Symbol> NewPrivateSymbol();
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -214,13 +225,13 @@ class Factory {
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
- // Return the internalized version of the passed in string.
- Handle<String> InternalizedStringFromString(Handle<String> value);
-
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
+ Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
+ int aliased_context_slot);
+
Handle<DeclaredAccessorDescriptor> NewDeclaredAccessorDescriptor();
Handle<DeclaredAccessorInfo> NewDeclaredAccessorInfo();
@@ -246,6 +257,11 @@ class Factory {
void* external_pointer,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<FixedTypedArrayBase> NewFixedTypedArray(
+ int length,
+ ExternalArrayType array_type,
+ PretenureFlag pretenure = NOT_TENURED);
+
Handle<Cell> NewCell(Handle<Object> value);
Handle<PropertyCell> NewPropertyCellWithHole();
@@ -341,6 +357,8 @@ class Factory {
void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
+ Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
+
Handle<JSArrayBuffer> NewJSArrayBuffer();
Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
@@ -507,7 +525,6 @@ class Factory {
int start_position,
int end_position,
Handle<Object> script,
- Handle<Object> stack_trace,
Handle<Object> stack_frames);
Handle<SeededNumberDictionary> DictionaryAtNumberPut(
@@ -584,101 +601,6 @@ Handle<Object> Factory::NewNumberFromSize(size_t value,
}
-// Used to "safely" transition from pointer-based runtime code to Handle-based
-// runtime code. When a GC happens during the called Handle-based code, a
-// failure object is returned to the pointer-based code to cause it abort and
-// re-trigger a gc of it's own. Since this double-gc will cause the Handle-based
-// code to be called twice, it must be idempotent.
-class IdempotentPointerToHandleCodeTrampoline {
- public:
- explicit IdempotentPointerToHandleCodeTrampoline(Isolate* isolate)
- : isolate_(isolate) {}
-
- template<typename R>
- MUST_USE_RESULT MaybeObject* Call(R (*function)()) {
- int collections = isolate_->heap()->gc_count();
- (*function)();
- return (collections == isolate_->heap()->gc_count())
- ? isolate_->heap()->true_value()
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R>
- MUST_USE_RESULT MaybeObject* CallWithReturnValue(R (*function)()) {
- int collections = isolate_->heap()->gc_count();
- Object* result = (*function)();
- return (collections == isolate_->heap()->gc_count())
- ? result
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1>
- MUST_USE_RESULT MaybeObject* Call(R (*function)(P1), P1 p1) {
- int collections = isolate_->heap()->gc_count();
- (*function)(p1);
- return (collections == isolate_->heap()->gc_count())
- ? isolate_->heap()->true_value()
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1>
- MUST_USE_RESULT MaybeObject* CallWithReturnValue(
- R (*function)(P1),
- P1 p1) {
- int collections = isolate_->heap()->gc_count();
- Object* result = (*function)(p1);
- return (collections == isolate_->heap()->gc_count())
- ? result
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1, typename P2>
- MUST_USE_RESULT MaybeObject* Call(
- R (*function)(P1, P2),
- P1 p1,
- P2 p2) {
- int collections = isolate_->heap()->gc_count();
- (*function)(p1, p2);
- return (collections == isolate_->heap()->gc_count())
- ? isolate_->heap()->true_value()
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1, typename P2>
- MUST_USE_RESULT MaybeObject* CallWithReturnValue(
- R (*function)(P1, P2),
- P1 p1,
- P2 p2) {
- int collections = isolate_->heap()->gc_count();
- Object* result = (*function)(p1, p2);
- return (collections == isolate_->heap()->gc_count())
- ? result
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1, typename P2, typename P3, typename P4,
- typename P5, typename P6, typename P7>
- MUST_USE_RESULT MaybeObject* CallWithReturnValue(
- R (*function)(P1, P2, P3, P4, P5, P6, P7),
- P1 p1,
- P2 p2,
- P3 p3,
- P4 p4,
- P5 p5,
- P6 p6,
- P7 p7) {
- int collections = isolate_->heap()->gc_count();
- Handle<Object> result = (*function)(p1, p2, p3, p4, p5, p6, p7);
- return (collections == isolate_->heap()->gc_count())
- ? *result
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- private:
- Isolate* isolate_;
-};
-
-
} } // namespace v8::internal
#endif // V8_FACTORY_H_
diff --git a/deps/v8/src/feedback-slots.h b/deps/v8/src/feedback-slots.h
new file mode 100644
index 0000000000..9760c652bc
--- /dev/null
+++ b/deps/v8/src/feedback-slots.h
@@ -0,0 +1,110 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FEEDBACK_SLOTS_H_
+#define V8_FEEDBACK_SLOTS_H_
+
+#include "v8.h"
+
+#include "isolate.h"
+
+namespace v8 {
+namespace internal {
+
+enum ComputablePhase {
+ DURING_PARSE,
+ AFTER_SCOPING
+};
+
+
+class FeedbackSlotInterface {
+ public:
+ static const int kInvalidFeedbackSlot = -1;
+
+ virtual ~FeedbackSlotInterface() {}
+
+ // When can we ask how many feedback slots are necessary?
+ virtual ComputablePhase GetComputablePhase() = 0;
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate) = 0;
+ virtual void SetFirstFeedbackSlot(int slot) = 0;
+};
+
+
+class DeferredFeedbackSlotProcessor {
+ public:
+ DeferredFeedbackSlotProcessor()
+ : slot_nodes_(NULL),
+ slot_count_(0) { }
+
+ void add_slot_node(Zone* zone, FeedbackSlotInterface* slot) {
+ if (slot->GetComputablePhase() == DURING_PARSE) {
+ // No need to add to the list
+ int count = slot->ComputeFeedbackSlotCount(zone->isolate());
+ slot->SetFirstFeedbackSlot(slot_count_);
+ slot_count_ += count;
+ } else {
+ if (slot_nodes_ == NULL) {
+ slot_nodes_ = new(zone) ZoneList<FeedbackSlotInterface*>(10, zone);
+ }
+ slot_nodes_->Add(slot, zone);
+ }
+ }
+
+ void ProcessFeedbackSlots(Isolate* isolate) {
+ // Scope analysis must have been done.
+ if (slot_nodes_ == NULL) {
+ return;
+ }
+
+ int current_slot = slot_count_;
+ for (int i = 0; i < slot_nodes_->length(); i++) {
+ FeedbackSlotInterface* slot_interface = slot_nodes_->at(i);
+ int count = slot_interface->ComputeFeedbackSlotCount(isolate);
+ if (count > 0) {
+ slot_interface->SetFirstFeedbackSlot(current_slot);
+ current_slot += count;
+ }
+ }
+
+ slot_count_ = current_slot;
+ slot_nodes_->Clear();
+ }
+
+ int slot_count() {
+ ASSERT(slot_count_ >= 0);
+ return slot_count_;
+ }
+
+ private:
+ ZoneList<FeedbackSlotInterface*>* slot_nodes_;
+ int slot_count_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_FEEDBACK_SLOTS_H_
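
The contract of the new header is two-phase: a node whose slot requirement is computable DURING_PARSE is numbered immediately, while AFTER_SCOPING nodes are queued and only numbered once ProcessFeedbackSlots() runs after scope analysis. Below is a simplified standalone sketch of that contract, with std::vector standing in for Zone/ZoneList and the V8-specific types stripped out; all names are illustrative.

#include <vector>

enum ComputablePhase { DURING_PARSE, AFTER_SCOPING };

struct SlotUser {
  ComputablePhase phase;
  int slots_needed;
  int first_slot = -1;  // corresponds to kInvalidFeedbackSlot
};

class SlotProcessor {
 public:
  void Add(SlotUser* user) {
    if (user->phase == DURING_PARSE) {
      // Computable right away: assign slots immediately.
      user->first_slot = slot_count_;
      slot_count_ += user->slots_needed;
    } else {
      // Not computable yet: queue it until after scope analysis.
      deferred_.push_back(user);
    }
  }

  void ProcessDeferred() {
    for (SlotUser* user : deferred_) {
      if (user->slots_needed > 0) {
        user->first_slot = slot_count_;
        slot_count_ += user->slots_needed;
      }
    }
    deferred_.clear();
  }

  int slot_count() const { return slot_count_; }

 private:
  std::vector<SlotUser*> deferred_;
  int slot_count_ = 0;
};
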
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 34a903aac2..476e5348af 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -69,6 +69,9 @@
#define DEFINE_implication(whenflag, thenflag) \
if (FLAG_##whenflag) FLAG_##thenflag = true;
+#define DEFINE_neg_implication(whenflag, thenflag) \
+ if (FLAG_##whenflag) FLAG_##thenflag = false;
+
#else
#error No mode supplied when including flags.defs
#endif
@@ -90,6 +93,10 @@
#define DEFINE_implication(whenflag, thenflag)
#endif
+#ifndef DEFINE_neg_implication
+#define DEFINE_neg_implication(whenflag, thenflag)
+#endif
+
#define COMMA ,
#ifdef FLAG_MODE_DECLARE
@@ -171,6 +178,7 @@ DEFINE_bool(harmony_modules, false,
"enable harmony modules (implies block scoping)")
DEFINE_bool(harmony_symbols, false,
"enable harmony symbols (a.k.a. private names)")
+DEFINE_bool(harmony_promises, false, "enable harmony promises")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
@@ -187,6 +195,7 @@ DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
DEFINE_implication(harmony, harmony_symbols)
+DEFINE_implication(harmony, harmony_promises)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
@@ -196,20 +205,27 @@ DEFINE_implication(harmony, harmony_numeric_literals)
DEFINE_implication(harmony, harmony_strings)
DEFINE_implication(harmony, harmony_arrays)
DEFINE_implication(harmony, harmony_maths)
+DEFINE_implication(harmony_promises, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
+DEFINE_bool(compiled_keyed_dictionary_loads, true,
+ "use optimizing compiler to generate keyed dictionary load stubs")
DEFINE_bool(clever_optimizations, true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenuring, true, "allocate objects in old space")
// TODO(hpayer): We will remove this flag as soon as we have pretenuring
// support for specific allocation sites.
DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
-DEFINE_bool(allocation_site_pretenuring, false,
+DEFINE_bool(allocation_site_pretenuring, true,
"pretenure with allocation sites")
+DEFINE_bool(trace_pretenuring, false,
+ "trace pretenuring decisions of HAllocate instructions")
+DEFINE_bool(trace_pretenuring_statistics, false,
+ "trace allocation site pretenuring statistics")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
@@ -233,10 +249,14 @@ DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "*", "optimization filter")
DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
+DEFINE_int(gvn_iterations, 3, "maximum number of GVN fix-point iterations")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_bool(use_escape_analysis, false, "use hydrogen escape analysis")
+DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
DEFINE_bool(use_allocation_folding, true, "use allocation folding")
+DEFINE_bool(use_local_allocation_folding, false, "only fold in basic blocks")
+DEFINE_bool(use_write_barrier_elimination, true,
+ "eliminate write barriers targeting allocations in optimized code")
DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
@@ -289,14 +309,12 @@ DEFINE_bool(array_index_dehoisting, true,
"perform array index dehoisting")
DEFINE_bool(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
-DEFINE_bool(load_elimination, false, "use load elimination")
-DEFINE_bool(check_elimination, false, "use check elimination")
+DEFINE_bool(load_elimination, true, "use load elimination")
+DEFINE_bool(check_elimination, true, "use check elimination")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
DEFINE_bool(unreachable_code_elimination, true, "eliminate unreachable code")
-DEFINE_bool(track_allocation_sites, true,
- "Use allocation site info to reduce transitions")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
@@ -310,8 +328,7 @@ DEFINE_bool(flush_optimized_code_cache, true,
DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
-DEFINE_int(loop_weight, 1, "loop weight for representation inference")
-DEFINE_int(escape_analysis_iterations, 1,
+DEFINE_int(escape_analysis_iterations, 2,
"maximum number of escape analysis fix-point iterations")
DEFINE_bool(optimize_for_in, true,
@@ -338,33 +355,15 @@ DEFINE_bool(omit_map_checks_for_leaf_maps, true,
"do not emit check maps for constant values that have a leaf map, "
"deoptimize the optimized code if the layout of the maps changes.")
-// Experimental profiler changes.
-DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
-DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
+// Profiler flags.
DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
-DEFINE_bool(self_optimization, false,
- "primitive functions trigger their own optimization")
-DEFINE_bool(direct_self_opt, false,
- "call recompile stub directly when self-optimizing")
-DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
-DEFINE_bool(interrupt_at_exit, false,
- "insert an interrupt check at function exit")
-DEFINE_bool(weighted_back_edges, false,
- "weight back edges by jump distance for interrupt triggering")
- // 0x1700 fits in the immediate field of an ARM instruction.
-DEFINE_int(interrupt_budget, 0x1700,
+ // 0x1800 fits in the immediate field of an ARM instruction.
+DEFINE_int(interrupt_budget, 0x1800,
"execution budget before interrupt is triggered")
DEFINE_int(type_info_threshold, 25,
"percentage of ICs that must have type info to allow optimization")
DEFINE_int(self_opt_count, 130, "call count before self-optimization")
-DEFINE_implication(experimental_profiler, watch_ic_patching)
-DEFINE_implication(experimental_profiler, self_optimization)
-// Not implying direct_self_opt here because it seems to be a bad idea.
-DEFINE_implication(experimental_profiler, retry_self_opt)
-DEFINE_implication(experimental_profiler, interrupt_at_exit)
-DEFINE_implication(experimental_profiler, weighted_back_edges)
-
DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing")
DEFINE_implication(trace_opt_verbose, trace_opt)
@@ -403,12 +402,14 @@ DEFINE_bool(enable_vldr_imm, false,
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
+DEFINE_bool(expose_free_buffer, false, "expose freeBuffer extension")
DEFINE_bool(expose_gc, false, "expose gc extension")
DEFINE_string(expose_gc_as, NULL,
"expose gc extension under the specified name")
DEFINE_implication(expose_gc_as, expose_gc)
DEFINE_bool(expose_externalize_string, false,
"expose externalize string extension")
+DEFINE_bool(expose_trigger_failure, false, "expose trigger-failure extension")
DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_bool(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
@@ -417,10 +418,6 @@ DEFINE_bool(disable_native_files, false, "disable builtin natives files")
// builtins-ia32.cc
DEFINE_bool(inline_new, true, "use fast inline allocation")
-// checks.cc
-DEFINE_bool(stack_trace_on_abort, true,
- "print a stack trace if an assertion failure occurs")
-
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
@@ -502,6 +499,9 @@ DEFINE_bool(trace_gc_ignore_scavenger, false,
"do not print trace line after scavenger collection")
DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
+DEFINE_bool(print_max_heap_committed, false,
+ "print statistics of the maximum memory committed for the heap "
+ "in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(trace_fragmentation, false,
@@ -533,6 +533,7 @@ DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
DEFINE_int(sweeper_threads, 0,
"number of parallel and concurrent sweeping threads")
+DEFINE_bool(job_based_sweeping, false, "enable job based sweeping")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
@@ -580,21 +581,35 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
-// simulator-arm.cc and simulator-mips.cc
+// simulator-arm.cc, simulator-a64.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
+#ifdef V8_TARGET_ARCH_A64
+DEFINE_int(sim_stack_alignment, 16,
+ "Stack alignment in bytes in simulator. This must be a power of two "
+ "and it must be at least 16. 16 is default.")
+#else
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
+#endif
+DEFINE_int(sim_stack_size, 2 * MB / KB,
+ "Stack size of the A64 simulator in kBytes (default is 2 MB)")
+DEFINE_bool(log_regs_modified, true,
+ "When logging register values, only print modified registers.")
+DEFINE_bool(log_colour, true,
+ "When logging, try to use coloured output.")
+DEFINE_bool(ignore_asm_unimplemented_break, false,
+ "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
+DEFINE_bool(trace_sim_messages, false,
+ "Trace simulator debug messages. Implied by --trace-sim.")
// isolate.cc
+DEFINE_bool(stack_trace_on_illegal, false,
+ "print stack trace when an illegal exception is thrown")
DEFINE_bool(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
-DEFINE_bool(trace_exception, false,
- "print stack trace when throwing exceptions")
-DEFINE_bool(preallocate_message_memory, false,
- "preallocate some memory to build stack traces.")
DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
@@ -606,10 +621,6 @@ DEFINE_int(hash_seed, 0,
DEFINE_bool(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
-// v8.cc
-DEFINE_bool(preemption, false,
- "activate a 100ms timer that switches between V8 threads")
-
// Regexp
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
@@ -636,6 +647,14 @@ DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
"Print the time it takes to lazily compile hydrogen code stubs.")
+DEFINE_bool(predictable, false, "enable predictable mode")
+DEFINE_neg_implication(predictable, randomize_hashes)
+DEFINE_neg_implication(predictable, concurrent_recompilation)
+DEFINE_neg_implication(predictable, concurrent_osr)
+DEFINE_neg_implication(predictable, concurrent_sweeping)
+DEFINE_neg_implication(predictable, parallel_sweeping)
+
+
//
// Dev shell flags
//
@@ -784,6 +803,10 @@ DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(logfile_per_isolate, true, "Separate log files for each isolate.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
+DEFINE_bool(perf_basic_prof, false,
+ "Enable perf linux profiler (basic support).")
+DEFINE_bool(perf_jit_prof, false,
+ "Enable perf linux profiler (experimental annotate support).")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
@@ -791,6 +814,20 @@ DEFINE_bool(log_timer_events, false,
"Time events including external callbacks.")
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
+DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
+DEFINE_string(log_instruction_file, "a64_inst.csv",
+ "AArch64 instruction statistics log file.")
+DEFINE_int(log_instruction_period, 1 << 22,
+ "AArch64 instruction statistics logging period.")
+
+DEFINE_bool(redirect_code_traces, false,
+ "output deopt information and disassembly into file "
+ "code-<pid>-<isolate id>.asm")
+DEFINE_string(redirect_code_traces_to, NULL,
+ "output deopt information and disassembly into the given file")
+
+DEFINE_bool(hydrogen_track_positions, false,
+ "track source code positions when building IR")
//
// Disassembler only flags
@@ -824,8 +861,6 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
"printing optimized code based on it")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
-DEFINE_bool(emit_opt_code_positions, false,
- "annotate optimize code with source code positions")
#ifdef ENABLE_DISASSEMBLER
DEFINE_bool(sodium, false, "print generated code output suitable for use with "
@@ -834,7 +869,7 @@ DEFINE_bool(sodium, false, "print generated code output suitable for use with "
DEFINE_implication(sodium, print_code_stubs)
DEFINE_implication(sodium, print_code)
DEFINE_implication(sodium, print_opt_code)
-DEFINE_implication(sodium, emit_opt_code_positions)
+DEFINE_implication(sodium, hydrogen_track_positions)
DEFINE_implication(sodium, code_comments)
DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
@@ -873,6 +908,7 @@ DEFINE_bool(enable_ool_constant_pool, false,
#undef DEFINE_float
#undef DEFINE_args
#undef DEFINE_implication
+#undef DEFINE_neg_implication
#undef DEFINE_ALIAS_bool
#undef DEFINE_ALIAS_int
#undef DEFINE_ALIAS_string
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 0c36aed332..8e42206c59 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -566,7 +566,7 @@ void FlagList::PrintHelp() {
Flag* f = &flags[i];
SmartArrayPointer<const char> value = ToString(f);
printf(" --%s (%s)\n type: %s default: %s\n",
- f->name(), f->comment(), Type2String(f->type()), *value);
+ f->name(), f->comment(), Type2String(f->type()), value.get());
}
}
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 2b15bfffab..2973bad6af 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -36,6 +36,8 @@
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/frames-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/frames-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 167277f799..3b55c276cf 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -38,8 +38,6 @@
#include "string-stream.h"
#include "vm-state-inl.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
@@ -546,6 +544,9 @@ void ExitFrame::Iterate(ObjectVisitor* v) const {
// the calling frame.
IteratePc(v, pc_address(), LookupCode());
v->VisitPointer(&code_slot());
+ if (FLAG_enable_ool_constant_pool) {
+ v->VisitPointer(&constant_pool_slot());
+ }
}
@@ -826,7 +827,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
SmartArrayPointer<char> c_script_name =
script_name->ToCString(DISALLOW_NULLS,
ROBUST_STRING_TRAVERSAL);
- PrintF(file, " at %s:%d", *c_script_name, line);
+ PrintF(file, " at %s:%d", c_script_name.get(), line);
} else {
PrintF(file, " at <unknown>:%d", line);
}
@@ -986,14 +987,16 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
// to construct a stack trace, the receiver is always in a stack slot.
opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::STACK_SLOT ||
- opcode == Translation::LITERAL);
+ opcode == Translation::LITERAL ||
+ opcode == Translation::CAPTURED_OBJECT ||
+ opcode == Translation::DUPLICATED_OBJECT);
int index = it.Next();
// Get the correct receiver in the optimized frame.
Object* receiver = NULL;
if (opcode == Translation::LITERAL) {
receiver = data->LiteralArray()->get(index);
- } else {
+ } else if (opcode == Translation::STACK_SLOT) {
// Positive index means the value is spilled to the locals
// area. Negative means it is stored in the incoming parameter
// area.
@@ -1009,6 +1012,12 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
? this->receiver()
: this->GetParameter(parameter_index);
}
+ } else {
+ // TODO(3029): Materializing a captured object (or duplicated
+ // object) is hard, we return undefined for now. This breaks the
+ // produced stack trace, as constructor frames aren't marked as
+ // such anymore.
+ receiver = isolate()->heap()->undefined_value();
}
Code* code = function->shared()->code();
@@ -1337,7 +1346,7 @@ void EntryFrame::Iterate(ObjectVisitor* v) const {
void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
- const int offset = StandardFrameConstants::kContextOffset;
+ const int offset = StandardFrameConstants::kLastObjectOffset;
Object** base = &Memory::Object_at(sp());
Object** limit = &Memory::Object_at(fp() + offset) + 1;
for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
@@ -1375,7 +1384,7 @@ void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
kFirstRegisterParameterFrameOffset);
v->VisitPointers(base, limit);
base = &Memory::Object_at(fp() + StandardFrameConstants::kMarkerOffset);
- const int offset = StandardFrameConstants::kContextOffset;
+ const int offset = StandardFrameConstants::kLastObjectOffset;
limit = &Memory::Object_at(fp() + offset) + 1;
v->VisitPointers(base, limit);
IteratePc(v, pc_address(), LookupCode());
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index d2dbfe2815..af2b55afb5 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -35,7 +35,11 @@
namespace v8 {
namespace internal {
+#if V8_TARGET_ARCH_A64
+typedef uint64_t RegList;
+#else
typedef uint32_t RegList;
+#endif
// Get the number of registers in a given register list.
int NumRegs(RegList list);
@@ -143,6 +147,7 @@ class StackHandler BASE_EMBEDDED {
inline Kind kind() const;
inline unsigned index() const;
+ inline Object** constant_pool_address() const;
inline Object** context_address() const;
inline Object** code_address() const;
inline void SetFp(Address slot, Address fp);
@@ -167,17 +172,25 @@ class StackHandler BASE_EMBEDDED {
class StandardFrameConstants : public AllStatic {
public:
// Fixed part of the frame consists of return address, caller fp,
- // context and function.
- // StandardFrame::IterateExpressions assumes that kContextOffset is the last
- // object pointer.
- static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
- 2 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kFPOnStackSize;
- static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+ // constant pool (if FLAG_enable_ool_constant_pool), context, and function.
+ // StandardFrame::IterateExpressions assumes that kLastObjectOffset is the
+ // last object pointer.
+ static const int kCPSlotSize =
+ FLAG_enable_ool_constant_pool ? kPointerSize : 0;
+ static const int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
+ static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
+ kFixedFrameSizeFromFp;
+ static const int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
+ static const int kMarkerOffset = -2 * kPointerSize - kCPSlotSize;
+ static const int kContextOffset = -1 * kPointerSize - kCPSlotSize;
+ static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
+ -1 * kPointerSize : 0;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kFPOnStackSize;
+ static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+
+ static const int kLastObjectOffset = FLAG_enable_ool_constant_pool ?
+ kConstantPoolOffset : kContextOffset;
};
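With the out-of-line constant pool enabled, the slot nearest the saved fp holds the constant pool pointer and the context, marker and expression slots each move down one word; kLastObjectOffset then names whichever slot is now the topmost object pointer the GC must visit. A worked recomputation of the offsets, assuming a 64-bit target where kPointerSize is 8 (sketch, not part of the patch):

    #include <cstdint>

    constexpr int kPointerSize = 8;  // assumption: 64-bit target

    template <bool kOolConstantPool>
    struct FrameOffsets {
      static constexpr int kCPSlotSize = kOolConstantPool ? kPointerSize : 0;
      static constexpr int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
      static constexpr int kMarkerOffset = -2 * kPointerSize - kCPSlotSize;
      static constexpr int kContextOffset = -1 * kPointerSize - kCPSlotSize;
      static constexpr int kConstantPoolOffset =
          kOolConstantPool ? -1 * kPointerSize : 0;
    };

    // Without the pool slot the context stays at fp - 8, as before.
    static_assert(FrameOffsets<false>::kContextOffset == -8, "");
    // With it, fp - 8 holds the constant pool and the context moves to fp - 16.
    static_assert(FrameOffsets<true>::kConstantPoolOffset == -8, "");
    static_assert(FrameOffsets<true>::kContextOffset == -16, "");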
@@ -418,6 +431,7 @@ class ExitFrame: public StackFrame {
virtual Code* unchecked_code() const;
Object*& code_slot() const;
+ Object*& constant_pool_slot() const;
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@@ -601,6 +615,7 @@ class JavaScriptFrame: public StandardFrame {
// Architecture-specific register description.
static Register fp_register();
static Register context_register();
+ static Register constant_pool_pointer_register();
static JavaScriptFrame* cast(StackFrame* frame) {
ASSERT(frame->is_java_script());
@@ -757,6 +772,7 @@ class StubFailureTrampolineFrame: public StandardFrame {
// Architecture-specific register description.
static Register fp_register();
static Register context_register();
+ static Register constant_pool_pointer_register();
protected:
inline explicit StubFailureTrampolineFrame(
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index fec9ee565d..16bb6c0d01 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -312,6 +312,10 @@ void BreakableStatementChecker::VisitThisFunction(ThisFunction* expr) {
bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Isolate* isolate = info->isolate();
+
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_compile_full_code);
+
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
@@ -341,7 +345,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
- cgen.PopulateTypeFeedbackCells(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_handler_table(*cgen.handler_table());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -383,6 +386,15 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
}
+void FullCodeGenerator::InitializeFeedbackVector() {
+ int length = info_->function()->slot_count();
+ ASSERT_EQ(isolate()->heap()->the_hole_value(),
+ *TypeFeedbackInfo::UninitializedSentinel(isolate()));
+ feedback_vector_ = isolate()->factory()->NewFixedArrayWithHoles(length,
+ TENURED);
+}
+
+
void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
// Fill in the deoptimization information.
ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
@@ -401,6 +413,7 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
info->set_ic_total_count(ic_total_count_);
+ info->set_feedback_vector(*FeedbackVector());
ASSERT(!isolate()->heap()->InNewSpace(*info));
code->set_type_feedback_info(*info);
}
@@ -417,28 +430,26 @@ void FullCodeGenerator::Initialize() {
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
- InitializeAstVisitor(info_->isolate());
+ InitializeAstVisitor(info_->zone());
}
-void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
- if (type_feedback_cells_.is_empty()) return;
- int length = type_feedback_cells_.length();
- int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
- Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
- isolate()->factory()->NewFixedArray(array_size, TENURED));
- for (int i = 0; i < length; i++) {
- cache->SetAstId(i, type_feedback_cells_[i].ast_id);
- cache->SetCell(i, *type_feedback_cells_[i].cell);
- }
- TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
- *cache);
+void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
+ PrepareForBailoutForId(node->id(), state);
}
+void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
+ TypeFeedbackId id) {
+ ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode);
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), extra_state);
+ CallIC(ic, id);
+}
+
-void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
- PrepareForBailoutForId(node->id(), state);
+void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), strict_mode());
+ CallIC(ic, id);
}
@@ -473,13 +484,6 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
}
-void FullCodeGenerator::RecordTypeFeedbackCell(
- TypeFeedbackId id, Handle<Cell> cell) {
- TypeFeedbackCellEntry entry = { id, cell };
- type_feedback_cells_.Add(entry, zone());
-}
-
-
void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
@@ -832,7 +836,7 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
} else {
// Check if the statement will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker(isolate());
+ BreakableStatementChecker checker(zone());
checker.Check(stmt);
// Record the statement position right here if the statement is not
// breakable. For breakable statements the actual recording of the
@@ -858,7 +862,7 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
} else {
// Check if the expression will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker(isolate());
+ BreakableStatementChecker checker(zone());
checker.Check(expr);
// Record a statement position right here if the expression is not
// breakable. For breakable expressions the actual recording of the
@@ -1083,16 +1087,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
scope_ = stmt->scope();
ASSERT(!scope_->is_module_scope());
{ Comment cmnt(masm_, "[ Extend block context");
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
- int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
- __ Push(scope_info);
+ __ Push(scope_->GetScopeInfo());
PushFunctionArgumentForContextAllocation();
- if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
- FastNewBlockContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kPushBlockContext, 2);
- }
+ __ CallRuntime(Runtime::kPushBlockContext, 2);
// Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset,
@@ -1579,7 +1576,8 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
// Compute the function template for the native function.
Handle<String> name = expr->name();
v8::Handle<v8::FunctionTemplate> fun_template =
- expr->extension()->GetNativeFunction(v8::Utils::ToLocal(name));
+ expr->extension()->GetNativeFunctionTemplate(
+ reinterpret_cast<v8::Isolate*>(isolate()), v8::Utils::ToLocal(name));
ASSERT(!fun_template.IsEmpty());
// Instantiate the function and create a shared function info from it.
@@ -1643,8 +1641,7 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
}
-void BackEdgeTable::Patch(Isolate* isolate,
- Code* unoptimized) {
+void BackEdgeTable::Patch(Isolate* isolate, Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
@@ -1667,8 +1664,7 @@ void BackEdgeTable::Patch(Isolate* isolate,
}
-void BackEdgeTable::Revert(Isolate* isolate,
- Code* unoptimized) {
+void BackEdgeTable::Revert(Isolate* isolate, Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
@@ -1693,25 +1689,23 @@ void BackEdgeTable::Revert(Isolate* isolate,
}
-void BackEdgeTable::AddStackCheck(CompilationInfo* info) {
+void BackEdgeTable::AddStackCheck(Handle<Code> code, uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
- Isolate* isolate = info->isolate();
- Code* code = info->shared_info()->code();
- Address pc = code->instruction_start() + info->osr_pc_offset();
- ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate, code, pc));
+ Isolate* isolate = code->GetIsolate();
+ Address pc = code->instruction_start() + pc_offset;
Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
- PatchAt(code, pc, OSR_AFTER_STACK_CHECK, patch);
+ PatchAt(*code, pc, OSR_AFTER_STACK_CHECK, patch);
}
-void BackEdgeTable::RemoveStackCheck(CompilationInfo* info) {
+void BackEdgeTable::RemoveStackCheck(Handle<Code> code, uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
- Isolate* isolate = info->isolate();
- Code* code = info->shared_info()->code();
- Address pc = code->instruction_start() + info->osr_pc_offset();
- if (GetBackEdgeState(isolate, code, pc) == OSR_AFTER_STACK_CHECK) {
+ Isolate* isolate = code->GetIsolate();
+ Address pc = code->instruction_start() + pc_offset;
+
+ if (OSR_AFTER_STACK_CHECK == GetBackEdgeState(isolate, *code, pc)) {
Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- PatchAt(code, pc, ON_STACK_REPLACEMENT, patch);
+ PatchAt(*code, pc, ON_STACK_REPLACEMENT, patch);
}
}
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index e27662e0e3..d9090a8dc8 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -52,8 +52,8 @@ class JumpPatchSite;
// debugger to piggyback on.
class BreakableStatementChecker: public AstVisitor {
public:
- explicit BreakableStatementChecker(Isolate* isolate) : is_breakable_(false) {
- InitializeAstVisitor(isolate);
+ explicit BreakableStatementChecker(Zone* zone) : is_breakable_(false) {
+ InitializeAstVisitor(zone);
}
void Check(Statement* stmt);
@@ -96,11 +96,7 @@ class FullCodeGenerator: public AstVisitor {
? info->function()->ast_node_count() : 0,
info->zone()),
back_edges_(2, info->zone()),
- type_feedback_cells_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0,
- info->zone()),
- ic_total_count_(0),
- zone_(info->zone()) {
+ ic_total_count_(0) {
Initialize();
}
@@ -122,8 +118,6 @@ class FullCodeGenerator: public AstVisitor {
return NULL;
}
- Zone* zone() const { return zone_; }
-
static const int kMaxBackEdgeWeight = 127;
// Platform-specific code size multiplier.
@@ -133,6 +127,9 @@ class FullCodeGenerator: public AstVisitor {
static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 142;
+#elif V8_TARGET_ARCH_A64
+// TODO(all): Copied ARM value. Check this is sensible for A64.
+ static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 142;
#else
@@ -437,9 +434,15 @@ class FullCodeGenerator: public AstVisitor {
void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(BailoutId id, State state);
- // Cache cell support. This associates AST ids with global property cells
- // that will be cleared during GC and collected by the type-feedback oracle.
- void RecordTypeFeedbackCell(TypeFeedbackId id, Handle<Cell> cell);
+ // Feedback slot support. The feedback vector will be cleared during gc and
+ // collected by the type-feedback oracle.
+ Handle<FixedArray> FeedbackVector() {
+ return feedback_vector_;
+ }
+ void StoreFeedbackVectorSlot(int slot, Handle<Object> object) {
+ feedback_vector_->set(slot, *object);
+ }
+ void InitializeFeedbackVector();
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -482,8 +485,8 @@ class FullCodeGenerator: public AstVisitor {
void EmitReturnSequence();
// Platform-specific code sequences for calls
- void EmitCallWithStub(Call* expr, CallFunctionFlags flags);
- void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
+ void EmitCallWithStub(Call* expr);
+ void EmitCallWithIC(Call* expr);
void EmitKeyedCallWithIC(Call* expr, Expression* key);
// Platform-specific code for inline runtime calls.
@@ -497,11 +500,6 @@ class FullCodeGenerator: public AstVisitor {
INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
- void EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask);
-
// Platform-specific code for resuming generators.
void EmitGeneratorResume(Expression *generator,
Expression *value,
@@ -560,6 +558,11 @@ class FullCodeGenerator: public AstVisitor {
void EmitVariableAssignment(Variable* var,
Token::Value op);
+ // Helper functions to EmitVariableAssignment
+ void EmitStoreToStackLocalOrContextSlot(Variable* var,
+ MemOperand location);
+ void EmitCallStoreContextSlot(Handle<String> name, LanguageMode mode);
+
// Complete a named property assignment. The receiver is expected on top
// of the stack and the right-hand-side value in the accumulator.
void EmitNamedPropertyAssignment(Assignment* expr);
@@ -570,9 +573,12 @@ class FullCodeGenerator: public AstVisitor {
void EmitKeyedPropertyAssignment(Assignment* expr);
void CallIC(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId id = TypeFeedbackId::None());
+ void CallLoadIC(ContextualMode mode,
+ TypeFeedbackId id = TypeFeedbackId::None());
+ void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
+
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
@@ -602,6 +608,9 @@ class FullCodeGenerator: public AstVisitor {
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
bool is_classic_mode() { return language_mode() == CLASSIC_MODE; }
+ StrictModeFlag strict_mode() {
+ return is_classic_mode() ? kNonStrictMode : kStrictMode;
+ }
LanguageMode language_mode() { return function()->language_mode(); }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return scope_; }
@@ -635,7 +644,6 @@ class FullCodeGenerator: public AstVisitor {
void Generate();
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
- void PopulateTypeFeedbackCells(Handle<Code> code);
Handle<FixedArray> handler_table() { return handler_table_; }
@@ -650,12 +658,6 @@ class FullCodeGenerator: public AstVisitor {
uint32_t loop_depth;
};
- struct TypeFeedbackCellEntry {
- TypeFeedbackId ast_id;
- Handle<Cell> cell;
- };
-
-
class ExpressionContext BASE_EMBEDDED {
public:
explicit ExpressionContext(FullCodeGenerator* codegen)
@@ -845,12 +847,11 @@ class FullCodeGenerator: public AstVisitor {
ZoneList<BailoutEntry> bailout_entries_;
GrowableBitVector prepared_bailout_ids_;
ZoneList<BackEdgeEntry> back_edges_;
- ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;
+ Handle<FixedArray> feedback_vector_;
Handle<Cell> profiling_counter_;
bool generate_debug_code_;
- Zone* zone_;
friend class NestedStatement;
@@ -933,10 +934,10 @@ class BackEdgeTable {
// Change a back edge patched for on-stack replacement to perform a
// stack check first.
- static void AddStackCheck(CompilationInfo* info);
+ static void AddStackCheck(Handle<Code> code, uint32_t pc_offset);
- // Remove the stack check, if available, and replace by on-stack replacement.
- static void RemoveStackCheck(CompilationInfo* info);
+ // Revert the patch by AddStackCheck.
+ static void RemoveStackCheck(Handle<Code> code, uint32_t pc_offset);
// Return the current patch state of the back edge.
static BackEdgeState GetBackEdgeState(Isolate* isolate,
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc
index 84d3bf06b8..5409a4e180 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/func-name-inferrer.cc
@@ -62,7 +62,7 @@ void FuncNameInferrer::PushLiteralName(Handle<String> name) {
void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->result_string()->Equals(*name)) {
+ if (IsOpen() && !isolate()->heap()->dot_result_string()->Equals(*name)) {
names_stack_.Add(Name(name, kVariableName), zone());
}
}
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 21cfd22334..afe5b7117d 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -1089,7 +1089,7 @@ class DebugInfoSection : public DebugSection {
w->Write<uint8_t>(sizeof(intptr_t));
w->WriteULEB128(1); // Abbreviation code.
- w->WriteString(*desc_->GetFilename());
+ w->WriteString(desc_->GetFilename().get());
w->Write<intptr_t>(desc_->CodeStart());
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
w->Write<uint32_t>(0);
@@ -1131,7 +1131,7 @@ class DebugInfoSection : public DebugSection {
for (int param = 0; param < params; ++param) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
- *scope->parameter(param)->name()->ToCString(DISALLOW_NULLS));
+ scope->parameter(param)->name()->ToCString(DISALLOW_NULLS).get());
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1182,7 +1182,7 @@ class DebugInfoSection : public DebugSection {
for (int local = 0; local < locals; ++local) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
- *stack_locals[local]->name()->ToCString(DISALLOW_NULLS));
+ stack_locals[local]->name()->ToCString(DISALLOW_NULLS).get());
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1455,7 +1455,7 @@ class DebugLineSection : public DebugSection {
w->Write<uint8_t>(1); // DW_LNS_SET_COLUMN operands count.
w->Write<uint8_t>(0); // DW_LNS_NEGATE_STMT operands count.
w->Write<uint8_t>(0); // Empty include_directories sequence.
- w->WriteString(*desc_->GetFilename()); // File name.
+ w->WriteString(desc_->GetFilename().get()); // File name.
w->WriteULEB128(0); // Current directory.
w->WriteULEB128(0); // Unknown modification time.
w->WriteULEB128(0); // Unknown file size.
@@ -2009,7 +2009,8 @@ void GDBJITInterface::AddCode(Handle<Name> name,
if (!name.is_null() && name->IsString()) {
SmartArrayPointer<char> name_cstring =
Handle<String>::cast(name)->ToCString(DISALLOW_NULLS);
- AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
+ AddCode(name_cstring.get(), *code, GDBJITInterface::FUNCTION, *script,
+ info);
} else {
AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
}
@@ -2132,7 +2133,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
Code* code) {
if (!FLAG_gdbjit) return;
if (name != NULL && name->IsString()) {
- AddCode(tag, *String::cast(name)->ToCString(DISALLOW_NULLS), code);
+ AddCode(tag, String::cast(name)->ToCString(DISALLOW_NULLS).get(), code);
} else {
AddCode(tag, "", code);
}
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index a34d3d3012..bc1a8f3646 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -50,12 +50,6 @@ class CompilationInfo;
V(KEYED_LOAD_IC) \
V(STORE_IC) \
V(KEYED_STORE_IC) \
- V(CALL_IC) \
- V(CALL_INITIALIZE) \
- V(CALL_PRE_MONOMORPHIC) \
- V(CALL_NORMAL) \
- V(CALL_MEGAMORPHIC) \
- V(CALL_MISS) \
V(STUB) \
V(BUILTIN) \
V(SCRIPT) \
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 2ebe1c0088..09449791f4 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -169,13 +169,6 @@ class GlobalHandles::Node {
flags_ = IsInNewSpaceList::update(flags_, v);
}
- bool is_revivable_callback() {
- return IsRevivableCallback::decode(flags_);
- }
- void set_revivable_callback(bool v) {
- flags_ = IsRevivableCallback::update(flags_, v);
- }
-
bool IsNearDeath() const {
// Check for PENDING to ensure correct answer when processing callbacks.
return state() == PENDING || state() == NEAR_DEATH;
@@ -234,21 +227,12 @@ class GlobalHandles::Node {
parameter_or_next_free_.next_free = value;
}
- void MakeWeak(void* parameter,
- WeakCallback weak_callback,
- RevivableCallback revivable_callback) {
- ASSERT((weak_callback == NULL) != (revivable_callback == NULL));
+ void MakeWeak(void* parameter, WeakCallback weak_callback) {
+ ASSERT(weak_callback != NULL);
ASSERT(state() != FREE);
set_state(WEAK);
set_parameter(parameter);
- if (weak_callback != NULL) {
- weak_callback_ = weak_callback;
- set_revivable_callback(false);
- } else {
- weak_callback_ =
- reinterpret_cast<WeakCallback>(revivable_callback);
- set_revivable_callback(true);
- }
+ weak_callback_ = weak_callback;
}
void ClearWeakness() {
@@ -278,20 +262,12 @@ class GlobalHandles::Node {
// Leaving V8.
VMState<EXTERNAL> state(isolate);
HandleScope handle_scope(isolate);
- if (is_revivable_callback()) {
- RevivableCallback revivable =
- reinterpret_cast<RevivableCallback>(weak_callback_);
- revivable(reinterpret_cast<v8::Isolate*>(isolate),
- reinterpret_cast<Persistent<Value>*>(&object),
- par);
- } else {
- Handle<Object> handle(*object, isolate);
- v8::WeakCallbackData<v8::Value, void> data(
- reinterpret_cast<v8::Isolate*>(isolate),
- v8::Utils::ToLocal(handle),
- par);
- weak_callback_(data);
- }
+ Handle<Object> handle(*object, isolate);
+ v8::WeakCallbackData<v8::Value, void> data(
+ reinterpret_cast<v8::Isolate*>(isolate),
+ v8::Utils::ToLocal(handle),
+ par);
+ weak_callback_(data);
}
// Absence of explicit cleanup or revival of weak handle
// in most of the cases would lead to memory leak.
@@ -325,7 +301,6 @@ class GlobalHandles::Node {
class IsIndependent: public BitField<bool, 4, 1> {};
class IsPartiallyDependent: public BitField<bool, 5, 1> {};
class IsInNewSpaceList: public BitField<bool, 6, 1> {};
- class IsRevivableCallback: public BitField<bool, 7, 1> {};
uint8_t flags_;
@@ -522,10 +497,8 @@ void GlobalHandles::Destroy(Object** location) {
void GlobalHandles::MakeWeak(Object** location,
void* parameter,
- WeakCallback weak_callback,
- RevivableCallback revivable_callback) {
- Node::FromLocation(location)->MakeWeak(
- parameter, weak_callback, revivable_callback);
+ WeakCallback weak_callback) {
+ Node::FromLocation(location)->MakeWeak(parameter, weak_callback);
}
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 4b46aac05d..a40645199c 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -135,7 +135,6 @@ class GlobalHandles {
static void Destroy(Object** location);
typedef WeakCallbackData<v8::Value, void>::Callback WeakCallback;
- typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
@@ -145,14 +144,7 @@ class GlobalHandles {
  // reason is that Smi::FromInt(0) does not change during garbage collection.
static void MakeWeak(Object** location,
void* parameter,
- WeakCallback weak_callback,
- RevivableCallback revivable_callback);
-
- static inline void MakeWeak(Object** location,
- void* parameter,
- RevivableCallback revivable_callback) {
- MakeWeak(location, parameter, NULL, revivable_callback);
- }
+ WeakCallback weak_callback);
void RecordStats(HeapStats* stats);
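The weak-handle API now has a single callback shape: a function taking a v8::WeakCallbackData, as the reworked ClearWrapperCache in handles.cc further down illustrates. A minimal sketch of a callback written against that shape (the name and parameter use here are illustrative, not from the patch):

    #include <v8.h>

    // Matches GlobalHandles::WeakCallback, i.e.
    // WeakCallbackData<v8::Value, void>::Callback.
    static void OnWeak(const v8::WeakCallbackData<v8::Value, void>& data) {
      void* parameter = data.GetParameter();         // value passed to MakeWeak
      v8::Isolate* isolate = data.GetIsolate();      // isolate owning the handle
      v8::Local<v8::Value> value = data.GetValue();  // object about to go away
      (void)parameter;
      (void)isolate;
      (void)value;
      // Typical cleanup mirrors ClearWrapperCache: release any cache that points
      // at the object, then destroy the global handle itself.
    }

It would be registered much as GetScriptWrapper does below, e.g. GlobalHandles::MakeWeak(location, parameter, &OnWeak).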
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 3456030b7e..8a67632d2c 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -38,7 +38,7 @@
#if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
# include <limits> // NOLINT
# define V8_INFINITY std::numeric_limits<double>::infinity()
-#elif V8_CC_MSVC
+#elif V8_LIBC_MSVCRT
# define V8_INFINITY HUGE_VAL
#else
# define V8_INFINITY INFINITY
@@ -71,6 +71,10 @@ namespace internal {
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__AARCH64EL__)
+#define V8_HOST_ARCH_A64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
@@ -78,7 +82,7 @@ namespace internal {
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
-#error Host architecture was not detected as supported by v8
+#error "Host architecture was not detected as supported by v8"
#endif
#if defined(__ARM_ARCH_7A__) || \
@@ -95,11 +99,13 @@ namespace internal {
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_A64 && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
+#elif defined(__AARCH64EL__)
+#define V8_TARGET_ARCH_A64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(__MIPSEL__)
@@ -119,6 +125,9 @@ namespace internal {
#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
+#if (V8_TARGET_ARCH_A64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_A64))
+#error Target architecture a64 is only supported on a64 and x64 host
+#endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
@@ -127,6 +136,9 @@ namespace internal {
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
+#if (V8_TARGET_ARCH_A64 && !V8_HOST_ARCH_A64)
+#define USE_SIMULATOR 1
+#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
@@ -142,6 +154,8 @@ namespace internal {
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_A64
+#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_LITTLE_ENDIAN 1
#else
@@ -187,8 +201,13 @@ typedef byte* Address;
# define V8_INTPTR_C(x) (x ## LL)
# define V8_PTR_PREFIX "I64"
#elif V8_HOST_ARCH_64_BIT
-# define V8_UINT64_C(x) (x ## UL)
-# define V8_INT64_C(x) (x ## L)
+# if V8_OS_MACOSX
+# define V8_UINT64_C(x) (x ## ULL)
+# define V8_INT64_C(x) (x ## LL)
+# else
+# define V8_UINT64_C(x) (x ## UL)
+# define V8_INT64_C(x) (x ## L)
+# endif
# define V8_INTPTR_C(x) (x ## L)
# define V8_PTR_PREFIX "l"
#else
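The extra V8_OS_MACOSX branch is presumably there because Darwin's <stdint.h> defines uint64_t as unsigned long long even in 64-bit builds, so 64-bit literals need the LL/ULL suffixes, whereas an LP64 Linux uint64_t is unsigned long. A small compile-time check of that assumption (not verified against every SDK):

    #include <cstdint>
    #include <type_traits>

    #if defined(__APPLE__) && defined(__MACH__)
    // If this holds, a UL-suffixed 64-bit literal would have a different type
    // than uint64_t on Mac OS X, which is what the ULL/LL branch avoids.
    static_assert(std::is_same<std::uint64_t, unsigned long long>::value,
                  "Darwin is expected to define uint64_t as unsigned long long");
    #endif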
@@ -208,13 +227,12 @@ typedef byte* Address;
#define V8PRIuPTR V8_PTR_PREFIX "u"
// Fix for Mac OS X defining uintptr_t as "unsigned long":
-#if defined(__APPLE__) && defined(__MACH__)
+#if V8_OS_MACOSX
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#endif
-#if (defined(__APPLE__) && defined(__MACH__)) || \
- defined(__FreeBSD__) || defined(__OpenBSD__)
+#if V8_OS_MACOSX || defined(__FreeBSD__) || defined(__OpenBSD__)
#define USING_BSD_ABI
#endif
@@ -226,6 +244,14 @@ const int MB = KB * KB;
const int GB = KB * KB * KB;
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;
+const int kMaxInt8 = (1 << 7) - 1;
+const int kMinInt8 = -(1 << 7);
+const int kMaxUInt8 = (1 << 8) - 1;
+const int kMinUInt8 = 0;
+const int kMaxInt16 = (1 << 15) - 1;
+const int kMinInt16 = -(1 << 15);
+const int kMaxUInt16 = (1 << 16) - 1;
+const int kMinUInt16 = 0;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
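The new limits are the usual two's-complement bounds for 8- and 16-bit values: for instance (1 << 7) - 1 is 127 and -(1 << 7) is -128. A few compile-time spot checks of the expressions used above (sketch):

    // Spot checks of the constants introduced in this hunk.
    static_assert((1 << 7) - 1 == 127, "kMaxInt8");
    static_assert(-(1 << 7) == -128, "kMinInt8");
    static_assert((1 << 8) - 1 == 255, "kMaxUInt8");
    static_assert((1 << 15) - 1 == 32767, "kMaxInt16");
    static_assert(-(1 << 15) == -32768, "kMinInt16");
    static_assert((1 << 16) - 1 == 65535, "kMaxUInt16");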
@@ -243,9 +269,6 @@ const int kFPOnStackSize = kRegisterSize;
const int kDoubleSizeLog2 = 3;
-// Size of the state of a the random number generator.
-const int kRandomStateSize = 2 * kIntSize;
-
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index ec69c3fdbe..22bbd7cd7c 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -30,7 +30,6 @@
#define V8_HANDLES_INL_H_
#include "api.h"
-#include "apiutils.h"
#include "handles.h"
#include "heap.h"
#include "isolate.h"
@@ -110,8 +109,7 @@ bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
HandleScope::HandleScope(Isolate* isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
isolate_ = isolate;
prev_next_ = current->next;
prev_limit_ = current->limit;
@@ -127,8 +125,7 @@ HandleScope::~HandleScope() {
void HandleScope::CloseScope(Isolate* isolate,
Object** prev_next,
Object** prev_limit) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
std::swap(current->next, prev_next);
current->level--;
@@ -146,8 +143,7 @@ void HandleScope::CloseScope(Isolate* isolate,
template <typename T>
Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
T* value = *handle_value;
// Throw away all handles in the current scope.
@@ -167,8 +163,7 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
template <typename T>
T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
ASSERT(AllowHandleAllocation::IsAllowed());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
internal::Object** cur = current->next;
if (cur == current->limit) cur = Extend(isolate);
@@ -187,8 +182,7 @@ T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
// Make sure the current thread is allowed to create handles to begin with.
CHECK(AllowHandleAllocation::IsAllowed());
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
limit_ = current->limit;
@@ -201,8 +195,7 @@ inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
inline SealHandleScope::~SealHandleScope() {
// Restore state in current handle scope to re-enable handle
// allocations.
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
+ HandleScopeData* current = isolate_->handle_scope_data();
ASSERT_EQ(0, current->level);
current->level = level_;
ASSERT_EQ(current->next, current->limit);
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 4cb1827d8e..830eb09602 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -55,17 +55,16 @@ int HandleScope::NumberOfHandles(Isolate* isolate) {
Object** HandleScope::Extend(Isolate* isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
Object** result = current->next;
ASSERT(result == current->limit);
// Make sure there's at least one scope on the stack and that the
// top of the scope stack isn't a barrier.
- if (current->level == 0) {
- Utils::ReportApiFailure("v8::HandleScope::CreateHandle()",
- "Cannot create a handle without a HandleScope");
+ if (!Utils::ApiCheck(current->level != 0,
+ "v8::HandleScope::CreateHandle()",
+ "Cannot create a handle without a HandleScope")) {
return NULL;
}
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
@@ -95,8 +94,7 @@ Object** HandleScope::Extend(Isolate* isolate) {
void HandleScope::DeleteExtensions(Isolate* isolate) {
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ HandleScopeData* current = isolate->handle_scope_data();
isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}
@@ -160,30 +158,12 @@ Handle<String> FlattenGetString(Handle<String> string) {
}
-Handle<Object> SetProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(
- isolate,
- Runtime::SetObjectProperty(
- isolate, object, key, value, attributes, strict_mode),
- Object);
-}
-
-
Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(
- isolate,
- Runtime::ForceSetObjectProperty(
- isolate, object, key, value, attributes),
- Object);
+ return Runtime::ForceSetObjectProperty(object->GetIsolate(), object, key,
+ value, attributes);
}
@@ -229,11 +209,12 @@ Handle<Object> GetProperty(Isolate* isolate,
}
-Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
+Handle<String> LookupSingleCharacterStringFromCode(Isolate* isolate,
uint32_t index) {
CALL_HEAP_FUNCTION(
isolate,
- isolate->heap()->LookupSingleCharacterStringFromCode(index), Object);
+ isolate->heap()->LookupSingleCharacterStringFromCode(index),
+ String);
}
@@ -243,26 +224,24 @@ Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
// collector will call the weak callback on the global handle
// associated with the wrapper and get rid of both the wrapper and the
// handle.
-static void ClearWrapperCache(v8::Isolate* v8_isolate,
- Persistent<v8::Value>* handle,
- void*) {
- Handle<Object> cache = Utils::OpenPersistent(handle);
- JSValue* wrapper = JSValue::cast(*cache);
+static void ClearWrapperCache(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ Object** location = reinterpret_cast<Object**>(data.GetParameter());
+ JSValue* wrapper = JSValue::cast(*location);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();
- ASSERT(foreign->foreign_address() ==
- reinterpret_cast<Address>(cache.location()));
+ ASSERT_EQ(foreign->foreign_address(), reinterpret_cast<Address>(location));
foreign->set_foreign_address(0);
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- isolate->global_handles()->Destroy(cache.location());
+ GlobalHandles::Destroy(location);
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
isolate->counters()->script_wrappers()->Decrement();
}
Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
if (script->wrapper()->foreign_address() != NULL) {
- // Return the script wrapper directly from the cache.
+ // Return a handle for the existing script wrapper from the cache.
return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
+ *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
}
Isolate* isolate = script->GetIsolate();
// Construct a new script wrapper.
@@ -273,10 +252,10 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
// The allocation might have triggered a GC, which could have called this
// function recursively, and a wrapper has already been created and cached.
- // In that case, simply return the cached wrapper.
+ // In that case, simply return a handle for the cached wrapper.
if (script->wrapper()->foreign_address() != NULL) {
return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
+ *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
}
result->set_value(*script);
@@ -285,9 +264,9 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
// for future use. The cache will automatically be cleared by the
// garbage collector when it is not used anymore.
Handle<Object> handle = isolate->global_handles()->Create(*result);
- isolate->global_handles()->MakeWeak(handle.location(),
- NULL,
- &ClearWrapperCache);
+ GlobalHandles::MakeWeak(handle.location(),
+ reinterpret_cast<void*>(handle.location()),
+ &ClearWrapperCache);
script->wrapper()->set_foreign_address(
reinterpret_cast<Address>(handle.location()));
return result;
@@ -655,7 +634,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
// present enum cache. The first step to using the cache is to set the
// enum length of the map by counting the number of own descriptors that
// are not DONT_ENUM or SYMBOLIC.
- if (own_property_count == Map::kInvalidEnumCache) {
+ if (own_property_count == kInvalidEnumCacheSentinel) {
own_property_count = object->map()->NumberOfDescribedProperties(
OWN_DESCRIPTORS, DONT_SHOW);
@@ -767,36 +746,10 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
}
-Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
- Handle<Object> key) {
- CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Add(*key),
- ObjectHashSet);
-}
-
-
-Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
- Handle<Object> key) {
- CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Remove(*key),
- ObjectHashSet);
-}
-
-
-Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<Object> key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Put(*key, *value),
- ObjectHashTable);
-}
-
-
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
impl_->BeginDeferredScope();
- v8::ImplementationUtilities::HandleScopeData* data =
- impl_->isolate()->handle_scope_data();
+ HandleScopeData* data = impl_->isolate()->handle_scope_data();
Object** new_next = impl_->GetSpareOrNewBlock();
Object** new_limit = &new_next[kHandleBlockSize];
ASSERT(data->limit == &impl_->blocks()->last()[kHandleBlockSize]);
@@ -822,8 +775,7 @@ DeferredHandleScope::~DeferredHandleScope() {
DeferredHandles* DeferredHandleScope::Detach() {
DeferredHandles* deferred = impl_->Detach(prev_limit_);
- v8::ImplementationUtilities::HandleScopeData* data =
- impl_->isolate()->handle_scope_data();
+ HandleScopeData* data = impl_->isolate()->handle_scope_data();
data->next = prev_next_;
data->limit = prev_limit_;
#ifdef DEBUG
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index cfdecac190..8538658047 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -29,7 +29,6 @@
#define V8_HANDLES_H_
#include "allocation.h"
-#include "apiutils.h"
#include "objects.h"
namespace v8 {
@@ -83,7 +82,7 @@ class Handle {
// Closes the given scope, but lets this handle escape. See
// implementation in api.h.
- inline Handle<T> EscapeFrom(v8::HandleScope* scope);
+ inline Handle<T> EscapeFrom(v8::EscapableHandleScope* scope);
#ifdef DEBUG
enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
@@ -228,13 +227,6 @@ void FlattenString(Handle<String> str);
// string.
Handle<String> FlattenGetString(Handle<String> str);
-Handle<Object> SetProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
@@ -252,7 +244,7 @@ Handle<Object> GetProperty(Isolate* isolate,
Handle<Object> obj,
Handle<Object> key);
-Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
+Handle<String> LookupSingleCharacterStringFromCode(Isolate* isolate,
uint32_t index);
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
@@ -303,16 +295,6 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<JSFunction> constructor,
Handle<JSGlobalProxy> global);
-Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
- Handle<Object> key);
-
-Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
- Handle<Object> key);
-
-Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<Object> key,
- Handle<Object> value);
-
void AddWeakObjectToCodeDependency(Heap* heap,
Handle<Object> object,
Handle<Code> code);
@@ -334,6 +316,17 @@ class SealHandleScope BASE_EMBEDDED {
#endif
};
+struct HandleScopeData {
+ internal::Object** next;
+ internal::Object** limit;
+ int level;
+
+ void Initialize() {
+ next = limit = NULL;
+ level = 0;
+ }
+};
+
} } // namespace v8::internal
#endif // V8_HANDLES_H_
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
index e440299ff6..2cedebaae1 100644
--- a/deps/v8/src/harmony-array.js
+++ b/deps/v8/src/harmony-array.js
@@ -35,10 +35,7 @@
// ES6 draft 07-15-13, section 15.4.3.23
function ArrayFind(predicate /* thisArg */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.find"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
var array = ToObject(this);
var length = ToInteger(array.length);
@@ -73,10 +70,7 @@ function ArrayFind(predicate /* thisArg */) { // length == 1
// ES6 draft 07-15-13, section 15.4.3.24
function ArrayFindIndex(predicate /* thisArg */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Array.prototype.findIndex"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
var array = ToObject(this);
var length = ToInteger(array.length);
@@ -121,4 +115,4 @@ function HarmonyArrayExtendArrayPrototype() {
));
}
-HarmonyArrayExtendArrayPrototype(); \ No newline at end of file
+HarmonyArrayExtendArrayPrototype();
diff --git a/deps/v8/src/harmony-math.js b/deps/v8/src/harmony-math.js
index a4d3f2e8a5..c856ce72b2 100644
--- a/deps/v8/src/harmony-math.js
+++ b/deps/v8/src/harmony-math.js
@@ -47,14 +47,153 @@ function MathTrunc(x) {
}
+// ES6 draft 09-27-13, section 20.2.2.30.
+function MathSinh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ // Idempotent for NaN, +/-0 and +/-Infinity.
+ if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
+ return (MathExp(x) - MathExp(-x)) / 2;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.12.
+function MathCosh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ if (!NUMBER_IS_FINITE(x)) return MathAbs(x);
+ return (MathExp(x) + MathExp(-x)) / 2;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.33.
+function MathTanh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ // Idempotent for +/-0.
+ if (x === 0) return x;
+ // Returns +/-1 for +/-Infinity.
+ if (!NUMBER_IS_FINITE(x)) return MathSign(x);
+ var exp1 = MathExp(x);
+ var exp2 = MathExp(-x);
+ return (exp1 - exp2) / (exp1 + exp2);
+}
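The three hyperbolic functions above are computed straight from the exponential identities sinh x = (e^x - e^-x) / 2, cosh x = (e^x + e^-x) / 2 and tanh x = sinh x / cosh x, with the NaN, signed-zero and infinity cases peeled off first so they keep their IEEE behaviour. A plain C++ restatement of the formulas, as a sketch that omits those special cases:

    #include <cmath>

    // Special cases (NaN, +/-0, +/-Infinity) are omitted here; the JS code
    // above handles them before applying the formulas.
    double Sinh(double x) { return (std::exp(x) - std::exp(-x)) / 2; }
    double Cosh(double x) { return (std::exp(x) + std::exp(-x)) / 2; }
    double Tanh(double x) {
      double e1 = std::exp(x);
      double e2 = std::exp(-x);
      return (e1 - e2) / (e1 + e2);  // equivalent to Sinh(x) / Cosh(x)
    }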
+
+
+// ES6 draft 09-27-13, section 20.2.2.5.
+function MathAsinh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ // Idempotent for NaN, +/-0 and +/-Infinity.
+ if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
+ if (x > 0) return MathLog(x + MathSqrt(x * x + 1));
+ // This is to prevent numerical errors caused by large negative x.
+ return -MathLog(-x + MathSqrt(x * x + 1));
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.3.
+function MathAcosh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ if (x < 1) return NAN;
+ // Idempotent for NaN and +Infinity.
+ if (!NUMBER_IS_FINITE(x)) return x;
+ return MathLog(x + MathSqrt(x + 1) * MathSqrt(x - 1));
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.7.
+function MathAtanh(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ // Idempotent for +/-0.
+ if (x === 0) return x;
+ // Returns NaN for NaN and +/- Infinity.
+ if (!NUMBER_IS_FINITE(x)) return NAN;
+ return 0.5 * MathLog((1 + x) / (1 - x));
+}
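The inverse functions use the logarithmic identities asinh x = ln(x + sqrt(x^2 + 1)), acosh x = ln(x + sqrt(x + 1) * sqrt(x - 1)) and atanh x = ln((1 + x) / (1 - x)) / 2. The separate branch in MathAsinh for negative x relies on the function being odd: for large negative x the direct sum x + sqrt(x^2 + 1) cancels almost completely. At x = -1e8, for example, the +1 under the root is lost in double precision, the sum collapses to exactly zero and the logarithm returns -Infinity, while the rewritten form gives the correct value of roughly -19.1. A C++ sketch of the same split:

    #include <cmath>

    // Direct formula, well-behaved for x >= 0.
    double AsinhDirect(double x) { return std::log(x + std::sqrt(x * x + 1)); }

    // Use oddness for negative x, as the patch does, to avoid the catastrophic
    // cancellation in x + sqrt(x*x + 1) when x is large and negative.
    double Asinh(double x) {
      if (x >= 0) return AsinhDirect(x);
      return -AsinhDirect(-x);
    }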
+
+
+// ES6 draft 09-27-13, section 20.2.2.21.
+function MathLog10(x) {
+ return MathLog(x) * 0.434294481903251828; // log10(x) = log(x)/log(10).
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.22.
+function MathLog2(x) {
+ return MathLog(x) * 1.442695040888963407; // log2(x) = log(x)/log(2).
+}
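Both are plain change-of-base multiplications: the constants are log10(e) ≈ 0.4342944819 and log2(e) ≈ 1.4426950409, so for example log2(8) = ln(8) * log2(e) = 3. A direct C++ equivalent with two numeric spot checks (sketch):

    #include <cassert>
    #include <cmath>

    double Log10(double x) { return std::log(x) * 0.434294481903251828; }
    double Log2(double x)  { return std::log(x) * 1.442695040888963407; }

    int main() {
      assert(std::fabs(Log10(1000) - 3) < 1e-12);  // log10(1000) == 3
      assert(std::fabs(Log2(8) - 3) < 1e-12);      // log2(8) == 3
      return 0;
    }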
+
+
+// ES6 draft 09-27-13, section 20.2.2.17.
+function MathHypot(x, y) { // Function length is 2.
+ // We may want to introduce fast paths for two arguments and when
+ // normalization to avoid overflow is not necessary. For now, we
+ // simply assume the general case.
+ var length = %_ArgumentsLength();
+ var args = new InternalArray(length);
+ var max = 0;
+ for (var i = 0; i < length; i++) {
+ var n = %_Arguments(i);
+ if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
+ if (n === INFINITY || n === -INFINITY) return INFINITY;
+ n = MathAbs(n);
+ if (n > max) max = n;
+ args[i] = n;
+ }
+
+ // Kahan summation to avoid rounding errors.
+ // Normalize the numbers to the largest one to avoid overflow.
+ if (max === 0) max = 1;
+ var sum = 0;
+ var compensation = 0;
+ for (var i = 0; i < length; i++) {
+ var n = args[i] / max;
+ var summand = n * n - compensation;
+ var preliminary = sum + summand;
+ compensation = (preliminary - sum) - summand;
+ sum = preliminary;
+ }
+ return MathSqrt(sum) * max;
+}
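The loop guards against overflow by dividing every term by the largest magnitude before squaring, so each squared term is at most 1, and against rounding drift by carrying a Kahan compensation term. A stand-alone C++ version of the same idea (sketch; the infinity handling and argument coercion from the JS version are omitted):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    double Hypot(const std::vector<double>& xs) {
      // Normalize by the largest magnitude so n * n cannot overflow.
      double max = 0;
      for (double x : xs) max = std::max(max, std::fabs(x));
      if (max == 0) return 0;
      // Kahan-compensated sum of the normalized squares.
      double sum = 0;
      double compensation = 0;
      for (double x : xs) {
        double n = std::fabs(x) / max;
        double summand = n * n - compensation;
        double preliminary = sum + summand;
        compensation = (preliminary - sum) - summand;
        sum = preliminary;
      }
      return std::sqrt(sum) * max;  // undo the normalization
    }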
+
+
+// ES6 draft 09-27-13, section 20.2.2.16.
+function MathFround(x) {
+ return %Math_fround(TO_NUMBER_INLINE(x));
+}
+
+
+function MathClz32(x) {
+ x = ToUint32(TO_NUMBER_INLINE(x));
+ if (x == 0) return 32;
+ var result = 0;
+ // Binary search.
+ if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; };
+ if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; };
+ if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; };
+ if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; };
+ if ((x & 0x80000000) === 0) { x <<= 1; result += 1; };
+ return result;
+}
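MathClz32 narrows down the position of the highest set bit with five mask-and-shift steps, a binary search over the 32 bit positions. For example, clz32(0x00010000) takes the 8-, 4-, 2- and 1-bit branches and returns 8 + 4 + 2 + 1 = 15. The same routine in C++ (sketch):

    #include <cstdint>

    uint32_t Clz32(uint32_t x) {
      if (x == 0) return 32;
      uint32_t result = 0;
      // Each step asks: is the remaining high half empty? If so, shift it away
      // and record how many leading zero bits that accounts for.
      if ((x & 0xFFFF0000u) == 0) { x <<= 16; result += 16; }
      if ((x & 0xFF000000u) == 0) { x <<= 8;  result += 8; }
      if ((x & 0xF0000000u) == 0) { x <<= 4;  result += 4; }
      if ((x & 0xC0000000u) == 0) { x <<= 2;  result += 2; }
      if ((x & 0x80000000u) == 0) { result += 1; }
      return result;  // Clz32(0x00010000) == 15
    }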
+
+
function ExtendMath() {
%CheckIsBootstrapping();
// Set up the non-enumerable functions on the Math object.
InstallFunctions($Math, DONT_ENUM, $Array(
"sign", MathSign,
- "trunc", MathTrunc
+ "trunc", MathTrunc,
+ "sinh", MathSinh,
+ "cosh", MathCosh,
+ "tanh", MathTanh,
+ "asinh", MathAsinh,
+ "acosh", MathAcosh,
+ "atanh", MathAtanh,
+ "log10", MathLog10,
+ "log2", MathLog2,
+ "hypot", MathHypot,
+ "fround", MathFround,
+ "clz32", MathClz32
));
}
+
ExtendMath();
diff --git a/deps/v8/src/harmony-string.js b/deps/v8/src/harmony-string.js
index a5c6f4e2ec..cc3c5cf93c 100644
--- a/deps/v8/src/harmony-string.js
+++ b/deps/v8/src/harmony-string.js
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,12 +34,9 @@
// -------------------------------------------------------------------
-// ES6 draft 07-15-13, section 15.5.3.21
+// ES6 draft 01-20-14, section 21.1.3.13
function StringRepeat(count) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.repeat"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
var s = TO_STRING_INLINE(this);
var n = ToInteger(count);
@@ -56,14 +53,17 @@ function StringRepeat(count) {
}
-// ES6 draft 07-15-13, section 15.5.3.22
+// ES6 draft 01-20-14, section 21.1.3.18
function StringStartsWith(searchString /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith");
+
+ var s = TO_STRING_INLINE(this);
+
+ if (IS_REGEXP(searchString)) {
+ throw MakeTypeError("first_argument_not_regexp",
["String.prototype.startsWith"]);
}
- var s = TO_STRING_INLINE(this);
var ss = TO_STRING_INLINE(searchString);
var pos = 0;
if (%_ArgumentsLength() > 1) {
@@ -82,14 +82,17 @@ function StringStartsWith(searchString /* position */) { // length == 1
}
-// ES6 draft 07-15-13, section 15.5.3.23
+// ES6 draft 01-20-14, section 21.1.3.7
function StringEndsWith(searchString /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith");
+
+ var s = TO_STRING_INLINE(this);
+
+ if (IS_REGEXP(searchString)) {
+ throw MakeTypeError("first_argument_not_regexp",
["String.prototype.endsWith"]);
}
- var s = TO_STRING_INLINE(this);
var ss = TO_STRING_INLINE(searchString);
var s_len = s.length;
var pos = s_len;
@@ -111,12 +114,9 @@ function StringEndsWith(searchString /* position */) { // length == 1
}
-// ES6 draft 07-15-13, section 15.5.3.24
+// ES6 draft 01-20-14, section 21.1.3.6
function StringContains(searchString /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.contains"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.contains");
var s = TO_STRING_INLINE(this);
var ss = TO_STRING_INLINE(searchString);
@@ -151,4 +151,4 @@ function ExtendStringPrototype() {
));
}
-ExtendStringPrototype(); \ No newline at end of file
+ExtendStringPrototype();
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index ad6f44f935..a45e3ab9d9 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -28,7 +28,10 @@
#ifndef V8_HEAP_INL_H_
#define V8_HEAP_INL_H_
+#include <cmath>
+
#include "heap.h"
+#include "heap-profiler.h"
#include "isolate.h"
#include "list-inl.h"
#include "objects.h"
@@ -217,10 +220,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
ASSERT(AllowHandleAllocation::IsAllowed());
ASSERT(AllowHeapAllocation::IsAllowed());
ASSERT(gc_state_ == NOT_IN_GC);
- ASSERT(space != NEW_SPACE ||
- retry_space == OLD_POINTER_SPACE ||
- retry_space == OLD_DATA_SPACE ||
- retry_space == LO_SPACE);
+ HeapProfiler* profiler = isolate_->heap_profiler();
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
!disallow_allocation_failure_ &&
@@ -230,12 +230,17 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
isolate_->counters()->objs_since_last_full()->Increment();
isolate_->counters()->objs_since_last_young()->Increment();
#endif
+
+ HeapObject* object;
MaybeObject* result;
if (NEW_SPACE == space) {
result = new_space_.AllocateRaw(size_in_bytes);
- if (always_allocate() && result->IsFailure()) {
+ if (always_allocate() && result->IsFailure() && retry_space != NEW_SPACE) {
space = retry_space;
} else {
+ if (profiler->is_tracking_allocations() && result->To(&object)) {
+ profiler->AllocationEvent(object->address(), size_in_bytes);
+ }
return result;
}
}
@@ -257,6 +262,9 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
result = map_space_->AllocateRaw(size_in_bytes);
}
if (result->IsFailure()) old_gen_exhausted_ = true;
+ if (profiler->is_tracking_allocations() && result->To(&object)) {
+ profiler->AllocationEvent(object->address(), size_in_bytes);
+ }
return result;
}
@@ -410,7 +418,7 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
}
-bool Heap::AllowedToBeMigrated(HeapObject* object, AllocationSpace dst) {
+bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
// Object migration is governed by the following rules:
//
// 1) Objects in new-space can be migrated to one of the old spaces
@@ -420,18 +428,22 @@ bool Heap::AllowedToBeMigrated(HeapObject* object, AllocationSpace dst) {
// fixed arrays in new-space, old-data-space and old-pointer-space.
// 4) Fillers (one word) can never migrate, they are skipped by
// incremental marking explicitly to prevent invalid pattern.
+ // 5) Short external strings can end up in old pointer space when a cons
+ // string in old pointer space is made external (String::MakeExternal).
//
// Since this function is used for debugging only, we do not place
// asserts here, but check everything explicitly.
- if (object->map() == one_pointer_filler_map()) return false;
- InstanceType type = object->map()->instance_type();
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ if (obj->map() == one_pointer_filler_map()) return false;
+ InstanceType type = obj->map()->instance_type();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
AllocationSpace src = chunk->owner()->identity();
switch (src) {
case NEW_SPACE:
return dst == src || dst == TargetSpaceId(type);
case OLD_POINTER_SPACE:
- return dst == src && (dst == TargetSpaceId(type) || object->IsFiller());
+ return dst == src &&
+ (dst == TargetSpaceId(type) || obj->IsFiller() ||
+ (obj->IsExternalString() && ExternalString::cast(obj)->is_short()));
case OLD_DATA_SPACE:
return dst == src && dst == TargetSpaceId(type);
case CODE_SPACE:
@@ -478,6 +490,39 @@ void Heap::ScavengePointer(HeapObject** p) {
}
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
+ Heap* heap = object->GetHeap();
+ ASSERT(heap->InFromSpace(object));
+
+ if (!FLAG_allocation_site_pretenuring ||
+ !AllocationSite::CanTrack(object->map()->instance_type())) return;
+
+ // Check if there is potentially a memento behind the object. If
+ // the last word of the memento is on another page, we return
+ // immediately. Note that we do not have to compare with the current
+ // top pointer of the from space page, since we always install filler
+ // objects above the top pointer of a from space page when performing
+ // a garbage collection.
+ Address object_address = object->address();
+ Address memento_address = object_address + object->Size();
+ Address last_memento_word_address = memento_address + kPointerSize;
+ if (!NewSpacePage::OnSamePage(object_address,
+ last_memento_word_address)) {
+ return;
+ }
+
+ HeapObject* candidate = HeapObject::FromAddress(memento_address);
+ if (candidate->map() != heap->allocation_memento_map()) return;
+
+ AllocationMemento* memento = AllocationMemento::cast(candidate);
+ if (!memento->IsValid()) return;
+
+ if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
+ heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite());
+ }
+}
+
+
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
@@ -496,12 +541,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
- if (AllocationMemento::FindForJSObject(JSObject::cast(object), true) !=
- NULL) {
- object->GetIsolate()->heap()->allocation_mementos_found_++;
- }
- }
+ UpdateAllocationSiteFeedback(object);
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
@@ -510,10 +550,12 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}
-bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
+bool Heap::CollectGarbage(AllocationSpace space,
+ const char* gc_reason,
+ const v8::GCCallbackFlags callbackFlags) {
const char* collector_reason = NULL;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
- return CollectGarbage(space, collector, gc_reason, collector_reason);
+ return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
}
@@ -536,10 +578,10 @@ MaybeObject* Heap::PrepareForCompare(String* str) {
}
-intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes) {
+int64_t Heap::AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes) {
ASSERT(HasBeenSetUp());
- intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
+ int64_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes > 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
@@ -549,7 +591,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
amount_of_external_allocated_memory_ = 0;
amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
- intptr_t amount_since_last_global_gc = PromotedExternalMemorySize();
+ int64_t amount_since_last_global_gc = PromotedExternalMemorySize();
if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
}
@@ -568,9 +610,9 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
"amount=%6" V8_PTR_PREFIX "d KB, since_gc=%6" V8_PTR_PREFIX "d KB, "
"isolate=0x%08" V8PRIxPTR ".\n",
- change_in_bytes / KB,
- amount_of_external_allocated_memory_ / KB,
- PromotedExternalMemorySize() / KB,
+ static_cast<intptr_t>(change_in_bytes / KB),
+ static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB),
+ static_cast<intptr_t>(PromotedExternalMemorySize() / KB),
reinterpret_cast<intptr_t>(isolate()));
}
ASSERT(amount_of_external_allocated_memory_ >= 0);
@@ -629,7 +671,7 @@ Isolate* Heap::isolate() {
} \
if (__maybe_object__->IsRetryAfterGC()) { \
/* TODO(1181417): Fix this. */ \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
+ v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);\
} \
RETURN_EMPTY; \
} while (false)
@@ -641,7 +683,7 @@ Isolate* Heap::isolate() {
FUNCTION_CALL, \
RETURN_VALUE, \
RETURN_EMPTY, \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY", true))
+ v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY", true))
#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
CALL_AND_RETRY_OR_DIE(ISOLATE, \
@@ -735,69 +777,6 @@ void Heap::CompletelyClearInstanceofCache() {
}
-MaybeObject* TranscendentalCache::Get(Type type, double input) {
- SubCache* cache = caches_[type];
- if (cache == NULL) {
- caches_[type] = cache = new SubCache(isolate_, type);
- }
- return cache->Get(input);
-}
-
-
-Address TranscendentalCache::cache_array_address() {
- return reinterpret_cast<Address>(caches_);
-}
-
-
-double TranscendentalCache::SubCache::Calculate(double input) {
- switch (type_) {
- case ACOS:
- return acos(input);
- case ASIN:
- return asin(input);
- case ATAN:
- return atan(input);
- case COS:
- return fast_cos(input);
- case EXP:
- return exp(input);
- case LOG:
- return fast_log(input);
- case SIN:
- return fast_sin(input);
- case TAN:
- return fast_tan(input);
- default:
- return 0.0; // Never happens.
- }
-}
-
-
-MaybeObject* TranscendentalCache::SubCache::Get(double input) {
- Converter c;
- c.dbl = input;
- int hash = Hash(c);
- Element e = elements_[hash];
- if (e.in[0] == c.integers[0] &&
- e.in[1] == c.integers[1]) {
- ASSERT(e.output != NULL);
- isolate_->counters()->transcendental_cache_hit()->Increment();
- return e.output;
- }
- double answer = Calculate(input);
- isolate_->counters()->transcendental_cache_miss()->Increment();
- Object* heap_number;
- { MaybeObject* maybe_heap_number =
- isolate_->heap()->AllocateHeapNumber(answer);
- if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
- }
- elements_[hash].in[0] = c.integers[0];
- elements_[hash].in[1] = c.integers[1];
- elements_[hash].output = heap_number;
- return heap_number;
-}
-
-
AlwaysAllocateScope::AlwaysAllocateScope() {
// We shouldn't hit any nested scopes, because that requires
// non-handle code to call handle code. The code still works but
@@ -841,6 +820,13 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
}
+void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ CHECK((*current)->IsSmi());
+ }
+}
+
+
double GCTracer::SizeOfHeapObjects() {
return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}
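
One detail worth calling out from the UpdateAllocationSiteFeedback hunk above: the candidate memento is only inspected when its last word is guaranteed to lie on the same new-space page as the object; combined with the filler objects mentioned in the comment, that makes the speculative read behind the object safe. A rough standalone sketch of that address check, using our own names and an assumed power-of-two page size (V8's NewSpacePage::OnSamePage presumably performs an analogous mask comparison):

#include <cstddef>
#include <cstdint>
#include <cstdio>

static const uintptr_t kPageSize = 1u << 20;  // assumption made for this sketch
static const uintptr_t kPageAlignmentMask = kPageSize - 1;
static const uintptr_t kPointerSize = sizeof(void*);

// Two addresses are on the same page iff they share the page-aligned base.
static bool OnSamePage(uintptr_t a, uintptr_t b) {
  return (a & ~kPageAlignmentMask) == (b & ~kPageAlignmentMask);
}

// Mirrors the guard in UpdateAllocationSiteFeedback: a memento, if present,
// starts right behind the object, and its last word must still be on-page.
static bool MayHaveMementoBehind(uintptr_t object_address, size_t object_size) {
  uintptr_t memento_address = object_address + object_size;
  uintptr_t last_memento_word_address = memento_address + kPointerSize;
  return OnSamePage(object_address, last_memento_word_address);
}

int main() {
  uintptr_t page_start = kPageSize * 7;  // some page-aligned base address
  printf("%d %d\n",
         MayHaveMementoBehind(page_start + 64, 128),               // 1
         MayHaveMementoBehind(page_start + kPageSize - 64, 128));  // 0
  return 0;
}
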
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 6b159a98a3..7413b6e688 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -27,29 +27,42 @@
#include "v8.h"
-#include "deoptimizer.h"
#include "heap-profiler.h"
+
+#include "allocation-tracker.h"
#include "heap-snapshot-generator-inl.h"
namespace v8 {
namespace internal {
HeapProfiler::HeapProfiler(Heap* heap)
- : snapshots_(new HeapSnapshotsCollection(heap)),
+ : ids_(new HeapObjectsMap(heap)),
+ names_(new StringsStorage(heap)),
next_snapshot_uid_(1),
- is_tracking_allocations_(false) {
+ is_tracking_object_moves_(false) {
+}
+
+
+static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
+ delete *snapshot_ptr;
}
HeapProfiler::~HeapProfiler() {
- delete snapshots_;
+ snapshots_.Iterate(DeleteHeapSnapshot);
+ snapshots_.Clear();
}
void HeapProfiler::DeleteAllSnapshots() {
- Heap* the_heap = heap();
- delete snapshots_;
- snapshots_ = new HeapSnapshotsCollection(the_heap);
+ snapshots_.Iterate(DeleteHeapSnapshot);
+ snapshots_.Clear();
+ names_.Reset(new StringsStorage(heap()));
+}
+
+
+void HeapProfiler::RemoveSnapshot(HeapSnapshot* snapshot) {
+ snapshots_.RemoveElement(snapshot);
}
@@ -76,15 +89,18 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
const char* name,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
- HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
+ HeapSnapshot* result = new HeapSnapshot(this, name, next_snapshot_uid_++);
{
HeapSnapshotGenerator generator(result, control, resolver, heap());
if (!generator.GenerateSnapshot()) {
delete result;
result = NULL;
+ } else {
+ snapshots_.Add(result);
}
}
- snapshots_->SnapshotGenerationFinished(result);
+ ids_->RemoveDeadEntries();
+ is_tracking_object_moves_ = true;
return result;
}
@@ -93,59 +109,79 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
String* name,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
- return TakeSnapshot(snapshots_->names()->GetName(name), control, resolver);
+ return TakeSnapshot(names_->GetName(name), control, resolver);
}
-void HeapProfiler::StartHeapObjectsTracking() {
- snapshots_->StartHeapObjectsTracking();
+void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
+ ids_->UpdateHeapObjectsMap();
+ is_tracking_object_moves_ = true;
+ ASSERT(!is_tracking_allocations());
+ if (track_allocations) {
+ allocation_tracker_.Reset(new AllocationTracker(ids_.get(), names_.get()));
+ heap()->DisableInlineAllocation();
+ }
}
SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
- return snapshots_->PushHeapObjectsStats(stream);
+ return ids_->PushHeapObjectsStats(stream);
}
void HeapProfiler::StopHeapObjectsTracking() {
- snapshots_->StopHeapObjectsTracking();
+ ids_->StopHeapObjectsTracking();
+ if (is_tracking_allocations()) {
+ allocation_tracker_.Reset(NULL);
+ heap()->EnableInlineAllocation();
+ }
}
size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
- return snapshots_->GetUsedMemorySize();
+ size_t size = sizeof(*this);
+ size += names_->GetUsedMemorySize();
+ size += ids_->GetUsedMemorySize();
+ size += GetMemoryUsedByList(snapshots_);
+ for (int i = 0; i < snapshots_.length(); ++i) {
+ size += snapshots_[i]->RawSnapshotSize();
+ }
+ return size;
}
int HeapProfiler::GetSnapshotsCount() {
- return snapshots_->snapshots()->length();
+ return snapshots_.length();
}
HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- return snapshots_->snapshots()->at(index);
+ return snapshots_.at(index);
}
SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
if (!obj->IsHeapObject())
return v8::HeapProfiler::kUnknownObjectId;
- return snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
+ return ids_->FindEntry(HeapObject::cast(*obj)->address());
}
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
- snapshots_->ObjectMoveEvent(from, to, size);
+ ids_->MoveObject(from, to, size);
}
-void HeapProfiler::NewObjectEvent(Address addr, int size) {
- snapshots_->NewObjectEvent(addr, size);
+void HeapProfiler::AllocationEvent(Address addr, int size) {
+ DisallowHeapAllocation no_allocation;
+ if (!allocation_tracker_.is_empty()) {
+ allocation_tracker_->AllocationEvent(addr, size);
+ }
}
void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
- snapshots_->UpdateObjectSizeEvent(addr, size);
+ ids_->UpdateObjectSize(addr, size);
}
@@ -156,63 +192,29 @@ void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
}
-void HeapProfiler::StartHeapAllocationsRecording() {
- StartHeapObjectsTracking();
- is_tracking_allocations_ = true;
- DropCompiledCode();
- snapshots_->UpdateHeapObjectsMap();
-}
-
-
-void HeapProfiler::StopHeapAllocationsRecording() {
- StopHeapObjectsTracking();
- is_tracking_allocations_ = false;
- DropCompiledCode();
-}
-
-
-void HeapProfiler::RecordObjectAllocationFromMasm(Isolate* isolate,
- Address obj,
- int size) {
- isolate->heap_profiler()->NewObjectEvent(obj, size);
-}
-
-
-void HeapProfiler::DropCompiledCode() {
- Isolate* isolate = heap()->isolate();
- HandleScope scope(isolate);
-
- if (FLAG_concurrent_recompilation) {
- isolate->optimizing_compiler_thread()->Flush();
- }
-
- Deoptimizer::DeoptimizeAll(isolate);
-
- Handle<Code> lazy_compile =
- Handle<Code>(isolate->builtins()->builtin(Builtins::kLazyCompile));
-
+Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "switch allocations tracking");
-
+ "HeapProfiler::FindHeapObjectById");
DisallowHeapAllocation no_allocation;
-
- HeapIterator iterator(heap());
- HeapObject* obj = NULL;
- while (((obj = iterator.next()) != NULL)) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- SharedFunctionInfo* shared = function->shared();
-
- if (!shared->allows_lazy_compilation()) continue;
- if (!shared->script()->IsScript()) continue;
-
- Code::Kind kind = function->code()->kind();
- if (kind == Code::FUNCTION || kind == Code::BUILTIN) {
- function->set_code(*lazy_compile);
- shared->set_code(*lazy_compile);
- }
+ HeapObject* object = NULL;
+ HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
+ // Make sure that object with the given id is still reachable.
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ if (ids_->FindEntry(obj->address()) == id) {
+ ASSERT(object == NULL);
+ object = obj;
+ // Can't break -- kFilterUnreachable requires full heap traversal.
}
}
+ return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
+}
+
+
+void HeapProfiler::ClearHeapObjectMap() {
+ ids_.Reset(new HeapObjectsMap(heap()));
+ if (!is_tracking_allocations()) is_tracking_object_moves_ = false;
}
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index 74002278d4..e4838df136 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -30,12 +30,12 @@
#include "heap-snapshot-generator-inl.h"
#include "isolate.h"
+#include "smart-pointers.h"
namespace v8 {
namespace internal {
class HeapSnapshot;
-class HeapSnapshotsCollection;
class HeapProfiler {
public:
@@ -53,22 +53,24 @@ class HeapProfiler {
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
- void StartHeapObjectsTracking();
+ void StartHeapObjectsTracking(bool track_allocations);
void StopHeapObjectsTracking();
-
- static void RecordObjectAllocationFromMasm(Isolate* isolate,
- Address obj,
- int size);
+ AllocationTracker* allocation_tracker() const {
+ return allocation_tracker_.get();
+ }
+ HeapObjectsMap* heap_object_map() const { return ids_.get(); }
+ StringsStorage* names() const { return names_.get(); }
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
int GetSnapshotsCount();
HeapSnapshot* GetSnapshot(int index);
SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
void DeleteAllSnapshots();
+ void RemoveSnapshot(HeapSnapshot* snapshot);
void ObjectMoveEvent(Address from, Address to, int size);
- void NewObjectEvent(Address addr, int size);
+ void AllocationEvent(Address addr, int size);
void UpdateObjectSizeEvent(Address addr, int size);
@@ -77,32 +79,27 @@ class HeapProfiler {
v8::RetainedObjectInfo* ExecuteWrapperClassCallback(uint16_t class_id,
Object** wrapper);
- INLINE(bool is_profiling()) {
- return snapshots_->is_tracking_objects();
- }
-
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
- bool is_tracking_allocations() {
- return is_tracking_allocations_;
- }
-
- void StartHeapAllocationsRecording();
- void StopHeapAllocationsRecording();
-
- int FindUntrackedObjects() {
- return snapshots_->FindUntrackedObjects();
+ bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
+ bool is_tracking_allocations() const {
+ return !allocation_tracker_.is_empty();
}
- void DropCompiledCode();
+ Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
+ void ClearHeapObjectMap();
private:
- Heap* heap() const { return snapshots_->heap(); }
+ Heap* heap() const { return ids_->heap(); }
- HeapSnapshotsCollection* snapshots_;
+ // Mapping from HeapObject addresses to objects' uids.
+ SmartPointer<HeapObjectsMap> ids_;
+ List<HeapSnapshot*> snapshots_;
+ SmartPointer<StringsStorage> names_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
- bool is_tracking_allocations_;
+ SmartPointer<AllocationTracker> allocation_tracker_;
+ bool is_tracking_object_moves_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/heap-snapshot-generator-inl.h b/deps/v8/src/heap-snapshot-generator-inl.h
index 1a878c6df1..582de32c2b 100644
--- a/deps/v8/src/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/heap-snapshot-generator-inl.h
@@ -59,7 +59,10 @@ int HeapEntry::set_children_index(int index) {
HeapGraphEdge** HeapEntry::children_arr() {
ASSERT(children_index_ >= 0);
- return &snapshot_->children()[children_index_];
+ SLOW_ASSERT(children_index_ < snapshot_->children().length() ||
+ (children_index_ == snapshot_->children().length() &&
+ children_count_ == 0));
+ return &snapshot_->children().first() + children_index_;
}
@@ -85,4 +88,3 @@ int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
} } // namespace v8::internal
#endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
-
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index 10d113c3d1..b67aa0f376 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -30,9 +30,11 @@
#include "heap-snapshot-generator-inl.h"
#include "allocation-tracker.h"
+#include "code-stubs.h"
#include "heap-profiler.h"
#include "debug.h"
#include "types.h"
+#include "v8conversions.h"
namespace v8 {
namespace internal {
@@ -46,7 +48,8 @@ HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
ASSERT(type == kContextVariable
|| type == kProperty
|| type == kInternal
- || type == kShortcut);
+ || type == kShortcut
+ || type == kWeak);
}
@@ -55,7 +58,7 @@ HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
from_index_(from),
to_index_(to),
index_(index) {
- ASSERT(type == kElement || type == kHidden || type == kWeak);
+ ASSERT(type == kElement || type == kHidden);
}
@@ -70,7 +73,7 @@ HeapEntry::HeapEntry(HeapSnapshot* snapshot,
Type type,
const char* name,
SnapshotObjectId id,
- int self_size)
+ size_t self_size)
: type_(type),
children_count_(0),
children_index_(-1),
@@ -98,15 +101,10 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
}
-Handle<HeapObject> HeapEntry::GetHeapObject() {
- return snapshot_->collection()->FindHeapObjectById(id());
-}
-
-
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6d @%6u %*c %s%s: ",
+ OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ",
self_size(), id(), indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
@@ -154,7 +152,7 @@ void HeapEntry::Print(
break;
case HeapGraphEdge::kWeak:
edge_prefix = "w";
- OS::SNPrintF(index, "%d", edge.index());
+ edge_name = edge.name();
break;
default:
OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
@@ -196,15 +194,16 @@ template <> struct SnapshotSizeConstants<4> {
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 32;
+ static const int kExpectedHeapEntrySize = 40;
};
} // namespace
-HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
+
+HeapSnapshot::HeapSnapshot(HeapProfiler* profiler,
const char* title,
unsigned uid)
- : collection_(collection),
+ : profiler_(profiler),
title_(title),
uid_(uid),
root_index_(HeapEntry::kNoEntry),
@@ -217,6 +216,10 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
STATIC_CHECK(
sizeof(HeapEntry) ==
SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
+ USE(SnapshotSizeConstants<4>::kExpectedHeapGraphEdgeSize);
+ USE(SnapshotSizeConstants<4>::kExpectedHeapEntrySize);
+ USE(SnapshotSizeConstants<8>::kExpectedHeapGraphEdgeSize);
+ USE(SnapshotSizeConstants<8>::kExpectedHeapEntrySize);
for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
}
@@ -224,13 +227,13 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
void HeapSnapshot::Delete() {
- collection_->RemoveSnapshot(this);
+ profiler_->RemoveSnapshot(this);
delete this;
}
void HeapSnapshot::RememberLastJSObjectId() {
- max_snapshot_js_object_id_ = collection_->last_assigned_id();
+ max_snapshot_js_object_id_ = profiler_->heap_object_map()->last_assigned_id();
}
@@ -274,7 +277,7 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size) {
+ size_t size) {
HeapEntry entry(this, type, name, id, size);
entries_.Add(entry);
return &entries_.last();
@@ -345,12 +348,6 @@ void HeapSnapshot::Print(int max_depth) {
}
-template<typename T, class P>
-static size_t GetMemoryUsedByList(const List<T, P>& list) {
- return list.length() * sizeof(T) + sizeof(list);
-}
-
-
size_t HeapSnapshot::RawSnapshotSize() const {
return
sizeof(*this) +
@@ -393,11 +390,6 @@ HeapObjectsMap::HeapObjectsMap(Heap* heap)
}
-void HeapObjectsMap::SnapshotGenerationFinished() {
- RemoveDeadEntries();
-}
-
-
void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
ASSERT(to != NULL);
ASSERT(from != NULL);
@@ -445,18 +437,6 @@ void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
}
-void HeapObjectsMap::NewObject(Address addr, int size) {
- if (FLAG_heap_profiler_trace_objects) {
- PrintF("New object : %p %6d. Next address is %p\n",
- addr,
- size,
- addr + size);
- }
- ASSERT(addr != NULL);
- FindOrAddEntry(addr, size, false);
-}
-
-
void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
FindOrAddEntry(addr, size, false);
}
@@ -513,7 +493,7 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
entries_map_.occupancy());
}
heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::UpdateHeapObjectsMap");
+ "HeapObjectsMap::UpdateHeapObjectsMap");
HeapIterator iterator(heap_);
for (HeapObject* obj = iterator.next();
obj != NULL;
@@ -721,13 +701,12 @@ void HeapObjectsMap::RemoveDeadEntries() {
}
-SnapshotObjectId HeapObjectsMap::GenerateId(Heap* heap,
- v8::RetainedObjectInfo* info) {
+SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
const char* label = info->GetLabel();
id ^= StringHasher::HashSequentialString(label,
static_cast<int>(strlen(label)),
- heap->HashSeed());
+ heap_->HashSeed());
intptr_t element_count = info->GetElementCount();
if (element_count != -1)
id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
@@ -745,107 +724,6 @@ size_t HeapObjectsMap::GetUsedMemorySize() const {
}
-HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
- : is_tracking_objects_(false),
- names_(heap),
- ids_(heap),
- allocation_tracker_(NULL) {
-}
-
-
-static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
- delete *snapshot_ptr;
-}
-
-
-HeapSnapshotsCollection::~HeapSnapshotsCollection() {
- delete allocation_tracker_;
- snapshots_.Iterate(DeleteHeapSnapshot);
-}
-
-
-void HeapSnapshotsCollection::StartHeapObjectsTracking() {
- ids_.UpdateHeapObjectsMap();
- if (allocation_tracker_ == NULL) {
- allocation_tracker_ = new AllocationTracker(&ids_, names());
- }
- is_tracking_objects_ = true;
-}
-
-
-void HeapSnapshotsCollection::StopHeapObjectsTracking() {
- ids_.StopHeapObjectsTracking();
- if (allocation_tracker_ != NULL) {
- delete allocation_tracker_;
- allocation_tracker_ = NULL;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
- unsigned uid) {
- is_tracking_objects_ = true; // Start watching for heap objects moves.
- return new HeapSnapshot(this, name, uid);
-}
-
-
-void HeapSnapshotsCollection::SnapshotGenerationFinished(
- HeapSnapshot* snapshot) {
- ids_.SnapshotGenerationFinished();
- if (snapshot != NULL) {
- snapshots_.Add(snapshot);
- }
-}
-
-
-void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
- snapshots_.RemoveElement(snapshot);
-}
-
-
-Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
- SnapshotObjectId id) {
- // First perform a full GC in order to avoid dead objects.
- heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::FindHeapObjectById");
- DisallowHeapAllocation no_allocation;
- HeapObject* object = NULL;
- HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
- // Make sure that object with the given id is still reachable.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- if (ids_.FindEntry(obj->address()) == id) {
- ASSERT(object == NULL);
- object = obj;
- // Can't break -- kFilterUnreachable requires full heap traversal.
- }
- }
- return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
-}
-
-
-void HeapSnapshotsCollection::NewObjectEvent(Address addr, int size) {
- DisallowHeapAllocation no_allocation;
- ids_.NewObject(addr, size);
- if (allocation_tracker_ != NULL) {
- allocation_tracker_->NewObjectEvent(addr, size);
- }
-}
-
-
-size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
- size_t size = sizeof(*this);
- size += names_.GetUsedMemorySize();
- size += ids_.GetUsedMemorySize();
- size += GetMemoryUsedByList(snapshots_);
- for (int i = 0; i < snapshots_.length(); ++i) {
- size += snapshots_[i]->RawSnapshotSize();
- }
- return size;
-}
-
-
HeapEntriesMap::HeapEntriesMap()
: entries_(HeapThingsMatch) {
}
@@ -926,9 +804,10 @@ V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress,
v8::HeapProfiler::ObjectNameResolver* resolver)
- : heap_(snapshot->collection()->heap()),
+ : heap_(snapshot->profiler()->heap_object_map()->heap()),
snapshot_(snapshot),
- collection_(snapshot_->collection()),
+ names_(snapshot_->profiler()->names()),
+ heap_object_map_(snapshot_->profiler()->heap_object_map()),
progress_(progress),
filler_(NULL),
global_object_name_resolver_(resolver) {
@@ -958,20 +837,20 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
const char* name = shared->bound() ? "native_bind" :
- collection_->names()->GetName(String::cast(shared->name()));
+ names_->GetName(String::cast(shared->name()));
return AddEntry(object, HeapEntry::kClosure, name);
} else if (object->IsJSRegExp()) {
JSRegExp* re = JSRegExp::cast(object);
return AddEntry(object,
HeapEntry::kRegExp,
- collection_->names()->GetName(re->Pattern()));
+ names_->GetName(re->Pattern()));
} else if (object->IsJSObject()) {
- const char* name = collection_->names()->GetName(
+ const char* name = names_->GetName(
GetConstructorName(JSObject::cast(object)));
if (object->IsJSGlobalObject()) {
const char* tag = objects_tags_.GetTag(object);
if (tag != NULL) {
- name = collection_->names()->GetFormatted("%s / %s", name, tag);
+ name = names_->GetFormatted("%s / %s", name, tag);
}
}
return AddEntry(object, HeapEntry::kObject, name);
@@ -987,20 +866,20 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
"(sliced string)");
return AddEntry(object,
HeapEntry::kString,
- collection_->names()->GetName(String::cast(object)));
+ names_->GetName(String::cast(object)));
} else if (object->IsCode()) {
return AddEntry(object, HeapEntry::kCode, "");
} else if (object->IsSharedFunctionInfo()) {
String* name = String::cast(SharedFunctionInfo::cast(object)->name());
return AddEntry(object,
HeapEntry::kCode,
- collection_->names()->GetName(name));
+ names_->GetName(name));
} else if (object->IsScript()) {
Object* name = Script::cast(object)->name();
return AddEntry(object,
HeapEntry::kCode,
name->IsString()
- ? collection_->names()->GetName(String::cast(name))
+ ? names_->GetName(String::cast(name))
: "");
} else if (object->IsNativeContext()) {
return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
@@ -1021,10 +900,17 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name) {
- int object_size = object->Size();
- SnapshotObjectId object_id =
- collection_->GetObjectId(object->address(), object_size);
- return snapshot_->AddEntry(type, name, object_id, object_size);
+ return AddEntry(object->address(), type, name, object->Size());
+}
+
+
+HeapEntry* V8HeapExplorer::AddEntry(Address address,
+ HeapEntry::Type type,
+ const char* name,
+ size_t size) {
+ SnapshotObjectId object_id = heap_object_map_->FindOrAddEntry(
+ address, static_cast<unsigned int>(size));
+ return snapshot_->AddEntry(type, name, object_id, size);
}
@@ -1110,12 +996,13 @@ class IndexedReferencesExtractor : public ObjectVisitor {
void VisitCodeEntry(Address entry_address) {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
generator_->SetInternalReference(parent_obj_, parent_, "code", code);
- generator_->TagObject(code, "(code)");
+ generator_->TagCodeObject(code);
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
+ ++next_index_;
if (CheckVisitedAndUnmark(p)) continue;
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
+ generator_->SetHiddenReference(parent_obj_, parent_, next_index_, *p);
}
}
static void MarkVisitedField(HeapObject* obj, int offset) {
@@ -1150,6 +1037,8 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
+ } else if (obj->IsJSArrayBuffer()) {
+ ExtractJSArrayBufferReferences(entry, JSArrayBuffer::cast(obj));
} else if (obj->IsJSObject()) {
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
@@ -1168,6 +1057,8 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
} else if (obj->IsCode()) {
ExtractCodeReferences(entry, Code::cast(obj));
+ } else if (obj->IsBox()) {
+ ExtractBoxReferences(entry, Box::cast(obj));
} else if (obj->IsCell()) {
ExtractCellReferences(entry, Cell::cast(obj));
} else if (obj->IsPropertyCell()) {
@@ -1237,11 +1128,13 @@ void V8HeapExplorer::ExtractJSObjectReferences(
SetInternalReference(js_fun, entry,
"context", js_fun->context(),
JSFunction::kContextOffset);
- for (int i = JSFunction::kNonWeakFieldsEndOffset;
- i < JSFunction::kSize;
- i += kPointerSize) {
- SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
- }
+ SetWeakReference(js_fun, entry,
+ "next_function_link", js_fun->next_function_link(),
+ JSFunction::kNextFunctionLinkOffset);
+ STATIC_CHECK(JSFunction::kNextFunctionLinkOffset
+ == JSFunction::kNonWeakFieldsEndOffset);
+ STATIC_CHECK(JSFunction::kNextFunctionLinkOffset + kPointerSize
+ == JSFunction::kSize);
} else if (obj->IsGlobalObject()) {
GlobalObject* global_obj = GlobalObject::cast(obj);
SetInternalReference(global_obj, entry,
@@ -1251,8 +1144,19 @@ void V8HeapExplorer::ExtractJSObjectReferences(
"native_context", global_obj->native_context(),
GlobalObject::kNativeContextOffset);
SetInternalReference(global_obj, entry,
+ "global_context", global_obj->global_context(),
+ GlobalObject::kGlobalContextOffset);
+ SetInternalReference(global_obj, entry,
"global_receiver", global_obj->global_receiver(),
GlobalObject::kGlobalReceiverOffset);
+ STATIC_CHECK(GlobalObject::kHeaderSize - JSObject::kHeaderSize ==
+ 4 * kPointerSize);
+ } else if (obj->IsJSArrayBufferView()) {
+ JSArrayBufferView* view = JSArrayBufferView::cast(obj);
+ SetInternalReference(view, entry, "buffer", view->buffer(),
+ JSArrayBufferView::kBufferOffset);
+ SetWeakReference(view, entry, "weak_next", view->weak_next(),
+ JSArrayBufferView::kWeakNextOffset);
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
@@ -1303,8 +1207,13 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
}
#define EXTRACT_CONTEXT_FIELD(index, type, name) \
- SetInternalReference(context, entry, #name, context->get(Context::index), \
- FixedArray::OffsetOfElementAt(Context::index));
+ if (Context::index < Context::FIRST_WEAK_SLOT) { \
+ SetInternalReference(context, entry, #name, context->get(Context::index), \
+ FixedArray::OffsetOfElementAt(Context::index)); \
+ } else { \
+ SetWeakReference(context, entry, #name, context->get(Context::index), \
+ FixedArray::OffsetOfElementAt(Context::index)); \
+ }
EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
@@ -1316,13 +1225,16 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->embedder_data(), "(context data)");
NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
+ EXTRACT_CONTEXT_FIELD(OPTIMIZED_FUNCTIONS_LIST, unused,
+ optimized_functions_list);
+ EXTRACT_CONTEXT_FIELD(OPTIMIZED_CODE_LIST, unused, optimized_code_list);
+ EXTRACT_CONTEXT_FIELD(DEOPTIMIZED_CODE_LIST, unused, deoptimized_code_list);
+ EXTRACT_CONTEXT_FIELD(NEXT_CONTEXT_LINK, unused, next_context_link);
#undef EXTRACT_CONTEXT_FIELD
- for (int i = Context::FIRST_WEAK_SLOT;
- i < Context::NATIVE_CONTEXT_SLOTS;
- ++i) {
- SetWeakReference(context, entry, i, context->get(i),
- FixedArray::OffsetOfElementAt(i));
- }
+ STATIC_CHECK(Context::OPTIMIZED_FUNCTIONS_LIST == Context::FIRST_WEAK_SLOT);
+ STATIC_CHECK(Context::NEXT_CONTEXT_LINK + 1
+ == Context::NATIVE_CONTEXT_SLOTS);
+ STATIC_CHECK(Context::FIRST_WEAK_SLOT + 5 == Context::NATIVE_CONTEXT_SLOTS);
}
}
@@ -1370,10 +1282,19 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
int entry, SharedFunctionInfo* shared) {
HeapObject* obj = shared;
+ String* shared_name = shared->DebugName();
+ const char* name = NULL;
+ if (shared_name != *heap_->isolate()->factory()->empty_string()) {
+ name = names_->GetName(shared_name);
+ TagObject(shared->code(), names_->GetFormatted("(code for %s)", name));
+ } else {
+ TagObject(shared->code(), names_->GetFormatted("(%s code)",
+ Code::Kind2String(shared->code()->kind())));
+ }
+
SetInternalReference(obj, entry,
"name", shared->name(),
SharedFunctionInfo::kNameOffset);
- TagObject(shared->code(), "(code)");
SetInternalReference(obj, entry,
"code", shared->code(),
SharedFunctionInfo::kCodeOffset);
@@ -1387,7 +1308,10 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
- TagObject(shared->construct_stub(), "(code)");
+ const char* construct_stub_name = name ?
+ names_->GetFormatted("(construct stub code for %s)", name) :
+ "(construct stub code)";
+ TagObject(shared->construct_stub(), construct_stub_name);
SetInternalReference(obj, entry,
"construct_stub", shared->construct_stub(),
SharedFunctionInfo::kConstructStubOffset);
@@ -1400,8 +1324,11 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"inferred_name", shared->inferred_name(),
SharedFunctionInfo::kInferredNameOffset);
+ SetInternalReference(obj, entry,
+ "optimized_code_map", shared->optimized_code_map(),
+ SharedFunctionInfo::kOptimizedCodeMapOffset);
SetWeakReference(obj, entry,
- 1, shared->initial_map(),
+ "initial_map", shared->initial_map(),
SharedFunctionInfo::kInitialMapOffset);
}
@@ -1449,7 +1376,22 @@ void V8HeapExplorer::ExtractCodeCacheReferences(
}
+void V8HeapExplorer::TagBuiltinCodeObject(Code* code, const char* name) {
+ TagObject(code, names_->GetFormatted("(%s builtin)", name));
+}
+
+
+void V8HeapExplorer::TagCodeObject(Code* code) {
+ if (code->kind() == Code::STUB) {
+ TagObject(code, names_->GetFormatted(
+ "(%s code)", CodeStub::MajorName(
+ static_cast<CodeStub::Major>(code->major_key()), true)));
+ }
+}
+
+
void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
+ TagCodeObject(code);
TagObject(code->relocation_info(), "(code relocation info)");
SetInternalReference(code, entry,
"relocation_info", code->relocation_info(),
@@ -1469,6 +1411,19 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
SetInternalReference(code, entry,
"gc_metadata", code->gc_metadata(),
Code::kGCMetadataOffset);
+ SetInternalReference(code, entry,
+ "constant_pool", code->constant_pool(),
+ Code::kConstantPoolOffset);
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ SetWeakReference(code, entry,
+ "next_code_link", code->next_code_link(),
+ Code::kNextCodeLinkOffset);
+ }
+}
+
+
+void V8HeapExplorer::ExtractBoxReferences(int entry, Box* box) {
+ SetInternalReference(box, entry, "value", box->value(), Box::kValueOffset);
}
@@ -1495,6 +1450,46 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
AllocationSite::kNestedSiteOffset);
SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
AllocationSite::kDependentCodeOffset);
+ // Do not visit weak_next as it is not visited by the StaticVisitor,
+ // and we're not very interested in the weak_next field here.
+ STATIC_CHECK(AllocationSite::kWeakNextOffset >=
+ AllocationSite::BodyDescriptor::kEndOffset);
+}
+
+
+class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
+ public:
+ JSArrayBufferDataEntryAllocator(size_t size, V8HeapExplorer* explorer)
+ : size_(size)
+ , explorer_(explorer) {
+ }
+ virtual HeapEntry* AllocateEntry(HeapThing ptr) {
+ return explorer_->AddEntry(
+ static_cast<Address>(ptr),
+ HeapEntry::kNative, "system / JSArrayBufferData", size_);
+ }
+ private:
+ size_t size_;
+ V8HeapExplorer* explorer_;
+};
+
+
+void V8HeapExplorer::ExtractJSArrayBufferReferences(
+ int entry, JSArrayBuffer* buffer) {
+ SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
+ JSArrayBuffer::kWeakNextOffset);
+ SetWeakReference(buffer, entry,
+ "weak_first_view", buffer->weak_first_view(),
+ JSArrayBuffer::kWeakFirstViewOffset);
+ // Set up a reference to a native memory backing_store object.
+ if (!buffer->backing_store())
+ return;
+ size_t data_size = NumberToSize(heap_->isolate(), buffer->byte_length());
+ JSArrayBufferDataEntryAllocator allocator(data_size, this);
+ HeapEntry* data_entry =
+ filler_->FindOrAddEntry(buffer->backing_store(), &allocator);
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ entry, "backing_store", data_entry);
}
@@ -1510,7 +1505,7 @@ void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
bindings->get(JSFunction::kBoundFunctionIndex));
for (int i = JSFunction::kBoundArgumentsStartIndex;
i < bindings->length(); i++) {
- const char* reference_name = collection_->names()->GetFormatted(
+ const char* reference_name = names_->GetFormatted(
"bound_argument_%d",
i - JSFunction::kBoundArgumentsStartIndex);
SetNativeBindReference(js_obj, entry, reference_name,
@@ -1695,9 +1690,10 @@ class RootsReferencesExtractor : public ObjectVisitor {
};
public:
- RootsReferencesExtractor()
+ explicit RootsReferencesExtractor(Heap* heap)
: collecting_all_references_(false),
- previous_reference_count_(0) {
+ previous_reference_count_(0),
+ heap_(heap) {
}
void VisitPointers(Object** start, Object** end) {
@@ -1712,22 +1708,26 @@ class RootsReferencesExtractor : public ObjectVisitor {
void FillReferences(V8HeapExplorer* explorer) {
ASSERT(strong_references_.length() <= all_references_.length());
+ Builtins* builtins = heap_->isolate()->builtins();
for (int i = 0; i < reference_tags_.length(); ++i) {
explorer->SetGcRootsReference(reference_tags_[i].tag);
}
- int strong_index = 0, all_index = 0, tags_index = 0;
+ int strong_index = 0, all_index = 0, tags_index = 0, builtin_index = 0;
while (all_index < all_references_.length()) {
- if (strong_index < strong_references_.length() &&
- strong_references_[strong_index] == all_references_[all_index]) {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- false,
- all_references_[all_index++]);
- ++strong_index;
- } else {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- true,
- all_references_[all_index++]);
+ bool is_strong = strong_index < strong_references_.length()
+ && strong_references_[strong_index] == all_references_[all_index];
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ !is_strong,
+ all_references_[all_index]);
+ if (reference_tags_[tags_index].tag ==
+ VisitorSynchronization::kBuiltins) {
+ ASSERT(all_references_[all_index]->IsCode());
+ explorer->TagBuiltinCodeObject(
+ Code::cast(all_references_[all_index]),
+ builtins->name(builtin_index++));
}
+ ++all_index;
+ if (is_strong) ++strong_index;
if (reference_tags_[tags_index].index == all_index) ++tags_index;
}
}
@@ -1746,16 +1746,27 @@ class RootsReferencesExtractor : public ObjectVisitor {
List<Object*> all_references_;
int previous_reference_count_;
List<IndexTag> reference_tags_;
+ Heap* heap_;
};
bool V8HeapExplorer::IterateAndExtractReferences(
SnapshotFillerInterface* filler) {
- HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
-
filler_ = filler;
- bool interrupted = false;
+ // Make sure builtin code objects get their builtin tags
+ // first. Otherwise a particular JSFunction object could set
+ // its custom name to a generic builtin.
+ SetRootGcRootsReference();
+ RootsReferencesExtractor extractor(heap_);
+ heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ extractor.SetCollectingAllReferences();
+ heap_->IterateRoots(&extractor, VISIT_ALL);
+ extractor.FillReferences(this);
+
+ // Now iterate the whole heap.
+ bool interrupted = false;
+ HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
// Heap iteration with filtering must be finished in any case.
for (HeapObject* obj = iterator.next();
obj != NULL;
@@ -1770,12 +1781,6 @@ bool V8HeapExplorer::IterateAndExtractReferences(
return false;
}
- SetRootGcRootsReference();
- RootsReferencesExtractor extractor;
- heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
- extractor.SetCollectingAllReferences();
- heap_->IterateRoots(&extractor, VISIT_ALL);
- extractor.FillReferences(this);
filler_ = NULL;
return progress_->ProgressReport(true);
}
@@ -1807,7 +1812,7 @@ void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
parent_entry,
- collection_->names()->GetName(reference_name),
+ names_->GetName(reference_name),
child_entry);
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
@@ -1873,7 +1878,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
if (IsEssentialObject(child_obj)) {
filler_->SetNamedReference(HeapGraphEdge::kInternal,
parent_entry,
- collection_->names()->GetName(index),
+ names_->GetName(index),
child_entry);
}
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
@@ -1897,17 +1902,17 @@ void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
int parent_entry,
- int index,
+ const char* reference_name,
Object* child_obj,
int field_offset) {
ASSERT(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == NULL) return;
if (IsEssentialObject(child_obj)) {
- filler_->SetIndexedReference(HeapGraphEdge::kWeak,
- parent_entry,
- index,
- child_entry);
+ filler_->SetNamedReference(HeapGraphEdge::kWeak,
+ parent_entry,
+ reference_name,
+ child_entry);
}
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
@@ -1926,11 +1931,11 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
reference_name->IsSymbol() || String::cast(reference_name)->length() > 0
? HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
const char* name = name_format_string != NULL && reference_name->IsString()
- ? collection_->names()->GetFormatted(
+ ? names_->GetFormatted(
name_format_string,
- *String::cast(reference_name)->ToCString(
- DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)) :
- collection_->names()->GetName(reference_name);
+ String::cast(reference_name)->ToCString(
+ DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).get()) :
+ names_->GetName(reference_name);
filler_->SetNamedReference(type,
parent_entry,
@@ -1979,10 +1984,17 @@ void V8HeapExplorer::SetGcSubrootReference(
name,
child_entry);
} else {
- filler_->SetIndexedAutoIndexReference(
- is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
+ if (is_weak) {
+ filler_->SetNamedAutoIndexReference(
+ HeapGraphEdge::kWeak,
+ snapshot_->gc_subroot(tag)->index(),
+ child_entry);
+ } else {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ snapshot_->gc_subroot(tag)->index(),
+ child_entry);
+ }
}
// Add a shortcut to JS global object reference at snapshot root.
@@ -2106,13 +2118,15 @@ class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
HeapSnapshot* snapshot,
HeapEntry::Type entries_type)
: snapshot_(snapshot),
- collection_(snapshot_->collection()),
+ names_(snapshot_->profiler()->names()),
+ heap_object_map_(snapshot_->profiler()->heap_object_map()),
entries_type_(entries_type) {
}
virtual HeapEntry* AllocateEntry(HeapThing ptr);
private:
HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
+ StringsStorage* names_;
+ HeapObjectsMap* heap_object_map_;
HeapEntry::Type entries_type_;
};
@@ -2122,13 +2136,13 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
intptr_t elements = info->GetElementCount();
intptr_t size = info->GetSizeInBytes();
const char* name = elements != -1
- ? collection_->names()->GetFormatted(
+ ? names_->GetFormatted(
"%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
- : collection_->names()->GetCopy(info->GetLabel());
+ : names_->GetCopy(info->GetLabel());
return snapshot_->AddEntry(
entries_type_,
name,
- HeapObjectsMap::GenerateId(collection_->heap(), info),
+ heap_object_map_->GenerateId(info),
size != -1 ? static_cast<int>(size) : 0);
}
@@ -2136,9 +2150,9 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
NativeObjectsExplorer::NativeObjectsExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress)
- : isolate_(snapshot->collection()->heap()->isolate()),
+ : isolate_(snapshot->profiler()->heap_object_map()->heap()->isolate()),
snapshot_(snapshot),
- collection_(snapshot_->collection()),
+ names_(snapshot_->profiler()->names()),
progress_(progress),
embedder_queried_(false),
objects_by_info_(RetainedInfosMatch),
@@ -2200,7 +2214,7 @@ void NativeObjectsExplorer::FillRetainedObjects() {
group->info = NULL; // Acquire info object ownership.
}
isolate->global_handles()->RemoveObjectGroups();
- isolate->heap()->CallGCEpilogueCallbacks(major_gc_type);
+ isolate->heap()->CallGCEpilogueCallbacks(major_gc_type, kNoGCCallbackFlags);
// Record objects that are not in ObjectGroups, but have class ID.
GlobalHandlesExtractor extractor(this);
isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
@@ -2300,7 +2314,7 @@ class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
const char* label) {
- const char* label_copy = collection_->names()->GetCopy(label);
+ const char* label_copy = names_->GetCopy(label);
uint32_t hash = StringHasher::HashSequentialString(
label_copy,
static_cast<int>(strlen(label_copy)),
@@ -2378,7 +2392,7 @@ class SnapshotFiller : public SnapshotFillerInterface {
public:
explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
: snapshot_(snapshot),
- collection_(snapshot->collection()),
+ names_(snapshot->profiler()->names()),
entries_(entries) { }
HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
HeapEntry* entry = allocator->AllocateEntry(ptr);
@@ -2421,13 +2435,13 @@ class SnapshotFiller : public SnapshotFillerInterface {
int index = parent_entry->children_count() + 1;
parent_entry->SetNamedReference(
type,
- collection_->names()->GetName(index),
+ names_->GetName(index),
child_entry);
}
private:
HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
+ StringsStorage* names_;
HeapEntriesMap* entries_;
};
@@ -2633,7 +2647,7 @@ const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
if (AllocationTracker* allocation_tracker =
- snapshot_->collection()->allocation_tracker()) {
+ snapshot_->profiler()->allocation_tracker()) {
allocation_tracker->PrepareForSerialization();
}
ASSERT(writer_ == NULL);
@@ -2688,9 +2702,26 @@ int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
}
-static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
+namespace {
+
+template<size_t size> struct ToUnsigned;
+
+template<> struct ToUnsigned<4> {
+ typedef uint32_t Type;
+};
+
+template<> struct ToUnsigned<8> {
+ typedef uint64_t Type;
+};
+
+} // namespace
+
+
+template<typename T>
+static int utoa_impl(T value, const Vector<char>& buffer, int buffer_pos) {
+ STATIC_CHECK(static_cast<T>(-1) > 0); // Check that T is unsigned
int number_of_digits = 0;
- unsigned t = value;
+ T t = value;
do {
++number_of_digits;
} while (t /= 10);
@@ -2698,7 +2729,7 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
buffer_pos += number_of_digits;
int result = buffer_pos;
do {
- int last_digit = value % 10;
+ int last_digit = static_cast<int>(value % 10);
buffer[--buffer_pos] = '0' + last_digit;
value /= 10;
} while (value);
@@ -2706,6 +2737,14 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
}
+template<typename T>
+static int utoa(T value, const Vector<char>& buffer, int buffer_pos) {
+ typename ToUnsigned<sizeof(value)>::Type unsigned_value = value;
+ STATIC_CHECK(sizeof(value) == sizeof(unsigned_value));
+ return utoa_impl(unsigned_value, buffer, buffer_pos);
+}
+
+
void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
bool first_edge) {
// The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
@@ -2714,7 +2753,6 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
EmbeddedVector<char, kBufferSize> buffer;
int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
|| edge->type() == HeapGraphEdge::kHidden
- || edge->type() == HeapGraphEdge::kWeak
? edge->index() : GetStringId(edge->name());
int buffer_pos = 0;
if (!first_edge) {
@@ -2743,9 +2781,10 @@ void HeapSnapshotJSONSerializer::SerializeEdges() {
void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
- // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
+ // The buffer needs space for 4 unsigned ints, 1 size_t, 5 commas, \n and \0
static const int kBufferSize =
- 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ 4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + MaxDecimalDigitsIn<sizeof(size_t)>::kUnsigned // NOLINT
+ 5 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
@@ -2852,7 +2891,7 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
writer_->AddNumber(snapshot_->edges().length());
writer_->AddString(",\"trace_function_count\":");
uint32_t count = 0;
- AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
if (tracker) {
count = tracker->id_to_function_info()->occupancy();
}
@@ -2871,7 +2910,7 @@ static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
void HeapSnapshotJSONSerializer::SerializeTraceTree() {
- AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
if (!tracker) return;
AllocationTraceTree* traces = tracker->trace_tree();
SerializeTraceNode(traces->root());
@@ -2922,7 +2961,7 @@ static int SerializePosition(int position, const Vector<char>& buffer,
void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
- AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
if (!tracker) return;
// The buffer needs space for 6 unsigned ints, 6 commas, \n and \0
const int kBufferSize =
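
A note on the utoa() rework earlier in this heap-snapshot-generator.cc diff: node self-sizes are now size_t, so the serializer routes integers through a small ToUnsigned<sizeof(T)> trait and an unsigned-only digit loop instead of assuming plain unsigned. A self-contained sketch of that width-to-unsigned mapping, with our own helper names and buffer handling:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Compile-time map from a value's byte width to a same-width unsigned type,
// mirroring the ToUnsigned<4>/ToUnsigned<8> specializations in the patch.
template <size_t kSize> struct ToUnsigned;
template <> struct ToUnsigned<4> { typedef uint32_t Type; };
template <> struct ToUnsigned<8> { typedef uint64_t Type; };

// Convert to the unsigned twin first, so the digit loop (like utoa_impl)
// only ever divides non-negative values.
template <typename T>
void PrintDecimal(T value) {
  typename ToUnsigned<sizeof(value)>::Type u = value;
  char buffer[24];
  int pos = sizeof(buffer) - 1;
  buffer[pos] = '\0';
  do {
    buffer[--pos] = static_cast<char>('0' + u % 10);
    u /= 10;
  } while (u != 0);
  printf("%s\n", buffer + pos);
}

int main() {
  PrintDecimal(42u);                             // 32-bit path
  PrintDecimal(static_cast<uint64_t>(1) << 40);  // 64-bit path
  return 0;
}
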
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index e4038b10f4..8717f8f25e 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -57,14 +57,15 @@ class HeapGraphEdge BASE_EMBEDDED {
Type type() const { return static_cast<Type>(type_); }
int index() const {
- ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
+ ASSERT(type_ == kElement || type_ == kHidden);
return index_;
}
const char* name() const {
ASSERT(type_ == kContextVariable
|| type_ == kProperty
|| type_ == kInternal
- || type_ == kShortcut);
+ || type_ == kShortcut
+ || type_ == kWeak);
return name_;
}
INLINE(HeapEntry* from() const);
@@ -113,14 +114,14 @@ class HeapEntry BASE_EMBEDDED {
Type type,
const char* name,
SnapshotObjectId id,
- int self_size);
+ size_t self_size);
HeapSnapshot* snapshot() { return snapshot_; }
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }
inline SnapshotObjectId id() { return id_; }
- int self_size() { return self_size_; }
+ size_t self_size() { return self_size_; }
INLINE(int index() const);
int children_count() const { return children_count_; }
INLINE(int set_children_index(int index));
@@ -138,8 +139,6 @@ class HeapEntry BASE_EMBEDDED {
void Print(
const char* prefix, const char* edge_name, int max_depth, int indent);
- Handle<HeapObject> GetHeapObject();
-
private:
INLINE(HeapGraphEdge** children_arr());
const char* TypeAsString();
@@ -147,28 +146,26 @@ class HeapEntry BASE_EMBEDDED {
unsigned type_: 4;
int children_count_: 28;
int children_index_;
- int self_size_;
+ size_t self_size_;
SnapshotObjectId id_;
HeapSnapshot* snapshot_;
const char* name_;
};
-class HeapSnapshotsCollection;
-
// HeapSnapshot represents a single heap snapshot. It is stored in
-// HeapSnapshotsCollection, which is also a factory for
+// HeapProfiler, which is also a factory for
// HeapSnapshots. All HeapSnapshots share strings copied from JS heap
// to be able to return them even if they were collected.
// HeapSnapshotGenerator fills in a HeapSnapshot.
class HeapSnapshot {
public:
- HeapSnapshot(HeapSnapshotsCollection* collection,
+ HeapSnapshot(HeapProfiler* profiler,
const char* title,
unsigned uid);
void Delete();
- HeapSnapshotsCollection* collection() { return collection_; }
+ HeapProfiler* profiler() { return profiler_; }
const char* title() { return title_; }
unsigned uid() { return uid_; }
size_t RawSnapshotSize() const;
@@ -189,7 +186,7 @@ class HeapSnapshot {
HeapEntry* AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size);
+ size_t size);
HeapEntry* AddRootEntry();
HeapEntry* AddGcRootsEntry();
HeapEntry* AddGcSubrootEntry(int tag);
@@ -202,7 +199,7 @@ class HeapSnapshot {
void PrintEntriesSize();
private:
- HeapSnapshotsCollection* collection_;
+ HeapProfiler* profiler_;
const char* title_;
unsigned uid_;
int root_index_;
@@ -227,13 +224,11 @@ class HeapObjectsMap {
Heap* heap() const { return heap_; }
- void SnapshotGenerationFinished();
SnapshotObjectId FindEntry(Address addr);
SnapshotObjectId FindOrAddEntry(Address addr,
unsigned int size,
bool accessed = true);
void MoveObject(Address from, Address to, int size);
- void NewObject(Address addr, int size);
void UpdateObjectSize(Address addr, int size);
SnapshotObjectId last_assigned_id() const {
return next_id_ - kObjectIdStep;
@@ -243,7 +238,7 @@ class HeapObjectsMap {
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
size_t GetUsedMemorySize() const;
- static SnapshotObjectId GenerateId(Heap* heap, v8::RetainedObjectInfo* info);
+ SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
static inline SnapshotObjectId GetNthGcSubrootId(int delta);
static const int kObjectIdStep = 2;
@@ -256,6 +251,7 @@ class HeapObjectsMap {
int FindUntrackedObjects();
void UpdateHeapObjectsMap();
+ void RemoveDeadEntries();
private:
struct EntryInfo {
@@ -275,8 +271,6 @@ class HeapObjectsMap {
uint32_t count;
};
- void RemoveDeadEntries();
-
SnapshotObjectId next_id_;
HashMap entries_map_;
List<EntryInfo> entries_;
@@ -287,63 +281,6 @@ class HeapObjectsMap {
};
-class HeapSnapshotsCollection {
- public:
- explicit HeapSnapshotsCollection(Heap* heap);
- ~HeapSnapshotsCollection();
-
- Heap* heap() const { return ids_.heap(); }
-
- bool is_tracking_objects() { return is_tracking_objects_; }
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
- return ids_.PushHeapObjectsStats(stream);
- }
- void StartHeapObjectsTracking();
- void StopHeapObjectsTracking();
-
- HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
- void SnapshotGenerationFinished(HeapSnapshot* snapshot);
- List<HeapSnapshot*>* snapshots() { return &snapshots_; }
- void RemoveSnapshot(HeapSnapshot* snapshot);
-
- StringsStorage* names() { return &names_; }
- AllocationTracker* allocation_tracker() { return allocation_tracker_; }
-
- SnapshotObjectId FindObjectId(Address object_addr) {
- return ids_.FindEntry(object_addr);
- }
- SnapshotObjectId GetObjectId(Address object_addr, int object_size) {
- return ids_.FindOrAddEntry(object_addr, object_size);
- }
- Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
- void ObjectMoveEvent(Address from, Address to, int size) {
- ids_.MoveObject(from, to, size);
- }
- void NewObjectEvent(Address addr, int size);
- void UpdateObjectSizeEvent(Address addr, int size) {
- ids_.UpdateObjectSize(addr, size);
- }
- SnapshotObjectId last_assigned_id() const {
- return ids_.last_assigned_id();
- }
- size_t GetUsedMemorySize() const;
-
- int FindUntrackedObjects() { return ids_.FindUntrackedObjects(); }
-
- void UpdateHeapObjectsMap() { ids_.UpdateHeapObjectsMap(); }
-
- private:
- bool is_tracking_objects_; // Whether tracking object moves is needed.
- List<HeapSnapshot*> snapshots_;
- StringsStorage names_;
- // Mapping from HeapObject addresses to objects' uids.
- HeapObjectsMap ids_;
- AllocationTracker* allocation_tracker_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
-};
-
-
// A typedef for referencing anything that can be snapshotted living
// in any kind of heap memory.
typedef void* HeapThing;
@@ -447,6 +384,12 @@ class V8HeapExplorer : public HeapEntriesAllocator {
int EstimateObjectsCount(HeapIterator* iterator);
bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
void TagGlobalObjects();
+ void TagCodeObject(Code* code);
+ void TagBuiltinCodeObject(Code* code, const char* name);
+ HeapEntry* AddEntry(Address address,
+ HeapEntry::Type type,
+ const char* name,
+ size_t size);
static String* GetConstructorName(JSObject* object);
@@ -457,6 +400,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name);
+
const char* GetSystemEntryName(HeapObject* object);
void ExtractReferences(HeapObject* obj);
@@ -471,9 +415,11 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
void ExtractCodeReferences(int entry, Code* code);
+ void ExtractBoxReferences(int entry, Box* box);
void ExtractCellReferences(int entry, Cell* cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
+ void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
@@ -510,7 +456,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
Object* child);
void SetWeakReference(HeapObject* parent_obj,
int parent,
- int index,
+ const char* reference_name,
Object* child_obj,
int field_offset);
void SetPropertyReference(HeapObject* parent_obj,
@@ -534,7 +480,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
Heap* heap_;
HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
+ StringsStorage* names_;
+ HeapObjectsMap* heap_object_map_;
SnapshottingProgressReportingInterface* progress_;
SnapshotFillerInterface* filler_;
HeapObjectsSet objects_tags_;
@@ -595,7 +542,7 @@ class NativeObjectsExplorer {
Isolate* isolate_;
HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
+ StringsStorage* names_;
SnapshottingProgressReportingInterface* progress_;
bool embedder_queried_;
HeapObjectsSet in_groups_;
@@ -702,4 +649,3 @@ class HeapSnapshotJSONSerializer {
} } // namespace v8::internal
#endif // V8_HEAP_SNAPSHOT_GENERATOR_H_
-
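
To summarize the header change in isolation: weak edges are reclassified from index-carrying to name-carrying, so index() no longer accepts kWeak while name() does, and entry self sizes widen from int to size_t. A simplified, self-contained illustration (Edge, WithIndex and WithName are invented stand-ins, not V8 types) could be:

#include <cassert>

class Edge {
 public:
  enum Type { kElement, kHidden, kProperty, kInternal, kWeak };

  static Edge WithIndex(Type type, int index) {
    Edge e;
    e.type_ = type;
    e.index_ = index;
    return e;
  }
  static Edge WithName(Type type, const char* name) {
    Edge e;
    e.type_ = type;
    e.name_ = name;
    return e;
  }

  int index() const {
    assert(type_ == kElement || type_ == kHidden);  // kWeak no longer allowed
    return index_;
  }
  const char* name() const {
    assert(type_ == kProperty || type_ == kInternal || type_ == kWeak);
    return name_;
  }

 private:
  Type type_ = kElement;
  int index_ = 0;
  const char* name_ = nullptr;
};

int main() {
  Edge weak = Edge::WithName(Edge::kWeak, "some_weak_field");
  return weak.name() != nullptr ? 0 : 1;
}

A serializer built on such an edge type then emits a string id for weak edges rather than a numeric index, which is exactly the SerializeEdge adjustment in the .cc hunk above.
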
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 3f7030eb66..42e56ca1eb 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -49,6 +49,7 @@
#include "snapshot.h"
#include "store-buffer.h"
#include "utils/random-number-generator.h"
+#include "v8conversions.h"
#include "v8threads.h"
#include "v8utils.h"
#include "vm-state-inl.h"
@@ -79,6 +80,7 @@ Heap::Heap()
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
+ maximum_committed_(0),
survived_since_last_expansion_(0),
sweep_generation_(0),
always_allocate_scope_depth_(0),
@@ -86,7 +88,6 @@ Heap::Heap()
contexts_disposed_(0),
global_ic_age_(0),
flush_monomorphic_ics_(false),
- allocation_mementos_found_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -113,6 +114,7 @@ Heap::Heap()
amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
old_gen_exhausted_(false),
+ inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
hidden_string_(NULL),
gc_safe_size_of_old_object_(NULL),
@@ -132,6 +134,7 @@ Heap::Heap()
last_gc_end_timestamp_(0.0),
marking_time_(0.0),
sweeping_time_(0.0),
+ mark_compact_collector_(this),
store_buffer_(this),
marking_(this),
incremental_marking_(this),
@@ -147,8 +150,10 @@ Heap::Heap()
#ifdef VERIFY_HEAP
no_weak_object_verification_scope_depth_(0),
#endif
+ allocation_sites_scratchpad_length_(0),
promotion_queue_(this),
configured_(false),
+ external_string_table_(this),
chunks_queued_for_free_(NULL),
relocation_mutex_(NULL) {
// Allow build-time customization of the max semispace size. Building
@@ -174,8 +179,6 @@ Heap::Heap()
native_contexts_list_ = NULL;
array_buffers_list_ = Smi::FromInt(0);
allocation_sites_list_ = Smi::FromInt(0);
- mark_compact_collector_.heap_ = this;
- external_string_table_.heap_ = this;
  // Put a dummy entry in the remembered pages so we can find the list in the

// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
@@ -232,6 +235,16 @@ intptr_t Heap::CommittedMemoryExecutable() {
}
+void Heap::UpdateMaximumCommitted() {
+ if (!HasBeenSetUp()) return;
+
+ intptr_t current_committed_memory = CommittedMemory();
+ if (current_committed_memory > maximum_committed_) {
+ maximum_committed_ = current_committed_memory;
+ }
+}
+
+
intptr_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
@@ -400,7 +413,7 @@ void Heap::PrintShortHeapStatistics() {
this->Available() / KB,
this->CommittedMemory() / KB);
PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
- amount_of_external_allocated_memory_ / KB);
+ static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}
@@ -425,7 +438,6 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
{ AllowHeapAllocation for_the_first_part_of_prologue;
- isolate_->transcendental_cache()->Clear();
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
@@ -441,6 +453,8 @@ void Heap::GarbageCollectionPrologue() {
#endif
}
+ UpdateMaximumCommitted();
+
#ifdef DEBUG
ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
@@ -451,7 +465,7 @@ void Heap::GarbageCollectionPrologue() {
store_buffer()->GCPrologue();
- if (FLAG_concurrent_osr) {
+ if (isolate()->concurrent_osr_enabled()) {
isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
}
}
@@ -467,6 +481,20 @@ intptr_t Heap::SizeOfObjects() {
}
+void Heap::ClearAllICsByKind(Code::Kind kind) {
+ HeapObjectIterator it(code_space());
+
+ for (Object* object = it.Next(); object != NULL; object = it.Next()) {
+ Code* code = Code::cast(object);
+ Code::Kind current_kind = code->kind();
+ if (current_kind == Code::FUNCTION ||
+ current_kind == Code::OPTIMIZED_FUNCTION) {
+ code->ClearInlineCaches(kind);
+ }
+ }
+}
+
+
void Heap::RepairFreeListsAfterBoot() {
PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next();
@@ -477,6 +505,89 @@ void Heap::RepairFreeListsAfterBoot() {
}
+void Heap::ProcessPretenuringFeedback() {
+ if (FLAG_allocation_site_pretenuring) {
+ int tenure_decisions = 0;
+ int dont_tenure_decisions = 0;
+ int allocation_mementos_found = 0;
+ int allocation_sites = 0;
+ int active_allocation_sites = 0;
+
+ // If the scratchpad overflowed, we have to iterate over the allocation
+ // sites list.
+ bool use_scratchpad =
+ allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize;
+
+ int i = 0;
+ Object* list_element = allocation_sites_list();
+ bool trigger_deoptimization = false;
+ while (use_scratchpad ?
+ i < allocation_sites_scratchpad_length_ :
+ list_element->IsAllocationSite()) {
+ AllocationSite* site = use_scratchpad ?
+ AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
+ AllocationSite::cast(list_element);
+ allocation_mementos_found += site->memento_found_count();
+ if (site->memento_found_count() > 0) {
+ active_allocation_sites++;
+ }
+ if (site->DigestPretenuringFeedback()) trigger_deoptimization = true;
+ if (site->GetPretenureMode() == TENURED) {
+ tenure_decisions++;
+ } else {
+ dont_tenure_decisions++;
+ }
+ allocation_sites++;
+ if (use_scratchpad) {
+ i++;
+ } else {
+ list_element = site->weak_next();
+ }
+ }
+
+ if (trigger_deoptimization) {
+ isolate_->stack_guard()->DeoptMarkedAllocationSites();
+ }
+
+ FlushAllocationSitesScratchpad();
+
+ if (FLAG_trace_pretenuring_statistics &&
+ (allocation_mementos_found > 0 ||
+ tenure_decisions > 0 ||
+ dont_tenure_decisions > 0)) {
+ PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
+ "#mementos, #tenure decisions, #donttenure decisions) "
+ "(%s, %d, %d, %d, %d, %d)\n",
+ use_scratchpad ? "use scratchpad" : "use list",
+ allocation_sites,
+ active_allocation_sites,
+ allocation_mementos_found,
+ tenure_decisions,
+ dont_tenure_decisions);
+ }
+ }
+}
+
+
+void Heap::DeoptMarkedAllocationSites() {
+ // TODO(hpayer): If iterating over the allocation sites list becomes a
+ // performance issue, use a cache heap data structure instead (similar to the
+ // allocation sites scratchpad).
+ Object* list_element = allocation_sites_list();
+ while (list_element->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(list_element);
+ if (site->deopt_dependent_code()) {
+ site->dependent_code()->MarkCodeForDeoptimization(
+ isolate_,
+ DependentCode::kAllocationSiteTenuringChangedGroup);
+ site->set_deopt_dependent_code(false);
+ }
+ list_element = site->weak_next();
+ }
+ Deoptimizer::DeoptimizeMarkedCode(isolate_);
+}
+
+
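
For orientation, the two functions above reduce to: walk a bounded scratchpad of recently active allocation sites unless it overflowed (in which case walk the full weak list), let each site digest its memento counts, and deoptimize dependent code if any tenuring decision changed. A compilable sketch of that control flow, with placeholder types and a made-up decision rule (Site, DigestFeedback and kScratchpadSize here are illustrative, not V8's), could read:

#include <cstddef>
#include <vector>

// Hypothetical stand-ins for V8's AllocationSite bookkeeping; the fields,
// threshold and decision rule are placeholders, not the engine's.
struct Site {
  int memento_found_count = 0;
  bool tenured = false;
  Site* weak_next = nullptr;  // intrusive weak list through all sites

  // Placeholder decision rule; returns true if the decision flipped and
  // dependent optimized code would have to be deoptimized.
  bool DigestFeedback() {
    bool decide_tenured = memento_found_count >= 8;
    bool changed = decide_tenured != tenured;
    tenured = decide_tenured;
    memento_found_count = 0;
    return changed;
  }
};

static const size_t kScratchpadSize = 256;

// Walk either the bounded scratchpad of sites touched since the last GC or,
// if it overflowed, the full weak list of every site in the heap.
bool ProcessFeedback(std::vector<Site*>* scratchpad, Site* all_sites_head) {
  bool use_scratchpad = scratchpad->size() < kScratchpadSize;
  bool trigger_deopt = false;
  if (use_scratchpad) {
    for (Site* site : *scratchpad) trigger_deopt |= site->DigestFeedback();
  } else {
    for (Site* s = all_sites_head; s != nullptr; s = s->weak_next) {
      trigger_deopt |= s->DigestFeedback();
    }
  }
  scratchpad->clear();  // flushed after every cycle, like the real scratchpad
  return trigger_deopt;
}
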
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
@@ -485,6 +596,9 @@ void Heap::GarbageCollectionEpilogue() {
ZapFromSpace();
}
+ // Process pretenuring feedback and update allocation sites.
+ ProcessPretenuringFeedback();
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -506,6 +620,8 @@ void Heap::GarbageCollectionEpilogue() {
}
}
+ UpdateMaximumCommitted();
+
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
@@ -567,6 +683,9 @@ void Heap::GarbageCollectionEpilogue() {
property_cell_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_code_space_committed()->AddSample(
static_cast<int>(code_space()->CommittedMemory() / KB));
+
+ isolate_->counters()->heap_sample_maximum_committed()->AddSample(
+ static_cast<int>(MaximumCommittedMemory() / KB));
}
#define UPDATE_COUNTERS_FOR_SPACE(space) \
@@ -607,12 +726,14 @@ void Heap::GarbageCollectionEpilogue() {
}
-void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
+void Heap::CollectAllGarbage(int flags,
+ const char* gc_reason,
+ const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
mark_compact_collector_.SetFlags(flags);
- CollectGarbage(OLD_POINTER_SPACE, gc_reason);
+ CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
mark_compact_collector_.SetFlags(kNoGCFlags);
}
@@ -629,7 +750,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
// Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
// Therefore stop recollecting after several attempts.
- if (FLAG_concurrent_recompilation) {
+ if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
isolate()->optimizing_compiler_thread()->Flush();
@@ -640,7 +761,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
+ if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
@@ -652,10 +773,25 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
}
-bool Heap::CollectGarbage(AllocationSpace space,
- GarbageCollector collector,
+void Heap::EnsureFillerObjectAtTop() {
+ // There may be an allocation memento behind every object in new space.
+ // If we evacuate a not full new space or if we are on the last page of
+ // the new space, then there may be uninitialized memory behind the top
+ // pointer of the new space page. We store a filler object there to
+ // identify the unused space.
+ Address from_top = new_space_.top();
+ Address from_limit = new_space_.limit();
+ if (from_top < from_limit) {
+ int remaining_in_page = static_cast<int>(from_limit - from_top);
+ CreateFillerObjectAt(from_top, remaining_in_page);
+ }
+}
+
+
+bool Heap::CollectGarbage(GarbageCollector collector,
const char* gc_reason,
- const char* collector_reason) {
+ const char* collector_reason,
+ const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate_);
@@ -668,6 +804,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
+ EnsureFillerObjectAtTop();
+
if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Scavenge during marking.\n");
@@ -710,7 +848,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
(collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
: isolate_->counters()->gc_compactor());
next_gc_likely_to_collect_more =
- PerformGarbageCollection(collector, &tracer);
+ PerformGarbageCollection(collector, &tracer, gc_callback_flags);
}
GarbageCollectionEpilogue();
@@ -730,7 +868,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
int Heap::NotifyContextDisposed() {
- if (FLAG_concurrent_recompilation) {
+ if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compiler_thread()->Flush();
}
@@ -740,16 +878,6 @@ int Heap::NotifyContextDisposed() {
}
-void Heap::PerformScavenge() {
- GCTracer tracer(this, NULL, NULL);
- if (incremental_marking()->IsStopped()) {
- PerformGarbageCollection(SCAVENGER, &tracer);
- } else {
- PerformGarbageCollection(MARK_COMPACTOR, &tracer);
- }
-}
-
-
void Heap::MoveElements(FixedArray* array,
int dst_index,
int src_index,
@@ -808,9 +936,7 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
}
-void Heap::ReserveSpace(
- int *sizes,
- Address *locations_out) {
+void Heap::ReserveSpace(int *sizes, Address *locations_out) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
@@ -908,6 +1034,8 @@ void Heap::ClearNormalizedMapCaches() {
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
+ if (start_new_space_size == 0) return;
+
double survival_rate =
(static_cast<double>(young_survivors_after_last_gc_) * 100) /
start_new_space_size;
@@ -937,8 +1065,10 @@ void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
survival_rate_ = survival_rate;
}
-bool Heap::PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer) {
+bool Heap::PerformGarbageCollection(
+ GarbageCollector collector,
+ GCTracer* tracer,
+ const v8::GCCallbackFlags gc_callback_flags) {
bool next_gc_likely_to_collect_more = false;
if (collector != SCAVENGER) {
@@ -1006,12 +1136,13 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
PrintPID("Limited new space size due to high promotion rate: %d MB\n",
new_space_.InitialCapacity() / MB);
}
- // Support for global pre-tenuring uses the high promotion mode as a
- // heuristic indicator of whether to pretenure or not, we trigger
- // deoptimization here to take advantage of pre-tenuring as soon as
- // possible.
+ // The high promotion mode is our indicator to turn on pretenuring. We have
+ // to deoptimize all optimized code in global pretenuring mode and all
+ // code which should be tenured in local pretenuring mode.
if (FLAG_pretenuring) {
- isolate_->stack_guard()->FullDeopt();
+ if (!FLAG_allocation_site_pretenuring) {
+ isolate_->stack_guard()->FullDeopt();
+ }
}
} else if (new_space_high_promotion_mode_active_ &&
IsStableOrDecreasingSurvivalTrend() &&
@@ -1024,9 +1155,9 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
new_space_.MaximumCapacity() / MB);
}
- // Trigger deoptimization here to turn off pre-tenuring as soon as
+ // Trigger deoptimization here to turn off global pretenuring as soon as
// possible.
- if (FLAG_pretenuring) {
+ if (FLAG_pretenuring && !FLAG_allocation_site_pretenuring) {
isolate_->stack_guard()->FullDeopt();
}
}
@@ -1066,7 +1197,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type);
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
}
#ifdef VERIFY_HEAP
@@ -1096,18 +1227,19 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
}
-void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
+void Heap::CallGCEpilogueCallbacks(GCType gc_type,
+ GCCallbackFlags gc_callback_flags) {
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
if (!gc_epilogue_callbacks_[i].pass_isolate_) {
v8::GCPrologueCallback callback =
reinterpret_cast<v8::GCPrologueCallback>(
gc_epilogue_callbacks_[i].callback);
- callback(gc_type, kNoGCCallbackFlags);
+ callback(gc_type, gc_callback_flags);
} else {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
gc_epilogue_callbacks_[i].callback(
- isolate, gc_type, kNoGCCallbackFlags);
+ isolate, gc_type, gc_callback_flags);
}
}
}
@@ -1118,6 +1250,8 @@ void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
LOG(isolate_, ResourceEvent("markcompact", "begin"));
+ uint64_t size_of_objects_before_gc = SizeOfObjects();
+
mark_compact_collector_.Prepare(tracer);
ms_count_++;
@@ -1134,6 +1268,10 @@ void Heap::MarkCompact(GCTracer* tracer) {
isolate_->counters()->objs_since_last_full()->Set(0);
flush_monomorphic_ics_ = false;
+
+ if (FLAG_allocation_site_pretenuring) {
+ EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
+ }
}
@@ -1360,8 +1498,6 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
- allocation_mementos_found_ = 0;
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
@@ -1484,9 +1620,6 @@ void Heap::Scavenge() {
promotion_queue_.Destroy();
- if (!FLAG_watch_ic_patching) {
- isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
- }
incremental_marking()->UpdateMarkingDequeAfterScavenge();
ScavengeWeakObjectRetainer weak_object_retainer(this);
@@ -1509,11 +1642,6 @@ void Heap::Scavenge() {
gc_state_ = NOT_IN_GC;
scavenges_since_last_idle_round_++;
-
- if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
- PrintF("AllocationMementos found during scavenge = %d\n",
- allocation_mementos_found_);
- }
}
@@ -1751,6 +1879,8 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
mark_compact_collector()->is_compacting();
ProcessArrayBuffers(retainer, record_slots);
ProcessNativeContexts(retainer, record_slots);
+ // TODO(mvstanton): AllocationSites only need to be processed during
+ // MARK_COMPACT, as they live in old space. Verify and address.
ProcessAllocationSites(retainer, record_slots);
}
@@ -1856,7 +1986,7 @@ struct WeakListVisitor<AllocationSite> {
}
static void VisitLiveObject(Heap* heap,
- AllocationSite* array_buffer,
+ AllocationSite* site,
WeakObjectRetainer* retainer,
bool record_slots) {}
@@ -1878,14 +2008,47 @@ void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
}
+void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
+ DisallowHeapAllocation no_allocation_scope;
+ Object* cur = allocation_sites_list();
+ bool marked = false;
+ while (cur->IsAllocationSite()) {
+ AllocationSite* casted = AllocationSite::cast(cur);
+ if (casted->GetPretenureMode() == flag) {
+ casted->ResetPretenureDecision();
+ casted->set_deopt_dependent_code(true);
+ marked = true;
+ }
+ cur = casted->weak_next();
+ }
+ if (marked) isolate_->stack_guard()->DeoptMarkedAllocationSites();
+}
+
+
+void Heap::EvaluateOldSpaceLocalPretenuring(
+ uint64_t size_of_objects_before_gc) {
+ uint64_t size_of_objects_after_gc = SizeOfObjects();
+ double old_generation_survival_rate =
+ (static_cast<double>(size_of_objects_after_gc) * 100) /
+ static_cast<double>(size_of_objects_before_gc);
+
+ if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
+ // Too many objects died in the old generation, pretenuring of wrong
+ // allocation sites may be the cause for that. We have to deopt all
+ // dependent code registered in the allocation sites to re-evaluate
+ // our pretenuring decisions.
+ ResetAllAllocationSitesDependentCode(TENURED);
+ if (FLAG_trace_pretenuring) {
+ PrintF("Deopt all allocation sites dependent code due to low survival "
+ "rate in the old generation %f\n", old_generation_survival_rate);
+ }
+ }
+}
+
+
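
The survival-rate check above is a single ratio: live bytes after the full GC divided by live bytes before it. If that percentage falls below a threshold, all TENURED decisions are reset and their dependent code is marked for deoptimization. A small sketch under an assumed threshold (kOldSurvivalRateLowThreshold here is a placeholder value, not V8's constant):

#include <cstdint>
#include <cstdio>

static const double kOldSurvivalRateLowThreshold = 20.0;  // percent, illustrative

bool ShouldResetTenuringDecisions(uint64_t size_before_gc,
                                  uint64_t size_after_gc) {
  if (size_before_gc == 0) return false;  // nothing to measure against
  double survival_rate =
      (static_cast<double>(size_after_gc) * 100.0) /
      static_cast<double>(size_before_gc);
  return survival_rate < kOldSurvivalRateLowThreshold;
}

int main() {
  // 8 MB live before the GC, 1 MB after: only 12.5% survived, so earlier
  // pretenuring decisions would be reset and dependent code deoptimized.
  std::printf("%d\n", ShouldResetTenuringDecisions(8u << 20, 1u << 20));
  return 0;
}
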
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
DisallowHeapAllocation no_allocation;
-
- // Both the external string table and the string table may contain
- // external strings, but neither lists them exhaustively, nor is the
- // intersection set empty. Therefore we iterate over the external string
- // table first, ignoring internalized strings, and then over the
- // internalized string table.
+ // All external strings are listed in the external string table.
class ExternalStringTableVisitorAdapter : public ObjectVisitor {
public:
@@ -1893,13 +2056,9 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
- // Visit non-internalized external strings,
- // since internalized strings are listed in the string table.
- if (!(*p)->IsInternalizedString()) {
- ASSERT((*p)->IsExternalString());
- visitor_->VisitExternalString(Utils::ToLocal(
- Handle<String>(String::cast(*p))));
- }
+ ASSERT((*p)->IsExternalString());
+ visitor_->VisitExternalString(Utils::ToLocal(
+ Handle<String>(String::cast(*p))));
}
}
private:
@@ -1907,25 +2066,6 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
} external_string_table_visitor(visitor);
external_string_table_.Iterate(&external_string_table_visitor);
-
- class StringTableVisitorAdapter : public ObjectVisitor {
- public:
- explicit StringTableVisitorAdapter(
- v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsExternalString()) {
- ASSERT((*p)->IsInternalizedString());
- visitor_->VisitExternalString(Utils::ToLocal(
- Handle<String>(String::cast(*p))));
- }
- }
- }
- private:
- v8::ExternalResourceVisitor* visitor_;
- } string_table_visitor(visitor);
-
- string_table()->IterateElements(&string_table_visitor);
}
@@ -2029,6 +2169,8 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitByteArray, &EvacuateByteArray);
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
+ table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
+ table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
table_.Register(kVisitNativeContext,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
@@ -2135,7 +2277,7 @@ class ScavengingVisitor : public StaticVisitorBase {
RecordCopiedObject(heap, target);
Isolate* isolate = heap->isolate();
HeapProfiler* heap_profiler = isolate->heap_profiler();
- if (heap_profiler->is_profiling()) {
+ if (heap_profiler->is_tracking_object_moves()) {
heap_profiler->ObjectMoveEvent(source->address(), target->address(),
size);
}
@@ -2161,7 +2303,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object,
int object_size) {
- SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
SLOW_ASSERT(object->Size() == object_size);
int allocation_size = object_size;
@@ -2269,6 +2411,24 @@ class ScavengingVisitor : public StaticVisitorBase {
}
+ static inline void EvacuateFixedTypedArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(
+ map, slot, object, object_size);
+ }
+
+
+ static inline void EvacuateFixedFloat64Array(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
+ EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
+ map, slot, object, object_size);
+ }
+
+
static inline void EvacuateByteArray(Map* map,
HeapObject** slot,
HeapObject* object) {
@@ -2386,7 +2546,7 @@ void Heap::SelectScavengingVisitorsTable() {
isolate()->logger()->is_logging() ||
isolate()->cpu_profiler()->is_profiling() ||
(isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_profiling());
+ isolate()->heap_profiler()->is_tracking_object_moves());
if (!incremental_marking()->IsMarking()) {
if (!logging_and_profiling) {
@@ -2449,7 +2609,7 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
reinterpret_cast<Map*>(result)->set_bit_field(0);
reinterpret_cast<Map*>(result)->set_bit_field2(0);
- int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+ int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true);
reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
return result;
@@ -2481,7 +2641,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_instance_descriptors(empty_descriptor_array());
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
- int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+ int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true);
map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
@@ -2524,8 +2684,7 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
if (!maybe_info->To(&info)) return maybe_info;
}
info->initialize_storage();
- info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
+ info->set_feedback_vector(empty_fixed_array(), SKIP_WRITE_BARRIER);
return info;
}
@@ -2585,6 +2744,12 @@ bool Heap::CreateInitialMaps() {
}
set_oddball_map(Map::cast(obj));
+ { MaybeObject* maybe_obj =
+ AllocatePartialMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_constant_pool_array_map(Map::cast(obj));
+
// Allocate the empty array.
{ MaybeObject* maybe_obj = AllocateEmptyFixedArray();
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2610,6 +2775,12 @@ bool Heap::CreateInitialMaps() {
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
+ // Allocate the constant pool array.
+ { MaybeObject* maybe_obj = AllocateEmptyConstantPoolArray();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
+
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
@@ -2627,6 +2798,12 @@ bool Heap::CreateInitialMaps() {
oddball_map()->init_back_pointer(undefined_value());
oddball_map()->set_instance_descriptors(empty_descriptor_array());
+ constant_pool_array_map()->set_code_cache(empty_fixed_array());
+ constant_pool_array_map()->set_dependent_code(
+ DependentCode::cast(empty_fixed_array()));
+ constant_pool_array_map()->init_back_pointer(undefined_value());
+ constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
+
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
meta_map()->set_constructor(null_value());
@@ -2637,297 +2814,119 @@ bool Heap::CreateInitialMaps() {
oddball_map()->set_prototype(null_value());
oddball_map()->set_constructor(null_value());
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_cow_array_map(Map::cast(obj));
- ASSERT(fixed_array_map() != fixed_cow_array_map());
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_scope_info_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_heap_number_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_symbol_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_foreign_map(Map::cast(obj));
+ constant_pool_array_map()->set_prototype(null_value());
+ constant_pool_array_map()->set_constructor(null_value());
- for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
- const StringTypeTable& entry = string_type_table[i];
- { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
- if (!maybe_obj->ToObject(&obj)) return false;
+ { // Map allocation
+#define ALLOCATE_MAP(instance_type, size, field_name) \
+ { Map* map; \
+ if (!AllocateMap((instance_type), size)->To(&map)) return false; \
+ set_##field_name##_map(map); \
}
- roots_[entry.index] = Map::cast(obj);
- }
-
- { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undetectable_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- { MaybeObject* maybe_obj =
- AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undetectable_ascii_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_fixed_double_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_constant_pool_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_free_space_map(Map::cast(obj));
- { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_byte_array(ByteArray::cast(obj));
+#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
+ ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
- { MaybeObject* maybe_obj =
- AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_pixel_array_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
+ ASSERT(fixed_array_map() != fixed_cow_array_map());
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_byte_array_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+ ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
+ ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
+ ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_byte_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_short_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_short_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_int_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_unsigned_int_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_float_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_non_strict_arguments_elements_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
- ExternalArray::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_external_double_array_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_byte_array(ExternalArray::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateEmptyExternalArray(kExternalUnsignedByteArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
-
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_short_array(ExternalArray::cast(obj));
+ for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
+ const StringTypeTable& entry = string_type_table[i];
+ { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ roots_[entry.index] = Map::cast(obj);
+ }
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
- kExternalUnsignedShortArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
+ ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
+ undetectable_string_map()->set_is_undetectable();
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_int_array(ExternalArray::cast(obj));
+ ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
+ undetectable_ascii_string_map()->set_is_undetectable();
- { MaybeObject* maybe_obj =
- AllocateEmptyExternalArray(kExternalUnsignedIntArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
+ ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
+ ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_float_array(ExternalArray::cast(obj));
+#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+ ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
+ external_##type##_array)
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_double_array(ExternalArray::cast(obj));
+ TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
+#undef ALLOCATE_EXTERNAL_ARRAY_MAP
- { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_external_pixel_array(ExternalArray::cast(obj));
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+ ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, \
+ fixed_##type##_array)
- { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_code_map(Map::cast(obj));
+ TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
+#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
- { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_cell_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, non_strict_arguments_elements)
- { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
- PropertyCell::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_global_property_cell_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
- { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_one_pointer_filler_map(Map::cast(obj));
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
+ ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
+ ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
+ ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
- { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_two_pointer_filler_map(Map::cast(obj));
- for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
- const StructTable& entry = struct_table[i];
- { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
- if (!maybe_obj->ToObject(&obj)) return false;
+ for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
+ const StructTable& entry = struct_table[i];
+ Map* map;
+ if (!AllocateMap(entry.type, entry.size)->To(&map))
+ return false;
+ roots_[entry.index] = map;
}
- roots_[entry.index] = Map::cast(obj);
- }
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_hash_table_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_function_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_catch_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_with_context_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_block_context_map(Map::cast(obj));
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
+ native_context_map()->set_dictionary_map(true);
+ native_context_map()->set_visitor_id(
+ StaticVisitorBase::kVisitNativeContext);
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_module_context_map(Map::cast(obj));
-
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_global_context_map(Map::cast(obj));
+ ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
+ shared_function_info)
- { MaybeObject* maybe_obj =
- AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
+ ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
+ message_object)
+ ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
+ external)
+ external_map()->set_is_extensible(false);
+#undef ALLOCATE_VARSIZE_MAP
+#undef ALLOCATE_MAP
}
- Map* native_context_map = Map::cast(obj);
- native_context_map->set_dictionary_map(true);
- native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
- set_native_context_map(native_context_map);
- { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
- SharedFunctionInfo::kAlignedSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_shared_function_info_map(Map::cast(obj));
+ { // Empty arrays
+ { ByteArray* byte_array;
+ if (!AllocateByteArray(0, TENURED)->To(&byte_array)) return false;
+ set_empty_byte_array(byte_array);
+ }
- { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
- JSMessageObject::kSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_message_object_map(Map::cast(obj));
+#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
+ { ExternalArray* obj; \
+ if (!AllocateEmptyExternalArray(kExternal##Type##Array)->To(&obj)) \
+ return false; \
+ set_empty_external_##type##_array(obj); \
+ }
- Map* external_map;
- { MaybeObject* maybe_obj =
- AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
- if (!maybe_obj->To(&external_map)) return false;
+ TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
+#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
}
- external_map->set_is_extensible(false);
- set_external_map(external_map);
-
ASSERT(!InNewSpace(empty_fixed_array()));
return true;
}
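
The CreateInitialMaps rewrite leans on the X-macro idiom: one central list macro (TYPED_ARRAYS in V8) is re-expanded with different one-line macros (ALLOCATE_EXTERNAL_ARRAY_MAP, ALLOCATE_EMPTY_EXTERNAL_ARRAY, and later the *_TO_ROOT_INDEX cases) so each typed-array kind is declared exactly once. A self-contained illustration of the idiom, with an invented MY_TYPES list rather than V8's:

#include <cstdio>

// Central list macro: each entry supplies the name variants the per-use
// macros need. MY_TYPES is a made-up stand-in for TYPED_ARRAYS.
#define MY_TYPES(V)            \
  V(Uint8, uint8, UINT8)       \
  V(Int32, int32, INT32)       \
  V(Float64, float64, FLOAT64)

// Expansion 1: one storage slot per entry (stands in for set_##name##_map).
#define DECLARE_SLOT(Type, type, TYPE) int type##_map_slot = 0;
MY_TYPES(DECLARE_SLOT)
#undef DECLARE_SLOT

// Expansion 2: one enumerator per entry.
enum ArrayType {
#define ENUM_ENTRY(Type, type, TYPE) k##Type##Array,
  MY_TYPES(ENUM_ENTRY)
#undef ENUM_ENTRY
};

// Expansion 3: one switch arm per entry, as in RootIndexForExternalArrayType.
const char* Name(ArrayType t) {
  switch (t) {
#define NAME_CASE(Type, type, TYPE) case k##Type##Array: return #Type;
    MY_TYPES(NAME_CASE)
#undef NAME_CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", Name(kInt32Array));  // prints "Int32"
  return 0;
}

Adding a new typed-array kind then means touching only the central list; every allocation site, root index and switch follows automatically.
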
@@ -2937,7 +2936,8 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
int size = HeapNumber::kSize;
- STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
+ STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
+
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
@@ -2953,7 +2953,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
- STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize);
+ STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
@@ -2967,7 +2967,7 @@ MaybeObject* Heap::AllocateCell(Object* value) {
MaybeObject* Heap::AllocatePropertyCell() {
int size = PropertyCell::kSize;
- STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize);
+ STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
Object* result;
MaybeObject* maybe_result =
@@ -2980,7 +2980,7 @@ MaybeObject* Heap::AllocatePropertyCell() {
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
cell->set_value(the_hole_value());
- cell->set_type(Type::None());
+ cell->set_type(HeapType::None());
return result;
}
@@ -3066,6 +3066,17 @@ void Heap::CreateFixedStubs() {
  // This eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
HandleScope scope(isolate());
+
+ // Create stubs that should be there, so we don't unexpectedly have to
+ // create them if we need them during the creation of another stub.
+ // Stub creation mixes raw pointers and handles in an unsafe manner so
+ // we cannot create stubs while we are creating stubs.
+ CodeStub::GenerateStubsAheadOfTime(isolate());
+
+ // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
+ // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
+ // is created.
+
// gcc-4.4 has problem generating correct code of following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
@@ -3076,12 +3087,6 @@ void Heap::CreateFixedStubs() {
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime(isolate());
}
@@ -3269,6 +3274,11 @@ bool Heap::CreateInitialObjects() {
}
set_natives_source_cache(FixedArray::cast(obj));
+ { MaybeObject* maybe_obj = AllocateCell(undefined_value());
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_undefined_cell(Cell::cast(obj));
+
// Allocate object to hold object observation state.
{ MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -3278,14 +3288,25 @@ bool Heap::CreateInitialObjects() {
}
set_observation_state(JSObject::cast(obj));
+ // Allocate object to hold object microtask state.
+ { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_microtask_state(JSObject::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
+ Symbol::cast(obj)->set_is_private(true);
set_frozen_symbol(Symbol::cast(obj));
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
+ Symbol::cast(obj)->set_is_private(true);
set_elements_transition_symbol(Symbol::cast(obj));
{ MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
@@ -3297,11 +3318,23 @@ bool Heap::CreateInitialObjects() {
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
+ Symbol::cast(obj)->set_is_private(true);
set_observed_symbol(Symbol::cast(obj));
+ { MaybeObject* maybe_obj = AllocateFixedArray(0, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_materialized_objects(FixedArray::cast(obj));
+
// Handling of script id generation is in Factory::NewScript.
set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
+ { MaybeObject* maybe_obj = AllocateAllocationSitesScratchpad();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_allocation_sites_scratchpad(FixedArray::cast(obj));
+ InitializeAllocationSitesScratchpad();
+
// Initialize keyed lookup cache.
isolate_->keyed_lookup_cache()->Clear();
@@ -3549,8 +3582,7 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
MaybeObject* Heap::NumberToString(Object* number,
- bool check_number_string_cache,
- PretenureFlag pretenure) {
+ bool check_number_string_cache) {
isolate_->counters()->number_to_string_runtime()->Increment();
if (check_number_string_cache) {
Object* cached = GetNumberStringCache(number);
@@ -3571,8 +3603,11 @@ MaybeObject* Heap::NumberToString(Object* number,
}
Object* js_string;
+
+ // We tenure the allocated string since it is referenced from the
+ // number-string cache which lives in the old space.
MaybeObject* maybe_js_string =
- AllocateStringFromOneByte(CStrVector(str), pretenure);
+ AllocateStringFromOneByte(CStrVector(str), TENURED);
if (maybe_js_string->ToObject(&js_string)) {
SetNumberStringCache(number, String::cast(js_string));
}
@@ -3589,6 +3624,45 @@ MaybeObject* Heap::Uint32ToString(uint32_t value,
}
+MaybeObject* Heap::AllocateAllocationSitesScratchpad() {
+ MaybeObject* maybe_obj =
+ AllocateFixedArray(kAllocationSiteScratchpadSize, TENURED);
+ return maybe_obj;
+}
+
+
+void Heap::FlushAllocationSitesScratchpad() {
+ for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
+ allocation_sites_scratchpad()->set_undefined(i);
+ }
+ allocation_sites_scratchpad_length_ = 0;
+}
+
+
+void Heap::InitializeAllocationSitesScratchpad() {
+ ASSERT(allocation_sites_scratchpad()->length() ==
+ kAllocationSiteScratchpadSize);
+ for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
+ allocation_sites_scratchpad()->set_undefined(i);
+ }
+}
+
+
+void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
+ if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
+ // We cannot use the normal write-barrier because slots need to be
+ // recorded with non-incremental marking as well. We have to explicitly
+ // record the slot to take evacuation candidates into account.
+ allocation_sites_scratchpad()->set(
+ allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
+ Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
+ allocation_sites_scratchpad_length_);
+ mark_compact_collector()->RecordSlot(slot, slot, *slot);
+ allocation_sites_scratchpad_length_++;
+ }
+}
+
+
Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}
@@ -3597,51 +3671,52 @@ Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
Heap::RootListIndex Heap::RootIndexForExternalArrayType(
ExternalArrayType array_type) {
switch (array_type) {
- case kExternalByteArray:
- return kExternalByteArrayMapRootIndex;
- case kExternalUnsignedByteArray:
- return kExternalUnsignedByteArrayMapRootIndex;
- case kExternalShortArray:
- return kExternalShortArrayMapRootIndex;
- case kExternalUnsignedShortArray:
- return kExternalUnsignedShortArrayMapRootIndex;
- case kExternalIntArray:
- return kExternalIntArrayMapRootIndex;
- case kExternalUnsignedIntArray:
- return kExternalUnsignedIntArrayMapRootIndex;
- case kExternalFloatArray:
- return kExternalFloatArrayMapRootIndex;
- case kExternalDoubleArray:
- return kExternalDoubleArrayMapRootIndex;
- case kExternalPixelArray:
- return kExternalPixelArrayMapRootIndex;
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return kExternal##Type##ArrayMapRootIndex;
+
+ TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
+#undef ARRAY_TYPE_TO_ROOT_INDEX
+
default:
UNREACHABLE();
return kUndefinedValueRootIndex;
}
}
+
+Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
+ return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
+}
+
+
+Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
+ ExternalArrayType array_type) {
+ switch (array_type) {
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return kFixed##Type##ArrayMapRootIndex;
+
+ TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
+#undef ARRAY_TYPE_TO_ROOT_INDEX
+
+ default:
+ UNREACHABLE();
+ return kUndefinedValueRootIndex;
+ }
+}
+
+
Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
ElementsKind elementsKind) {
switch (elementsKind) {
- case EXTERNAL_BYTE_ELEMENTS:
- return kEmptyExternalByteArrayRootIndex;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return kEmptyExternalUnsignedByteArrayRootIndex;
- case EXTERNAL_SHORT_ELEMENTS:
- return kEmptyExternalShortArrayRootIndex;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return kEmptyExternalUnsignedShortArrayRootIndex;
- case EXTERNAL_INT_ELEMENTS:
- return kEmptyExternalIntArrayRootIndex;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- return kEmptyExternalUnsignedIntArrayRootIndex;
- case EXTERNAL_FLOAT_ELEMENTS:
- return kEmptyExternalFloatArrayRootIndex;
- case EXTERNAL_DOUBLE_ELEMENTS:
- return kEmptyExternalDoubleArrayRootIndex;
- case EXTERNAL_PIXEL_ELEMENTS:
- return kEmptyExternalPixelArrayRootIndex;
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ return kEmptyExternal##Type##ArrayRootIndex;
+
+ TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
+
default:
UNREACHABLE();
return kUndefinedValueRootIndex;
@@ -3655,16 +3730,11 @@ ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
}
-
-
MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
// We need to distinguish the minus zero value and this cannot be
// done after conversion to int. Doing this by comparing bit
// patterns is faster than using fpclassify() et al.
- static const DoubleRepresentation minus_zero(-0.0);
-
- DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) {
+ if (IsMinusZero(value)) {
return AllocateHeapNumber(-0.0, pretenure);
}
@@ -3680,7 +3750,7 @@ MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
- STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Foreign* result;
MaybeObject* maybe_result = Allocate(foreign_map(), space);
@@ -3734,7 +3804,6 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
int start_position,
int end_position,
Object* script,
- Object* stack_trace,
Object* stack_frames) {
Object* result;
{ MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
@@ -3749,268 +3818,11 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
message->set_start_position(start_position);
message->set_end_position(end_position);
message->set_script(script);
- message->set_stack_trace(stack_trace);
message->set_stack_frames(stack_frames);
return result;
}
-
-// Returns true for a character in a range. Both limits are inclusive.
-static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
- // This makes uses of the the unsigned wraparound.
- return character - from <= to - from;
-}
-
-
-MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
- Heap* heap,
- uint16_t c1,
- uint16_t c2) {
- String* result;
- // Numeric strings have a different hash algorithm not known by
- // LookupTwoCharsStringIfExists, so we skip this step for such strings.
- if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
- heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
- return result;
- // Now we know the length is 2, we might as well make use of that fact
- // when building the new string.
- } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
- // We can do this.
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- dest[0] = static_cast<uint8_t>(c1);
- dest[1] = static_cast<uint8_t>(c2);
- return result;
- } else {
- Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- dest[0] = c1;
- dest[1] = c2;
- return result;
- }
-}
-
-
-MaybeObject* Heap::AllocateConsString(String* first, String* second) {
- int first_length = first->length();
- if (first_length == 0) {
- return second;
- }
-
- int second_length = second->length();
- if (second_length == 0) {
- return first;
- }
-
- int length = first_length + second_length;
-
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the string
- // table to prevent creation of many unneccesary strings.
- if (length == 2) {
- uint16_t c1 = first->Get(0);
- uint16_t c2 = second->Get(0);
- return MakeOrFindTwoCharacterString(this, c1, c2);
- }
-
- bool first_is_one_byte = first->IsOneByteRepresentation();
- bool second_is_one_byte = second->IsOneByteRepresentation();
- bool is_one_byte = first_is_one_byte && second_is_one_byte;
- // Make sure that an out of memory exception is thrown if the length
- // of the new cons string is too large.
- if (length > String::kMaxLength || length < 0) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x4);
- }
-
- bool is_one_byte_data_in_two_byte_string = false;
- if (!is_one_byte) {
- // At least one of the strings uses two-byte representation so we
- // can't use the fast case code for short ASCII strings below, but
- // we can try to save memory if all chars actually fit in ASCII.
- is_one_byte_data_in_two_byte_string =
- first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
- if (is_one_byte_data_in_two_byte_string) {
- isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
- }
- }
-
- // If the resulting string is small make a flat string.
- if (length < ConsString::kMinLength) {
- // Note that neither of the two inputs can be a slice because:
- STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
- ASSERT(first->IsFlat());
- ASSERT(second->IsFlat());
- if (is_one_byte) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawOneByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- // Copy first part.
- const uint8_t* src;
- if (first->IsExternalString()) {
- src = ExternalAsciiString::cast(first)->GetChars();
- } else {
- src = SeqOneByteString::cast(first)->GetChars();
- }
- for (int i = 0; i < first_length; i++) *dest++ = src[i];
- // Copy second part.
- if (second->IsExternalString()) {
- src = ExternalAsciiString::cast(second)->GetChars();
- } else {
- src = SeqOneByteString::cast(second)->GetChars();
- }
- for (int i = 0; i < second_length; i++) *dest++ = src[i];
- return result;
- } else {
- if (is_one_byte_data_in_two_byte_string) {
- Object* result;
- { MaybeObject* maybe_result = AllocateRawOneByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
- isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
- return result;
- }
-
- Object* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Copy the characters into the new object.
- uc16* dest = SeqTwoByteString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
- return result;
- }
- }
-
- Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
- cons_ascii_string_map() : cons_string_map();
-
- Object* result;
- { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- DisallowHeapAllocation no_gc;
- ConsString* cons_string = ConsString::cast(result);
- WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
- cons_string->set_length(length);
- cons_string->set_hash_field(String::kEmptyHashField);
- cons_string->set_first(first, mode);
- cons_string->set_second(second, mode);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateSubString(String* buffer,
- int start,
- int end,
- PretenureFlag pretenure) {
- int length = end - start;
- if (length <= 0) {
- return empty_string();
- } else if (length == 1) {
- return LookupSingleCharacterStringFromCode(buffer->Get(start));
- } else if (length == 2) {
- // Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the string
- // table to prevent creation of many unnecessary strings.
- uint16_t c1 = buffer->Get(start);
- uint16_t c2 = buffer->Get(start + 1);
- return MakeOrFindTwoCharacterString(this, c1, c2);
- }
-
- // Make an attempt to flatten the buffer to reduce access time.
- buffer = buffer->TryFlattenGetString();
-
- if (!FLAG_string_slices ||
- !buffer->IsFlat() ||
- length < SlicedString::kMinLength ||
- pretenure == TENURED) {
- Object* result;
- // WriteToFlat takes care of the case when an indirect string has a
- // different encoding from its underlying string. These encodings may
- // differ because of externalization.
- bool is_one_byte = buffer->IsOneByteRepresentation();
- { MaybeObject* maybe_result = is_one_byte
- ? AllocateRawOneByteString(length, pretenure)
- : AllocateRawTwoByteString(length, pretenure);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- String* string_result = String::cast(result);
- // Copy the characters into the new object.
- if (is_one_byte) {
- ASSERT(string_result->IsOneByteRepresentation());
- uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
- String::WriteToFlat(buffer, dest, start, end);
- } else {
- ASSERT(string_result->IsTwoByteRepresentation());
- uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
- String::WriteToFlat(buffer, dest, start, end);
- }
- return result;
- }
-
- ASSERT(buffer->IsFlat());
-#if VERIFY_HEAP
- if (FLAG_verify_heap) {
- buffer->StringVerify();
- }
-#endif
-
- Object* result;
- // When slicing an indirect string we use its encoding for a newly created
- // slice and don't check the encoding of the underlying string. This is safe
- // even if the encodings are different because of externalization. If an
- // indirect ASCII string is pointing to a two-byte string, the two-byte char
- // codes of the underlying string must still fit into ASCII (because
- // externalization must not change char codes).
- { Map* map = buffer->IsOneByteRepresentation()
- ? sliced_ascii_string_map()
- : sliced_string_map();
- MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- DisallowHeapAllocation no_gc;
- SlicedString* sliced_string = SlicedString::cast(result);
- sliced_string->set_length(length);
- sliced_string->set_hash_field(String::kEmptyHashField);
- if (buffer->IsConsString()) {
- ConsString* cons = ConsString::cast(buffer);
- ASSERT(cons->second()->length() == 0);
- sliced_string->set_parent(cons->first());
- sliced_string->set_offset(start);
- } else if (buffer->IsSlicedString()) {
- // Prevent nesting sliced strings.
- SlicedString* parent_slice = SlicedString::cast(buffer);
- sliced_string->set_parent(parent_slice->parent());
- sliced_string->set_offset(start + parent_slice->offset());
- } else {
- sliced_string->set_parent(buffer);
- sliced_string->set_offset(start);
- }
- ASSERT(sliced_string->parent()->IsSeqString() ||
- sliced_string->parent()->IsExternalString());
- return result;
-}
-
-
MaybeObject* Heap::AllocateExternalStringFromAscii(
const ExternalAsciiString::Resource* resource) {
size_t length = resource->length();
@@ -4071,8 +3883,8 @@ MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
uint8_t buffer[1];
buffer[0] = static_cast<uint8_t>(code);
Object* result;
- MaybeObject* maybe_result =
- InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
+ OneByteStringKey key(Vector<const uint8_t>(buffer, 1), HashSeed());
+ MaybeObject* maybe_result = InternalizeStringWithKey(&key);
if (!maybe_result->ToObject(&result)) return maybe_result;
single_character_string_cache()->set(code, result);
@@ -4140,6 +3952,57 @@ MaybeObject* Heap::AllocateExternalArray(int length,
return result;
}
+static void ForFixedTypedArray(ExternalArrayType array_type,
+ int* element_size,
+ ElementsKind* element_kind) {
+ switch (array_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ *element_size = size; \
+ *element_kind = TYPE##_ELEMENTS; \
+ return;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ default:
+ *element_size = 0; // Bogus
+ *element_kind = UINT8_ELEMENTS; // Bogus
+ UNREACHABLE();
+ }
+}
+
+
+MaybeObject* Heap::AllocateFixedTypedArray(int length,
+ ExternalArrayType array_type,
+ PretenureFlag pretenure) {
+ int element_size;
+ ElementsKind elements_kind;
+ ForFixedTypedArray(array_type, &element_size, &elements_kind);
+ int size = OBJECT_POINTER_ALIGN(
+ length * element_size + FixedTypedArrayBase::kDataOffset);
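+ // On 32-bit hosts a float64-backed array may need realignment, so reserve an
+ // extra word for the EnsureDoubleAligned adjustment below.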
+#ifndef V8_HOST_ARCH_64_BIT
+ if (array_type == kExternalFloat64Array) {
+ size += kPointerSize;
+ }
+#endif
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+ HeapObject* object;
+ MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!maybe_object->To(&object)) return maybe_object;
+
+ if (array_type == kExternalFloat64Array) {
+ object = EnsureDoubleAligned(this, object, size);
+ }
+
+ FixedTypedArrayBase* elements =
+ reinterpret_cast<FixedTypedArrayBase*>(object);
+ elements->set_map(MapForFixedTypedArray(array_type));
+ elements->set_length(length);
+ return elements;
+}
+
MaybeObject* Heap::CreateCode(const CodeDesc& desc,
Code::Flags flags,
@@ -4165,7 +4028,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (force_lo_space) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
- maybe_result = code_space_->AllocateRaw(obj_size);
+ maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
}
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
@@ -4187,12 +4050,11 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(reloc_info);
code->set_flags(flags);
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
- code->set_check_type(RECEIVER_MAP_CHECK);
- }
+ code->set_raw_kind_specific_flags1(0);
+ code->set_raw_kind_specific_flags2(0);
code->set_is_crankshafted(crankshafted);
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
+ code->set_raw_type_feedback_info(undefined_value());
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
@@ -4200,6 +4062,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
+ code->set_constant_pool(empty_constant_pool_array());
#ifdef ENABLE_DEBUGGER_SUPPORT
if (code->kind() == Code::FUNCTION) {
@@ -4236,7 +4099,7 @@ MaybeObject* Heap::CopyCode(Code* code) {
if (obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
- maybe_result = code_space_->AllocateRaw(obj_size);
+ maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
}
Object* result;
@@ -4279,7 +4142,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
if (new_obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
} else {
- maybe_result = code_space_->AllocateRaw(new_obj_size);
+ maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
}
Object* result;
@@ -4313,6 +4176,17 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
}
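+// Shared helper that stamps the AllocationMemento map, links the memento to
+// its AllocationSite, and bumps the site's memento create count when
+// FLAG_allocation_site_pretenuring is set.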
+void Heap::InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site) {
+ memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(allocation_site->map() == allocation_site_map());
+ memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+ if (FLAG_allocation_site_pretenuring) {
+ allocation_site->IncrementMementoCreateCount();
+ }
+}
+
+
MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
Handle<AllocationSite> allocation_site) {
ASSERT(gc_state_ == NOT_IN_GC);
@@ -4329,9 +4203,7 @@ MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
HeapObject::cast(result)->set_map_no_write_barrier(map);
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- ASSERT(allocation_site->map() == allocation_site_map());
- alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
+ InitializeAllocationMemento(alloc_memento, *allocation_site);
return result;
}
@@ -4368,39 +4240,6 @@ void Heap::InitializeFunction(JSFunction* function,
}
-MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
- // Make sure to use globals from the function's context, since the function
- // can be from a different context.
- Context* native_context = function->context()->native_context();
- Map* new_map;
- if (function->shared()->is_generator()) {
- // Generator prototypes can share maps since they don't have "constructor"
- // properties.
- new_map = native_context->generator_object_prototype_map();
- } else {
- // Each function prototype gets a fresh map to avoid unwanted sharing of
- // maps between prototypes of different constructors.
- JSFunction* object_function = native_context->object_function();
- ASSERT(object_function->has_initial_map());
- MaybeObject* maybe_map = object_function->initial_map()->Copy();
- if (!maybe_map->To(&new_map)) return maybe_map;
- }
-
- Object* prototype;
- MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
-
- if (!function->shared()->is_generator()) {
- MaybeObject* maybe_failure =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributesTrampoline(
- constructor_string(), function, DONT_ENUM);
- if (maybe_failure->IsFailure()) return maybe_failure;
- }
-
- return prototype;
-}
-
-
MaybeObject* Heap::AllocateFunction(Map* function_map,
SharedFunctionInfo* shared,
Object* prototype,
@@ -4472,48 +4311,6 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
}
-MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
- ASSERT(!fun->has_initial_map());
-
- // First create a new map with the size and number of in-object properties
- // suggested by the function.
- InstanceType instance_type;
- int instance_size;
- int in_object_properties;
- if (fun->shared()->is_generator()) {
- instance_type = JS_GENERATOR_OBJECT_TYPE;
- instance_size = JSGeneratorObject::kSize;
- in_object_properties = 0;
- } else {
- instance_type = JS_OBJECT_TYPE;
- instance_size = fun->shared()->CalculateInstanceSize();
- in_object_properties = fun->shared()->CalculateInObjectProperties();
- }
- Map* map;
- MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
- if (!maybe_map->To(&map)) return maybe_map;
-
- // Fetch or allocate prototype.
- Object* prototype;
- if (fun->has_instance_prototype()) {
- prototype = fun->instance_prototype();
- } else {
- MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
- if (!maybe_prototype->To(&prototype)) return maybe_prototype;
- }
- map->set_inobject_properties(in_object_properties);
- map->set_unused_property_fields(in_object_properties);
- map->set_prototype(prototype);
- ASSERT(map->has_fast_object_elements());
-
- if (!fun->shared()->is_generator()) {
- fun->shared()->StartInobjectSlackTracking(map);
- }
-
- return map;
-}
-
-
void Heap::InitializeJSObjectFromMap(JSObject* obj,
FixedArray* properties,
Map* map) {
@@ -4620,15 +4417,7 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
PretenureFlag pretenure) {
- // Allocate the initial map if absent.
- if (!constructor->has_initial_map()) {
- Object* initial_map;
- { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
- if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
- }
- constructor->set_initial_map(Map::cast(initial_map));
- Map::cast(initial_map)->set_constructor(constructor);
- }
+ ASSERT(constructor->has_initial_map());
  // Allocate the object based on the constructor's initial map.
MaybeObject* result = AllocateJSObjectFromMap(
constructor->initial_map(), pretenure);
@@ -4643,21 +4432,12 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
Handle<AllocationSite> allocation_site) {
- // Allocate the initial map if absent.
- if (!constructor->has_initial_map()) {
- Object* initial_map;
- { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
- if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
- }
- constructor->set_initial_map(Map::cast(initial_map));
- Map::cast(initial_map)->set_constructor(constructor);
- }
+ ASSERT(constructor->has_initial_map());
  // Allocate the object based on the constructor's initial map, or the payload
  // advice.
Map* initial_map = constructor->initial_map();
- Smi* smi = Smi::cast(allocation_site->transition_info());
- ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ ElementsKind to_kind = allocation_site->GetElementsKind();
AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
if (to_kind != initial_map->elements_kind()) {
MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
@@ -4683,23 +4463,6 @@ MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
}
-MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
- ASSERT(function->shared()->is_generator());
- Map *map;
- if (function->has_initial_map()) {
- map = function->initial_map();
- } else {
- // Allocate the initial map if absent.
- MaybeObject* maybe_map = AllocateInitialMap(function);
- if (!maybe_map->To(&map)) return maybe_map;
- function->set_initial_map(map);
- map->set_constructor(function);
- }
- ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
- return AllocateJSObjectFromMap(map);
-}
-
-
MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
@@ -4873,8 +4636,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
int object_size = map->instance_size();
Object* clone;
- ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
- map->instance_type() == JS_ARRAY_TYPE));
+ ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
@@ -4899,7 +4661,8 @@ MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
{ int adjusted_object_size = site != NULL
? object_size + AllocationMemento::kSize
: object_size;
- MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
+ MaybeObject* maybe_clone =
+ AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
SLOW_ASSERT(InNewSpace(clone));
@@ -4912,16 +4675,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
if (site != NULL) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(clone) + object_size);
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- ASSERT(site->map() == allocation_site_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
- HeapProfiler* profiler = isolate()->heap_profiler();
- if (profiler->is_tracking_allocations()) {
- profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
- object_size);
- profiler->NewObjectEvent(alloc_memento->address(),
- AllocationMemento::kSize);
- }
+ InitializeAllocationMemento(alloc_memento, site);
}
}
@@ -4983,8 +4737,9 @@ MaybeObject* Heap::ReinitializeJSReceiver(
SharedFunctionInfo* shared = NULL;
if (type == JS_FUNCTION_TYPE) {
String* name;
- maybe =
- InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
+ OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"),
+ HashSeed());
+ maybe = InternalizeStringWithKey(&key);
if (!maybe->To<String>(&name)) return maybe;
maybe = AllocateSharedFunctionInfo(name);
if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
@@ -5045,7 +4800,7 @@ MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
- PretenureFlag pretenure) {
+ PretenureFlag pretenure) {
int length = string.length();
if (length == 1) {
return Heap::LookupSingleCharacterStringFromCode(string[0]);
@@ -5548,16 +5303,31 @@ MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
constant_pool->SetEntryCounts(number_of_int64_entries,
number_of_ptr_entries,
number_of_int32_entries);
- MemsetPointer(
- HeapObject::RawField(
- constant_pool,
- constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
- undefined_value(),
- number_of_ptr_entries);
+ if (number_of_ptr_entries > 0) {
+ MemsetPointer(
+ HeapObject::RawField(
+ constant_pool,
+ constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
+ undefined_value(),
+ number_of_ptr_entries);
+ }
return constant_pool;
}
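+// Allocates a ConstantPoolArray with no int64, pointer or int32 entries,
+// i.e. the empty constant pool installed on freshly created code objects.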
+MaybeObject* Heap::AllocateEmptyConstantPoolArray() {
+ int size = ConstantPoolArray::SizeFor(0, 0, 0);
+ Object* result;
+ { MaybeObject* maybe_result =
+ AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map());
+ ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0);
+ return result;
+}
+
+
MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result;
{ MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
@@ -5572,7 +5342,7 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateSymbol() {
// Statically ensure that it is safe to allocate symbols in paged spaces.
- STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
+ STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
Object* result;
MaybeObject* maybe =
@@ -5593,12 +5363,22 @@ MaybeObject* Heap::AllocateSymbol() {
Symbol::cast(result)->set_hash_field(
Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
Symbol::cast(result)->set_name(undefined_value());
+ Symbol::cast(result)->set_flags(Smi::FromInt(0));
- ASSERT(result->IsSymbol());
+ ASSERT(!Symbol::cast(result)->is_private());
return result;
}
+MaybeObject* Heap::AllocatePrivateSymbol() {
+ MaybeObject* maybe = AllocateSymbol();
+ Symbol* symbol;
+ if (!maybe->To(&symbol)) return maybe;
+ symbol->set_is_private(true);
+ return symbol;
+}
+
+
MaybeObject* Heap::AllocateNativeContext() {
Object* result;
{ MaybeObject* maybe_result =
@@ -5834,7 +5614,7 @@ bool Heap::IdleNotification(int hint) {
return false;
}
- if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
+ if (!FLAG_incremental_marking || Serializer::enabled()) {
return IdleGlobalGC();
}
@@ -6075,6 +5855,9 @@ void Heap::Verify() {
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
+ VerifySmisVisitor smis_visitor;
+ IterateSmiRoots(&smis_visitor);
+
new_space_.Verify();
old_pointer_space_->Verify(&visitor);
@@ -6092,45 +5875,17 @@ void Heap::Verify() {
MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupUtf8String(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
-}
-
-
-MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupOneByteString(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
- }
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
+ Utf8StringKey key(string, HashSeed());
+ return InternalizeStringWithKey(&key);
}
-MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
- int from,
- int length) {
+MaybeObject* Heap::InternalizeString(String* string) {
+ if (string->IsInternalizedString()) return string;
Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- string_table()->LookupSubStringOneByteString(string,
- from,
- length,
- &result);
+ string_table()->LookupString(string, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
// Can't use set_string_table because StringTable::cast knows that
@@ -6141,27 +5896,20 @@ MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
}
-MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
- Object* result = NULL;
- Object* new_table;
- { MaybeObject* maybe_new_table =
- string_table()->LookupTwoByteString(string, &result);
- if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
+bool Heap::InternalizeStringIfExists(String* string, String** result) {
+ if (string->IsInternalizedString()) {
+ *result = string;
+ return true;
}
- // Can't use set_string_table because StringTable::cast knows that
- // StringTable is a singleton and checks for identity.
- roots_[kStringTableRootIndex] = new_table;
- ASSERT(result != NULL);
- return result;
+ return string_table()->LookupStringIfExists(string, result);
}
-MaybeObject* Heap::InternalizeString(String* string) {
- if (string->IsInternalizedString()) return string;
+MaybeObject* Heap::InternalizeStringWithKey(HashTableKey* key) {
Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- string_table()->LookupString(string, &result);
+ string_table()->LookupKey(key, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
// Can't use set_string_table because StringTable::cast knows that
@@ -6172,15 +5920,6 @@ MaybeObject* Heap::InternalizeString(String* string) {
}
-bool Heap::InternalizeStringIfExists(String* string, String** result) {
- if (string->IsInternalizedString()) {
- *result = string;
- return true;
- }
- return string_table()->LookupStringIfExists(string, result);
-}
-
-
void Heap::ZapFromSpace() {
NewSpacePageIterator it(new_space_.FromSpaceStart(),
new_space_.FromSpaceEnd());
@@ -6416,6 +6155,12 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
}
+void Heap::IterateSmiRoots(ObjectVisitor* v) {
+ v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
+ v->Synchronize(VisitorSynchronization::kSmiRootList);
+}
+
+
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -6570,7 +6315,7 @@ bool Heap::ConfigureHeap(int max_semispace_size,
Page::kPageSize));
// We rely on being able to allocate new arrays in paged spaces.
- ASSERT(MaxRegularSpaceAllocationSize() >=
+ ASSERT(Page::kMaxRegularHeapObjectSize >=
(JSArray::kSize +
FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
@@ -6637,7 +6382,15 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
}
-intptr_t Heap::PromotedExternalMemorySize() {
+bool Heap::AdvanceSweepers(int step_size) {
+ ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated());
+ bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+ sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+ return sweeping_complete;
+}
+
+
+int64_t Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
return amount_of_external_allocated_memory_
@@ -6645,6 +6398,32 @@ intptr_t Heap::PromotedExternalMemorySize() {
}
+void Heap::EnableInlineAllocation() {
+ if (!inline_allocation_disabled_) return;
+ inline_allocation_disabled_ = false;
+
+ // Update inline allocation limit for new space.
+ new_space()->UpdateInlineAllocationLimit(0);
+}
+
+
+void Heap::DisableInlineAllocation() {
+ if (inline_allocation_disabled_) return;
+ inline_allocation_disabled_ = true;
+
+ // Update inline allocation limit for new space.
+ new_space()->UpdateInlineAllocationLimit(0);
+
+ // Update inline allocation limit for old spaces.
+ PagedSpaces spaces(this);
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ space->EmptyAllocationInfo();
+ }
+}
+
+
V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
@@ -6756,10 +6535,9 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
+ mark_compact_collector()->SetUp();
+
if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
-#ifdef DEBUG
- relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif // DEBUG
return true;
}
@@ -6805,6 +6583,8 @@ void Heap::TearDown() {
}
#endif
+ UpdateMaximumCommitted();
+
if (FLAG_print_cumulative_gc_stat) {
PrintF("\n");
PrintF("gc_count=%d ", gc_count_);
@@ -6819,6 +6599,31 @@ void Heap::TearDown() {
PrintF("\n\n");
}
+ if (FLAG_print_max_heap_committed) {
+ PrintF("\n");
+ PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
+ MaximumCommittedMemory());
+ PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
+ new_space_.MaximumCommittedMemory());
+ PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
+ old_data_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
+ old_pointer_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
+ old_pointer_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
+ code_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
+ map_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
+ cell_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
+ property_cell_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
+ lo_space_->MaximumCommittedMemory());
+ PrintF("\n\n");
+ }
+
TearDownArrayBuffers();
isolate_->global_handles()->TearDown();
@@ -6877,6 +6682,7 @@ void Heap::TearDown() {
isolate_->memory_allocator()->TearDown();
delete relocation_mutex_;
+ relocation_mutex_ = NULL;
}
@@ -6955,6 +6761,10 @@ void Heap::EnsureWeakObjectToCodeTable() {
}
+void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
+ v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
+}
+
#ifdef DEBUG
class PrintHandleVisitor: public ObjectVisitor {
@@ -7752,29 +7562,6 @@ void Heap::GarbageCollectionGreedyCheck() {
#endif
-TranscendentalCache::SubCache::SubCache(Isolate* isolate, Type t)
- : type_(t),
- isolate_(isolate) {
- uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
- uint32_t in1 = 0xffffffffu; // generated by the FPU.
- for (int i = 0; i < kCacheSize; i++) {
- elements_[i].in[0] = in0;
- elements_[i].in[1] = in1;
- elements_[i].output = NULL;
- }
-}
-
-
-void TranscendentalCache::Clear() {
- for (int i = 0; i < kNumberOfCaches; i++) {
- if (caches_[i] != NULL) {
- delete caches_[i];
- caches_[i] = NULL;
- }
- }
-}
-
-
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
@@ -7811,7 +7598,13 @@ void ExternalStringTable::CleanUp() {
void ExternalStringTable::TearDown() {
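+ // Finalize the remaining external strings so their external resources are
+ // released before the backing lists are freed.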
+ for (int i = 0; i < new_space_strings_.length(); ++i) {
+ heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
+ }
new_space_strings_.Free();
+ for (int i = 0; i < old_space_strings_.length(); ++i) {
+ heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
+ }
old_space_strings_.Free();
}
@@ -7937,17 +7730,18 @@ void Heap::CheckpointObjectStats() {
static_cast<int>(object_sizes_last_time_[index]));
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge; \
- counters->count_of_CODE_AGE_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_CODE_AGE_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_CODE_AGE_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_CODE_AGE_##name()->Decrement( \
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
+ counters->count_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_CODE_AGE_##name()->Decrement( \
static_cast<int>(object_sizes_last_time_[index]));
- CODE_AGE_LIST_WITH_NO_AGE(ADJUST_LAST_TIME_OBJECT_COUNT)
+ CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
@@ -7955,15 +7749,4 @@ void Heap::CheckpointObjectStats() {
ClearObjectStats();
}
-
-Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Lock();
-#ifdef DEBUG
- heap_->relocation_mutex_locked_by_optimizer_thread_ =
- heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
-#endif // DEBUG
- }
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 96cda586b7..1ac4dfaa08 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -77,7 +77,7 @@ namespace internal {
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
- V(Smi, stack_limit, StackLimit) \
+ V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
@@ -132,29 +132,38 @@ namespace internal {
V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
- V(Map, external_byte_array_map, ExternalByteArrayMap) \
- V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
- V(Map, external_short_array_map, ExternalShortArrayMap) \
- V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap) \
- V(Map, external_int_array_map, ExternalIntArrayMap) \
- V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
- V(Map, external_float_array_map, ExternalFloatArrayMap) \
- V(Map, external_double_array_map, ExternalDoubleArrayMap) \
- V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
- V(ExternalArray, empty_external_byte_array, \
- EmptyExternalByteArray) \
- V(ExternalArray, empty_external_unsigned_byte_array, \
- EmptyExternalUnsignedByteArray) \
- V(ExternalArray, empty_external_short_array, EmptyExternalShortArray) \
- V(ExternalArray, empty_external_unsigned_short_array, \
- EmptyExternalUnsignedShortArray) \
- V(ExternalArray, empty_external_int_array, EmptyExternalIntArray) \
- V(ExternalArray, empty_external_unsigned_int_array, \
- EmptyExternalUnsignedIntArray) \
- V(ExternalArray, empty_external_float_array, EmptyExternalFloatArray) \
- V(ExternalArray, empty_external_double_array, EmptyExternalDoubleArray) \
- V(ExternalArray, empty_external_pixel_array, \
- EmptyExternalPixelArray) \
+ V(Map, external_int8_array_map, ExternalInt8ArrayMap) \
+ V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \
+ V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
+ V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \
+ V(Map, external_int32_array_map, ExternalInt32ArrayMap) \
+ V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \
+ V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \
+ V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \
+ V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \
+ V(ExternalArray, empty_external_int8_array, \
+ EmptyExternalInt8Array) \
+ V(ExternalArray, empty_external_uint8_array, \
+ EmptyExternalUint8Array) \
+ V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \
+ V(ExternalArray, empty_external_uint16_array, \
+ EmptyExternalUint16Array) \
+ V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \
+ V(ExternalArray, empty_external_uint32_array, \
+ EmptyExternalUint32Array) \
+ V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \
+ V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \
+ V(ExternalArray, empty_external_uint8_clamped_array, \
+ EmptyExternalUint8ClampedArray) \
+ V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
+ V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
+ V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
+ V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
+ V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
+ V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
+ V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
+ V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
+ V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
@@ -176,26 +185,80 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Smi, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
- V(Smi, real_stack_limit, RealStackLimit) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
- V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
+ V(Cell, undefined_cell, UndefineCell) \
V(JSObject, observation_state, ObservationState) \
V(Map, external_map, ExternalMap) \
V(Symbol, frozen_symbol, FrozenSymbol) \
V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
- V(Symbol, observed_symbol, ObservedSymbol)
+ V(Symbol, observed_symbol, ObservedSymbol) \
+ V(FixedArray, materialized_objects, MaterializedObjects) \
+ V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
+ V(JSObject, microtask_state, MicrotaskState)
+
+// Entries in this list are limited to Smis and are not visited during GC.
+#define SMI_ROOT_LIST(V) \
+ V(Smi, stack_limit, StackLimit) \
+ V(Smi, real_stack_limit, RealStackLimit) \
+ V(Smi, last_script_id, LastScriptId) \
+ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
+ SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
+// Heap roots that are known to be immortal immovable, for which we can safely
+// skip write barriers.
+#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
+ V(byte_array_map) \
+ V(free_space_map) \
+ V(one_pointer_filler_map) \
+ V(two_pointer_filler_map) \
+ V(undefined_value) \
+ V(the_hole_value) \
+ V(null_value) \
+ V(true_value) \
+ V(false_value) \
+ V(uninitialized_value) \
+ V(cell_map) \
+ V(global_property_cell_map) \
+ V(shared_function_info_map) \
+ V(meta_map) \
+ V(heap_number_map) \
+ V(native_context_map) \
+ V(fixed_array_map) \
+ V(code_map) \
+ V(scope_info_map) \
+ V(fixed_cow_array_map) \
+ V(fixed_double_array_map) \
+ V(constant_pool_array_map) \
+ V(no_interceptor_result_sentinel) \
+ V(hash_table_map) \
+ V(empty_fixed_array) \
+ V(empty_byte_array) \
+ V(empty_descriptor_array) \
+ V(empty_constant_pool_array) \
+ V(arguments_marker) \
+ V(symbol_map) \
+ V(non_strict_arguments_elements_map) \
+ V(function_context_map) \
+ V(catch_context_map) \
+ V(with_context_map) \
+ V(block_context_map) \
+ V(module_context_map) \
+ V(global_context_map) \
+ V(oddball_map) \
+ V(message_object_map) \
+ V(foreign_map) \
+ V(neander_map)
+
#define INTERNALIZED_STRING_LIST(V) \
V(Array_string, "Array") \
V(Object_string, "Object") \
@@ -209,8 +272,10 @@ namespace internal {
V(Boolean_string, "Boolean") \
V(callee_string, "callee") \
V(constructor_string, "constructor") \
- V(result_string, ".result") \
+ V(dot_result_string, ".result") \
V(dot_for_string, ".for.") \
+ V(dot_iterator_string, ".iterator") \
+ V(dot_generator_object_string, ".generator_object") \
V(eval_string, "eval") \
V(empty_string, "") \
V(function_string, "function") \
@@ -449,7 +514,7 @@ class ExternalStringTable {
void TearDown();
private:
- ExternalStringTable() { }
+ explicit ExternalStringTable(Heap* heap) : heap_(heap) { }
friend class Heap;
@@ -518,7 +583,6 @@ class Heap {
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
- int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 4/5; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -533,6 +597,13 @@ class Heap {
// Returns the amount of phyical memory currently committed for the heap.
size_t CommittedPhysicalMemory();
+ // Returns the maximum amount of memory ever committed for the heap.
+ intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+
+ // Updates the maximum committed memory for the heap. Should be called
+ // whenever a space grows.
+ void UpdateMaximumCommitted();
+
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
@@ -608,9 +679,6 @@ class Heap {
return old_data_space_->allocation_limit_address();
}
- // Uncommit unused semi space.
- bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
-
// Allocates and initializes a new JavaScript object based on a
// constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -624,9 +692,6 @@ class Heap {
JSFunction* constructor,
Handle<AllocationSite> allocation_site);
- MUST_USE_RESULT MaybeObject* AllocateJSGeneratorObject(
- JSFunction* function);
-
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
@@ -668,12 +733,6 @@ class Heap {
MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source,
AllocationSite* site = NULL);
- // Allocates the function prototype.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
-
// Allocates a JS ArrayBuffer object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -740,9 +799,6 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
int instance_size);
- // Allocate a map for the specified function
- MUST_USE_RESULT MaybeObject* AllocateInitialMap(JSFunction* fun);
-
// Allocates an empty code cache.
MUST_USE_RESULT MaybeObject* AllocateCodeCache();
@@ -767,6 +823,9 @@ class Heap {
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
+ // Iterates the whole code space to clear all ICs of the given kind.
+ void ClearAllICsByKind(Code::Kind kind);
+
// For use during bootup.
void RepairFreeListsAfterBoot();
@@ -876,11 +935,21 @@ class Heap {
void* external_pointer,
PretenureFlag pretenure);
+ // Allocates a fixed typed array of the specified length and type.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ MUST_USE_RESULT MaybeObject* AllocateFixedTypedArray(
+ int length,
+ ExternalArrayType array_type,
+ PretenureFlag pretenure);
+
// Allocate a symbol in old space.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateSymbol();
+ MUST_USE_RESULT MaybeObject* AllocatePrivateSymbol();
// Allocate a tenured AllocationSite. It's payload is null
MUST_USE_RESULT MaybeObject* AllocateAllocationSite();
@@ -1070,28 +1139,8 @@ class Heap {
int start_position,
int end_position,
Object* script,
- Object* stack_trace,
Object* stack_frames);
- // Allocates a new cons string object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateConsString(String* first,
- String* second);
-
- // Allocates a new sub string object which is a substring of an underlying
- // string buffer stretching from the index start (inclusive) to the index
- // end (exclusive).
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateSubString(
- String* buffer,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
-
// Allocate a new external string object, which is backed by a string
// resource that resides outside the V8 heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1144,16 +1193,13 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* InternalizeUtf8String(Vector<const char> str);
MUST_USE_RESULT MaybeObject* InternalizeUtf8String(const char* str) {
return InternalizeUtf8String(CStrVector(str));
}
- MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
- Vector<const uint8_t> str);
- MUST_USE_RESULT MaybeObject* InternalizeTwoByteString(Vector<const uc16> str);
+ MUST_USE_RESULT MaybeObject* InternalizeUtf8String(Vector<const char> str);
+
MUST_USE_RESULT MaybeObject* InternalizeString(String* str);
- MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
- Handle<SeqOneByteString> string, int from, int length);
+ MUST_USE_RESULT MaybeObject* InternalizeStringWithKey(HashTableKey* key);
bool InternalizeStringIfExists(String* str, String** result);
bool InternalizeTwoCharsStringIfExists(String* str, String** result);
@@ -1174,24 +1220,13 @@ class Heap {
// Converts the given boolean condition to JavaScript boolean value.
inline Object* ToBoolean(bool condition);
- // Code that should be run before and after each GC. Includes some
- // reporting/verification activities when compiled with DEBUG set.
- void GarbageCollectionPrologue();
- void GarbageCollectionEpilogue();
-
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- bool CollectGarbage(AllocationSpace space,
- GarbageCollector collector,
- const char* gc_reason,
- const char* collector_reason);
-
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- inline bool CollectGarbage(AllocationSpace space,
- const char* gc_reason = NULL);
+ inline bool CollectGarbage(
+ AllocationSpace space,
+ const char* gc_reason = NULL,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
static const int kNoGCFlags = 0;
static const int kSweepPreciselyMask = 1;
@@ -1206,7 +1241,10 @@ class Heap {
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
- void CollectAllGarbage(int flags, const char* gc_reason = NULL);
+ void CollectAllGarbage(
+ int flags,
+ const char* gc_reason = NULL,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Last hope GC, should try to squeeze as much as possible.
void CollectAllAvailableGarbage(const char* gc_reason = NULL);
@@ -1221,10 +1259,6 @@ class Heap {
// Notify the heap that a context has been disposed.
int NotifyContextDisposed();
- // Utility to invoke the scavenger. This is needed in test code to
- // ensure correct callback for weak global handles.
- void PerformScavenge();
-
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
@@ -1313,6 +1347,9 @@ class Heap {
void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over entries in the smi roots list. Only interesting to the
+ // serializer/deserializer, since GC does not care about smis.
+ void IterateSmiRoots(ObjectVisitor* v);
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
@@ -1380,6 +1417,10 @@ class Heap {
roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
}
+ void public_set_materialized_objects(FixedArray* objects) {
+ roots_[kMaterializedObjectsRootIndex] = objects;
+ }
+
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
@@ -1428,20 +1469,9 @@ class Heap {
#endif
}
- // Fill in bogus values in from space
- void ZapFromSpace();
-
// Print short heap statistics.
void PrintShortHeapStatistics();
- // Makes a new internalized string object
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateInternalizedString(
- const char* str, int length, int hash);
- MUST_USE_RESULT MaybeObject* CreateInternalizedString(String* str);
-
// Write barrier support for address[offset] = o.
INLINE(void RecordWrite(Address address, int offset));
@@ -1474,8 +1504,10 @@ class Heap {
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
- // Commits from space if it is uncommitted.
- void EnsureFromSpaceIsCommitted();
+ // An object may have an AllocationSite associated with it through a trailing
+ // AllocationMemento. Its feedback should be updated when objects are found
+ // in the heap.
+ static inline void UpdateAllocationSiteFeedback(HeapObject* object);
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
@@ -1496,8 +1528,8 @@ class Heap {
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
- inline intptr_t AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes);
+ inline int64_t AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes);
// This is only needed for testing high promotion mode.
void SetNewSpaceHighPromotionModeActive(bool mode) {
@@ -1516,7 +1548,10 @@ class Heap {
}
inline intptr_t PromotedTotalSize() {
- return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
+ if (total < 0) return 0;
+ return static_cast<intptr_t>(total);
}
inline intptr_t OldGenerationSpaceAvailable() {
@@ -1531,25 +1566,26 @@ class Heap {
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
- const int divisor = FLAG_stress_compaction ? 10 :
- new_space_high_promotion_mode_active_ ? 1 : 3;
+ const int divisor = FLAG_stress_compaction ? 10 : 1;
intptr_t limit =
Max(old_gen_size + old_gen_size / divisor,
kMinimumOldGenerationAllocationLimit);
limit += new_space_.Capacity();
- // TODO(hpayer): Can be removed when when pretenuring is supported for all
- // allocation sites.
- if (IsHighSurvivalRate() && IsStableOrIncreasingSurvivalTrend()) {
- limit *= 2;
- }
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
}
+ // Indicates whether inline bump-pointer allocation has been disabled.
+ bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+
+ // Switch whether inline bump-pointer allocation should be used.
+ void EnableInlineAllocation();
+ void DisableInlineAllocation();
+
// Implements the corresponding V8 API function.
bool IdleNotification(int hint);
- // Declare all the root indices.
+ // Declare all the root indices. This defines the root list order.
enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
@@ -1565,8 +1601,14 @@ class Heap {
#undef DECLARE_STRUCT_MAP
kStringTableRootIndex,
+
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+ SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+
+ kRootListLength,
kStrongRootListLength = kStringTableRootIndex,
- kRootListLength
+ kSmiRootsStart = kStringTableRootIndex + 1
};
STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
@@ -1582,11 +1624,14 @@ class Heap {
bool RootCanBeTreatedAsConstant(RootListIndex root_index);
MUST_USE_RESULT MaybeObject* NumberToString(
- Object* number, bool check_number_string_cache = true,
- PretenureFlag pretenure = NOT_TENURED);
+ Object* number, bool check_number_string_cache = true);
MUST_USE_RESULT MaybeObject* Uint32ToString(
uint32_t value, bool check_number_string_cache = true);
+ Map* MapForFixedTypedArray(ExternalArrayType array_type);
+ RootListIndex RootIndexForFixedTypedArray(
+ ExternalArrayType array_type);
+
Map* MapForExternalArrayType(ExternalArrayType array_type);
RootListIndex RootIndexForExternalArrayType(
ExternalArrayType array_type);
@@ -1713,12 +1758,7 @@ class Heap {
old_pointer_space()->IsLazySweepingComplete();
}
- bool AdvanceSweepers(int step_size) {
- ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
- bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
- sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
- return sweeping_complete;
- }
+ bool AdvanceSweepers(int step_size);
bool EnsureSweepersProgressed(int step_size) {
bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
@@ -1738,7 +1778,7 @@ class Heap {
inline Isolate* isolate();
void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
- void CallGCEpilogueCallbacks(GCType gc_type);
+ void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
inline bool OldGenerationAllocationLimitReached();
@@ -1799,10 +1839,12 @@ class Heap {
bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
- intptr_t amount_of_external_allocated_memory() {
+ int64_t amount_of_external_allocated_memory() {
return amount_of_external_allocated_memory_;
}
+ void DeoptMarkedAllocationSites();
+
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
@@ -1812,7 +1854,7 @@ class Heap {
FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
FIRST_CODE_AGE_SUB_TYPE =
FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
- OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kLastCodeAge + 1
+ OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
};
void RecordObjectStats(InstanceType type, size_t size) {
@@ -1822,12 +1864,17 @@ class Heap {
}
void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
- ASSERT(code_sub_type < Code::NUMBER_OF_KINDS);
- ASSERT(code_age < Code::kLastCodeAge);
- object_counts_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type]++;
- object_sizes_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type] += size;
- object_counts_[FIRST_CODE_AGE_SUB_TYPE + code_age]++;
- object_sizes_[FIRST_CODE_AGE_SUB_TYPE + code_age] += size;
+ int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
+ int code_age_index =
+ FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
+ ASSERT(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
+ code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
+ ASSERT(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
+ code_age_index < OBJECT_STATS_COUNT);
+ object_counts_[code_sub_type_index]++;
+ object_sizes_[code_sub_type_index] += size;
+ object_counts_[code_age_index]++;
+ object_sizes_[code_age_index] += size;
}
void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
@@ -1842,23 +1889,19 @@ class Heap {
// only when FLAG_concurrent_recompilation is true.
class RelocationLock {
public:
- explicit RelocationLock(Heap* heap);
+ explicit RelocationLock(Heap* heap) : heap_(heap) {
+ if (FLAG_concurrent_recompilation) {
+ heap_->relocation_mutex_->Lock();
+ }
+ }
+
~RelocationLock() {
if (FLAG_concurrent_recompilation) {
-#ifdef DEBUG
- heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif // DEBUG
heap_->relocation_mutex_->Unlock();
}
}
-#ifdef DEBUG
- static bool IsLockedByOptimizerThread(Heap* heap) {
- return heap->relocation_mutex_locked_by_optimizer_thread_;
- }
-#endif // DEBUG
-
private:
Heap* heap_;
};
@@ -1873,6 +1916,9 @@ class Heap {
void EnsureWeakObjectToCodeTable();
+ static void FatalProcessOutOfMemory(const char* location,
+ bool take_snapshot = false);
+
private:
Heap();
@@ -1888,6 +1934,7 @@ class Heap {
int initial_semispace_size_;
intptr_t max_old_generation_size_;
intptr_t max_executable_size_;
+ intptr_t maximum_committed_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@@ -1906,9 +1953,6 @@ class Heap {
bool flush_monomorphic_ics_;
- // AllocationMementos found in new space.
- int allocation_mementos_found_;
-
int scan_on_scavenge_pages_;
NewSpace new_space_;
@@ -1923,7 +1967,7 @@ class Heap {
int gc_post_processing_depth_;
// Returns the amount of external memory registered since last global gc.
- intptr_t PromotedExternalMemorySize();
+ int64_t PromotedExternalMemorySize();
unsigned int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
@@ -1977,15 +2021,19 @@ class Heap {
// The amount of external memory registered through the API kept alive
// by global handles
- intptr_t amount_of_external_allocated_memory_;
+ int64_t amount_of_external_allocated_memory_;
// Caches the amount of external memory registered at the last global gc.
- intptr_t amount_of_external_allocated_memory_at_last_global_gc_;
+ int64_t amount_of_external_allocated_memory_at_last_global_gc_;
// Indicates that an allocation has failed in the old generation since the
// last GC.
bool old_gen_exhausted_;
+ // Indicates that inline bump-pointer allocation has been globally disabled
+ // for all spaces. This is used to disable allocations in generated code.
+ bool inline_allocation_disabled_;
+
// Weak list heads, threaded through the objects.
  // List heads are initialized lazily and contain the undefined_value at start.
Object* native_contexts_list_;
@@ -2068,15 +2116,42 @@ class Heap {
gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
}
+ // Code that should be run before and after each GC. Includes some
+ // reporting/verification activities when compiled with DEBUG set.
+ void GarbageCollectionPrologue();
+ void GarbageCollectionEpilogue();
+
+ // Pretenuring decisions are made based on feedback collected during new
+ // space evacuation. Note that between feedback collection and calling this
+ // method, objects in old space must not move.
+ // Right now we only process pretenuring feedback in high promotion mode.
+ void ProcessPretenuringFeedback();
+
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
+ // Make sure there is a filler value behind the top of the new space
+ // so that the GC does not confuse some uninitialized/stale memory
+ // with the allocation memento of the object at the top.
+ void EnsureFillerObjectAtTop();
+
+ // Performs garbage collection operation.
+ // Returns whether there is a chance that another major GC could
+ // collect more garbage.
+ bool CollectGarbage(
+ GarbageCollector collector,
+ const char* gc_reason,
+ const char* collector_reason,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
// Performs garbage collection
// Returns whether there is a chance another major GC could
// collect more garbage.
- bool PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer);
+ bool PerformGarbageCollection(
+ GarbageCollector collector,
+ GCTracer* tracer,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
inline void UpdateOldSpaceLimits();
@@ -2087,7 +2162,7 @@ class Heap {
PretenureFlag pretenure) {
ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
preferred_old_space == OLD_DATA_SPACE);
- if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
+ if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
}
@@ -2107,6 +2182,8 @@ class Heap {
void InitializeJSObjectFromMap(JSObject* obj,
FixedArray* properties,
Map* map);
+ void InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site);
bool CreateInitialMaps();
bool CreateInitialObjects();
@@ -2137,6 +2214,9 @@ class Heap {
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
+ // Allocate empty constant pool array.
+ MUST_USE_RESULT MaybeObject* AllocateEmptyConstantPoolArray();
+
// Allocate a tenured simple cell.
MUST_USE_RESULT MaybeObject* AllocateCell(Object* value);
@@ -2150,6 +2230,15 @@ class Heap {
// Performs a minor collection in new generation.
void Scavenge();
+ // Commits from space if it is uncommitted.
+ void EnsureFromSpaceIsCommitted();
+
+ // Uncommit unused semi space.
+ bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
+ // Fill in bogus values in from space
+ void ZapFromSpace();
+
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
Heap* heap,
Object** pointer);
@@ -2169,6 +2258,15 @@ class Heap {
void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
void ProcessAllocationSites(WeakObjectRetainer* retainer, bool record_slots);
+ // Deopts all code that contains allocation instructions which are tenured or
+ // not tenured. Moreover, it clears the pretenuring allocation site statistics.
+ void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+
+ // Evaluates local pretenuring for the old space and calls
+ // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
+ // the old space.
+ void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
+
// Called on heap tear-down.
void TearDownArrayBuffers();
@@ -2204,6 +2302,18 @@ class Heap {
// Flush the number to string cache.
void FlushNumberStringCache();
+ // Allocates a fixed-size allocation sites scratchpad.
+ MUST_USE_RESULT MaybeObject* AllocateAllocationSitesScratchpad();
+
+ // Sets used allocation sites entries to undefined.
+ void FlushAllocationSitesScratchpad();
+
+ // Initializes the allocation sites scratchpad with undefined values.
+ void InitializeAllocationSitesScratchpad();
+
+ // Adds an allocation site to the scratchpad if there is space left.
+ void AddAllocationSiteToScratchpad(AllocationSite* site);
+
void UpdateSurvivalRateTrend(int start_new_space_size);
enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
@@ -2212,6 +2322,8 @@ class Heap {
static const int kYoungSurvivalRateLowThreshold = 10;
static const int kYoungSurvivalRateAllowedDeviation = 15;
+ static const int kOldSurvivalRateLowThreshold = 20;
+
int young_survivors_after_last_gc_;
int high_survival_rate_period_length_;
int low_survival_rate_period_length_;
@@ -2374,6 +2486,9 @@ class Heap {
int no_weak_object_verification_scope_depth_;
#endif
+ static const int kAllocationSiteScratchpadSize = 256;
+ int allocation_sites_scratchpad_length_;
+
static const int kMaxMarkSweepsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
@@ -2469,6 +2584,7 @@ class AlwaysAllocateScope {
DisallowAllocationFailure disallow_allocation_failure_;
};
+
#ifdef VERIFY_HEAP
class NoWeakObjectVerificationScope {
public:
@@ -2489,6 +2605,13 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
+// Verify that all objects are Smis.
+class VerifySmisVisitor: public ObjectVisitor {
+ public:
+ inline void VisitPointers(Object** start, Object** end);
+};
+
+
// Space iterator for iterating over all spaces of the heap. Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
@@ -2884,85 +3007,6 @@ class RegExpResultsCache {
};
-class TranscendentalCache {
- public:
- enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
- static const int kTranscendentalTypeBits = 3;
- STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);
-
- // Returns a heap number with f(input), where f is a math function specified
- // by the 'type' argument.
- MUST_USE_RESULT inline MaybeObject* Get(Type type, double input);
-
- // The cache contains raw Object pointers. This method disposes of
- // them before a garbage collection.
- void Clear();
-
- private:
- class SubCache {
- static const int kCacheSize = 512;
-
- explicit SubCache(Isolate* isolate, Type t);
-
- MUST_USE_RESULT inline MaybeObject* Get(double input);
-
- inline double Calculate(double input);
-
- struct Element {
- uint32_t in[2];
- Object* output;
- };
-
- union Converter {
- double dbl;
- uint32_t integers[2];
- };
-
- inline static int Hash(const Converter& c) {
- uint32_t hash = (c.integers[0] ^ c.integers[1]);
- hash ^= static_cast<int32_t>(hash) >> 16;
- hash ^= static_cast<int32_t>(hash) >> 8;
- return (hash & (kCacheSize - 1));
- }
-
- Element elements_[kCacheSize];
- Type type_;
- Isolate* isolate_;
-
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
- // Inline implementation of the cache.
- friend class TranscendentalCacheStub;
- // For evaluating value.
- friend class TranscendentalCache;
-
- DISALLOW_COPY_AND_ASSIGN(SubCache);
- };
-
- explicit TranscendentalCache(Isolate* isolate) : isolate_(isolate) {
- for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
- }
-
- ~TranscendentalCache() {
- for (int i = 0; i < kNumberOfCaches; ++i) delete caches_[i];
- }
-
- // Used to create an external reference.
- inline Address cache_array_address();
-
- // Instantiation
- friend class Isolate;
- // Inline implementation of the caching.
- friend class TranscendentalCacheStub;
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
-
- Isolate* isolate_;
- SubCache* caches_[kNumberOfCaches];
- DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
-};
-
-
// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
public:
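
The heap.h hunks above widen the external-allocation counters to int64_t while PromotedTotalSize() still returns an intptr_t, so the 64-bit sum is clamped back into a safe range before it is returned. A minimal standalone sketch of that clamping pattern follows; the function and constant names here are illustrative, not V8's.

#include <cstdint>

// Illustrative only: combine two 64-bit byte counts and clamp the result
// into [0, kMaxInt32] before narrowing to a platform word, mirroring the
// bounds checks added to PromotedTotalSize() above.
static const int64_t kMaxInt32 = 0x7FFFFFFF;

intptr_t ClampedPromotedTotal(int64_t promoted_object_bytes,
                              int64_t external_bytes) {
  int64_t total = promoted_object_bytes + external_bytes;
  if (total > kMaxInt32) return static_cast<intptr_t>(kMaxInt32);
  if (total < 0) return 0;  // External memory can be adjusted downwards.
  return static_cast<intptr_t>(total);
}
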
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc
index f712a39db8..f15267349f 100644
--- a/deps/v8/src/hydrogen-check-elimination.cc
+++ b/deps/v8/src/hydrogen-check-elimination.cc
@@ -27,59 +27,338 @@
#include "hydrogen-check-elimination.h"
#include "hydrogen-alias-analysis.h"
+#include "hydrogen-flow-engine.h"
+
+#define GLOBAL 1
+
+// Only collect stats in debug mode.
+#if DEBUG
+#define INC_STAT(x) phase_->x++
+#else
+#define INC_STAT(x)
+#endif
+
+// For code de-uglification.
+#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x
namespace v8 {
namespace internal {
-static const int kMaxTrackedObjects = 10;
typedef UniqueSet<Map>* MapSet;
-// The main datastructure used during check elimination, which stores a
+struct HCheckTableEntry {
+ HValue* object_; // The object being approximated. NULL => invalid entry.
+ HInstruction* check_; // The last check instruction.
+ MapSet maps_; // The set of known maps for the object.
+ bool is_stable_;
+};
+
+
+// The main data structure used during check elimination, which stores a
// set of known maps for each object.
-class HCheckTable {
+class HCheckTable : public ZoneObject {
public:
- explicit HCheckTable(Zone* zone) : zone_(zone) {
- Kill();
- redundant_ = 0;
- narrowed_ = 0;
- empty_ = 0;
- removed_ = 0;
- compares_true_ = 0;
- compares_false_ = 0;
- transitions_ = 0;
- loads_ = 0;
+ static const int kMaxTrackedObjects = 10;
+
+ explicit HCheckTable(HCheckEliminationPhase* phase)
+ : phase_(phase),
+ cursor_(0),
+ size_(0) {
+ }
+
+ // The main processing of instructions.
+ HCheckTable* Process(HInstruction* instr, Zone* zone) {
+ switch (instr->opcode()) {
+ case HValue::kCheckMaps: {
+ ReduceCheckMaps(HCheckMaps::cast(instr));
+ break;
+ }
+ case HValue::kCheckValue: {
+ ReduceCheckValue(HCheckValue::cast(instr));
+ break;
+ }
+ case HValue::kLoadNamedField: {
+ ReduceLoadNamedField(HLoadNamedField::cast(instr));
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ ReduceStoreNamedField(HStoreNamedField::cast(instr));
+ break;
+ }
+ case HValue::kCompareMap: {
+ ReduceCompareMap(HCompareMap::cast(instr));
+ break;
+ }
+ case HValue::kTransitionElementsKind: {
+ ReduceTransitionElementsKind(
+ HTransitionElementsKind::cast(instr));
+ break;
+ }
+ case HValue::kCheckMapValue: {
+ ReduceCheckMapValue(HCheckMapValue::cast(instr));
+ break;
+ }
+ case HValue::kCheckHeapObject: {
+ ReduceCheckHeapObject(HCheckHeapObject::cast(instr));
+ break;
+ }
+ default: {
+ // If the instruction changes maps uncontrollably, drop everything.
+ if (instr->CheckChangesFlag(kOsrEntries)) {
+ Reset();
+ } else if (instr->CheckChangesFlag(kMaps)) {
+ KillUnstableEntries();
+ }
+ }
+ // Improvements possible:
+ // - eliminate redundant HCheckSmi, HCheckInstanceType instructions
+ // - track which values have been HCheckHeapObject'd
+ }
+
+ return this;
+ }
+
+ // Support for global analysis with HFlowEngine: Merge given state with
+ // the other incoming state.
+ static HCheckTable* Merge(HCheckTable* succ_state, HBasicBlock* succ_block,
+ HCheckTable* pred_state, HBasicBlock* pred_block,
+ Zone* zone) {
+ if (pred_state == NULL || pred_block->IsUnreachable()) {
+ return succ_state;
+ }
+ if (succ_state == NULL) {
+ return pred_state->Copy(succ_block, pred_block, zone);
+ } else {
+ return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+ }
+ }
+
+ // Support for global analysis with HFlowEngine: Given state merged with all
+ // the other incoming states, prepare it for use.
+ static HCheckTable* Finish(HCheckTable* state, HBasicBlock* block,
+ Zone* zone) {
+ if (state == NULL) {
+ block->MarkUnreachable();
+ }
+ return state;
+ }
+
+ private:
+ // Copy state to successor block.
+ HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) {
+ HCheckTable* copy = new(phase_->zone()) HCheckTable(phase_);
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* old_entry = &entries_[i];
+ HCheckTableEntry* new_entry = &copy->entries_[i];
+ new_entry->object_ = old_entry->object_;
+ new_entry->maps_ = old_entry->maps_->Copy(phase_->zone());
+ new_entry->is_stable_ = old_entry->is_stable_;
+ // Keep the check if the existing check's block dominates the successor.
+ if (old_entry->check_ != NULL &&
+ old_entry->check_->block()->Dominates(succ)) {
+ new_entry->check_ = old_entry->check_;
+ } else {
+ // Leave it NULL till we meet a new check instruction for this object
+ // in the control flow.
+ new_entry->check_ = NULL;
+ }
+ }
+ copy->cursor_ = cursor_;
+ copy->size_ = size_;
+
+ // Create entries for succ block's phis.
+ if (!succ->IsLoopHeader() && succ->phis()->length() > 0) {
+ int pred_index = succ->PredecessorIndexOf(from_block);
+ for (int phi_index = 0;
+ phi_index < succ->phis()->length();
+ ++phi_index) {
+ HPhi* phi = succ->phis()->at(phi_index);
+ HValue* phi_operand = phi->OperandAt(pred_index);
+
+ HCheckTableEntry* pred_entry = copy->Find(phi_operand);
+ if (pred_entry != NULL) {
+ // Create an entry for a phi in the table.
+ copy->Insert(phi, NULL, pred_entry->maps_->Copy(phase_->zone()),
+ pred_entry->is_stable_);
+ }
+ }
+ }
+
+ // Branch-sensitive analysis for certain comparisons may add more facts
+ // to the state for the successor on the true branch.
+ bool learned = false;
+ if (succ->predecessors()->length() == 1) {
+ HControlInstruction* end = succ->predecessors()->at(0)->end();
+ bool is_true_branch = end->SuccessorAt(0) == succ;
+ if (end->IsCompareMap()) {
+ HCompareMap* cmp = HCompareMap::cast(end);
+ HValue* object = cmp->value()->ActualValue();
+ HCheckTableEntry* entry = copy->Find(object);
+ if (is_true_branch) {
+ // Learn on the true branch of if(CompareMap(x)).
+ if (entry == NULL) {
+ copy->Insert(object, cmp, cmp->map(), cmp->is_stable());
+ } else {
+ MapSet list = new(phase_->zone()) UniqueSet<Map>();
+ list->Add(cmp->map(), phase_->zone());
+ entry->maps_ = list;
+ entry->check_ = cmp;
+ entry->is_stable_ = cmp->is_stable();
+ }
+ } else {
+ // Learn on the false branch of if(CompareMap(x)).
+ if (entry != NULL) {
+ entry->maps_->Remove(cmp->map());
+ }
+ }
+ learned = true;
+ } else if (is_true_branch && end->IsCompareObjectEqAndBranch()) {
+ // Learn on the true branch of if(CmpObjectEq(x, y)).
+ HCompareObjectEqAndBranch* cmp =
+ HCompareObjectEqAndBranch::cast(end);
+ HValue* left = cmp->left()->ActualValue();
+ HValue* right = cmp->right()->ActualValue();
+ HCheckTableEntry* le = copy->Find(left);
+ HCheckTableEntry* re = copy->Find(right);
+ if (le == NULL) {
+ if (re != NULL) {
+ copy->Insert(left, NULL, re->maps_->Copy(zone), re->is_stable_);
+ }
+ } else if (re == NULL) {
+ copy->Insert(right, NULL, le->maps_->Copy(zone), le->is_stable_);
+ } else {
+ MapSet intersect = le->maps_->Intersect(re->maps_, zone);
+ le->maps_ = intersect;
+ re->maps_ = intersect->Copy(zone);
+ }
+ learned = true;
+ }
+ // Learning on false branches requires storing negative facts.
+ }
+
+ if (FLAG_trace_check_elimination) {
+ PrintF("B%d checkmaps-table %s from B%d:\n",
+ succ->block_id(),
+ learned ? "learned" : "copied",
+ from_block->block_id());
+ copy->Print();
+ }
+
+ return copy;
+ }
+
+ // Merge this state with the other incoming state.
+ HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that,
+ HBasicBlock* pred_block, Zone* zone) {
+ if (that->size_ == 0) {
+ // If the other state is empty, simply reset.
+ Reset();
+ } else {
+ int pred_index = succ->PredecessorIndexOf(pred_block);
+ bool compact = false;
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* this_entry = &entries_[i];
+ HCheckTableEntry* that_entry;
+ if (this_entry->object_->IsPhi() &&
+ this_entry->object_->block() == succ) {
+ HPhi* phi = HPhi::cast(this_entry->object_);
+ HValue* phi_operand = phi->OperandAt(pred_index);
+ that_entry = that->Find(phi_operand);
+
+ } else {
+ that_entry = that->Find(this_entry->object_);
+ }
+
+ if (that_entry == NULL) {
+ this_entry->object_ = NULL;
+ compact = true;
+ } else {
+ this_entry->maps_ =
+ this_entry->maps_->Union(that_entry->maps_, phase_->zone());
+ this_entry->is_stable_ =
+ this_entry->is_stable_ && that_entry->is_stable_;
+ if (this_entry->check_ != that_entry->check_) {
+ this_entry->check_ = NULL;
+ }
+ ASSERT(this_entry->maps_->size() > 0);
+ }
+ }
+ if (compact) Compact();
+ }
+
+ if (FLAG_trace_check_elimination) {
+ PrintF("B%d checkmaps-table merged with B%d table:\n",
+ succ->block_id(), pred_block->block_id());
+ Print();
+ }
+ return this;
}
void ReduceCheckMaps(HCheckMaps* instr) {
HValue* object = instr->value()->ActualValue();
- int index = Find(object);
- if (index >= 0) {
+ HCheckTableEntry* entry = Find(object);
+ if (entry != NULL) {
// entry found;
- MapSet a = known_maps_[index];
- MapSet i = instr->map_set().Copy(zone_);
+ MapSet a = entry->maps_;
+ MapSet i = instr->map_set().Copy(phase_->zone());
if (a->IsSubset(i)) {
// The first check is more strict; the second is redundant.
- if (checks_[index] != NULL) {
- instr->DeleteAndReplaceWith(checks_[index]);
- redundant_++;
+ if (entry->check_ != NULL) {
+ TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
+ instr->id(), instr->block()->block_id(), entry->check_->id()));
+ instr->DeleteAndReplaceWith(entry->check_);
+ INC_STAT(redundant_);
} else {
- instr->DeleteAndReplaceWith(instr->value());
- removed_++;
+ TRACE(("Marking redundant CheckMaps #%d at B%d as dead\n",
+ instr->id(), instr->block()->block_id()));
+ // Mark check as dead but leave it in the graph as a checkpoint for
+ // subsequent checks.
+ instr->SetFlag(HValue::kIsDead);
+ entry->check_ = instr;
+ INC_STAT(removed_);
}
return;
}
- i = i->Intersect(a, zone_);
- if (i->size() == 0) {
+ MapSet intersection = i->Intersect(a, phase_->zone());
+ if (intersection->size() == 0) {
// Intersection is empty; probably megamorphic, which is likely to
// deopt anyway, so just leave things as they are.
- empty_++;
+ INC_STAT(empty_);
} else {
- // TODO(titzer): replace the first check with a more strict check.
- narrowed_++;
+ // Update set of maps in the entry.
+ entry->maps_ = intersection;
+ if (intersection->size() != i->size()) {
+ // Narrow set of maps in the second check maps instruction.
+ HGraph* graph = instr->block()->graph();
+ if (entry->check_ != NULL &&
+ entry->check_->block() == instr->block() &&
+ entry->check_->IsCheckMaps()) {
+ // There is a check in the same block so replace it with a more
+ // strict check and eliminate the second check entirely.
+ HCheckMaps* check = HCheckMaps::cast(entry->check_);
+ TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(),
+ check->block()->block_id()));
+ check->set_map_set(intersection, graph->zone());
+ TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
+ instr->id(), instr->block()->block_id(), entry->check_->id()));
+ instr->DeleteAndReplaceWith(entry->check_);
+ } else {
+ TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(),
+ instr->block()->block_id()));
+ instr->set_map_set(intersection, graph->zone());
+ entry->check_ = instr;
+ }
+
+ if (FLAG_trace_check_elimination) {
+ Print();
+ }
+ INC_STAT(narrowed_);
+ }
}
} else {
// No entry; insert a new one.
- Insert(object, instr, instr->map_set().Copy(zone_));
+ Insert(object, instr, instr->map_set().Copy(phase_->zone()),
+ instr->is_stable());
}
}
@@ -88,10 +367,10 @@ class HCheckTable {
HValue* value = instr->Canonicalize();
if (value == NULL) {
instr->DeleteAndReplaceWith(instr->value());
- removed_++;
+ INC_STAT(removed_);
} else if (value != instr) {
instr->DeleteAndReplaceWith(value);
- redundant_++;
+ INC_STAT(redundant_);
}
}
@@ -107,7 +386,7 @@ class HCheckTable {
HConstant* constant = HConstant::CreateAndInsertBefore(
instr->block()->graph()->zone(), map, true, instr);
instr->DeleteAndReplaceWith(constant);
- loads_++;
+ INC_STAT(loads_);
}
void ReduceCheckMapValue(HCheckMapValue* instr) {
@@ -116,22 +395,41 @@ class HCheckTable {
HValue* object = instr->value()->ActualValue();
// Match a HCheckMapValue(object, HConstant(map))
Unique<Map> map = MapConstant(instr->map());
- MapSet maps = FindMaps(object);
- if (maps != NULL) {
+
+ HCheckTableEntry* entry = Find(object);
+ if (entry != NULL) {
+ MapSet maps = entry->maps_;
if (maps->Contains(map)) {
if (maps->size() == 1) {
// Object is known to have exactly this map.
- instr->DeleteAndReplaceWith(NULL);
- removed_++;
+ if (entry->check_ != NULL) {
+ instr->DeleteAndReplaceWith(entry->check_);
+ } else {
+ // Mark check as dead but leave it in the graph as a checkpoint for
+ // subsequent checks.
+ instr->SetFlag(HValue::kIsDead);
+ entry->check_ = instr;
+ }
+ INC_STAT(removed_);
} else {
// Only one map survives the check.
maps->Clear();
- maps->Add(map, zone_);
+ maps->Add(map, phase_->zone());
+ entry->check_ = instr;
}
}
} else {
// No prior information.
- Insert(object, map);
+ // TODO(verwaest): Tag map constants with stability.
+ Insert(object, instr, map, false);
+ }
+ }
+
+ void ReduceCheckHeapObject(HCheckHeapObject* instr) {
+ if (FindMaps(instr->value()->ActualValue()) != NULL) {
+ // If the object has known maps, it's definitely a heap object.
+ instr->DeleteAndReplaceWith(instr->value());
+ INC_STAT(removed_cho_);
}
}
@@ -140,29 +438,46 @@ class HCheckTable {
if (instr->has_transition()) {
// This store transitions the object to a new map.
Kill(object);
- Insert(object, MapConstant(instr->transition()));
+ Insert(object, NULL, MapConstant(instr->transition()),
+ instr->is_stable());
} else if (IsMapAccess(instr->access())) {
// This is a store directly to the map field of the object.
Kill(object);
if (!instr->value()->IsConstant()) return;
- Insert(object, MapConstant(instr->value()));
- } else if (instr->CheckGVNFlag(kChangesMaps)) {
- // This store indirectly changes the map of the object.
- Kill(instr->object());
- UNREACHABLE();
+ // TODO(verwaest): Tag with stability.
+ Insert(object, NULL, MapConstant(instr->value()), false);
+ } else {
+ // If the instruction changes maps, it should be handled above.
+ CHECK(!instr->CheckChangesFlag(kMaps));
}
}
void ReduceCompareMap(HCompareMap* instr) {
MapSet maps = FindMaps(instr->value()->ActualValue());
if (maps == NULL) return;
+
+ int succ;
if (maps->Contains(instr->map())) {
- // TODO(titzer): replace with goto true branch
- if (maps->size() == 1) compares_true_++;
+ if (maps->size() != 1) {
+ TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: "
+ "ambiguous set of maps\n", instr->id(), instr->value()->id(),
+ instr->block()->block_id()));
+ return;
+ }
+ succ = 0;
+ INC_STAT(compares_true_);
} else {
- // TODO(titzer): replace with goto false branch
- compares_false_++;
+ succ = 1;
+ INC_STAT(compares_false_);
}
+
+ TRACE(("Marking redundant CompareMap #%d for #%d at B%d as %s\n",
+ instr->id(), instr->value()->id(), instr->block()->block_id(),
+ succ == 0 ? "true" : "false"));
+ instr->set_known_successor_index(succ);
+
+ int unreachable_succ = 1 - succ;
+ instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
}
void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
@@ -172,36 +487,91 @@ class HCheckTable {
if (maps->Contains(instr->original_map())) {
// If the object has the original map, it will be transitioned.
maps->Remove(instr->original_map());
- maps->Add(instr->transitioned_map(), zone_);
+ maps->Add(instr->transitioned_map(), phase_->zone());
} else {
// Object does not have the given map, thus the transition is redundant.
instr->DeleteAndReplaceWith(instr->object());
- transitions_++;
+ INC_STAT(transitions_);
}
}
+ // Reset the table.
+ void Reset() {
+ size_ = 0;
+ cursor_ = 0;
+ }
+
// Kill everything in the table.
- void Kill() {
- memset(objects_, 0, sizeof(objects_));
+ void KillUnstableEntries() {
+ bool compact = false;
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* entry = &entries_[i];
+ ASSERT(entry->object_ != NULL);
+ if (!entry->is_stable_) {
+ entry->object_ = NULL;
+ compact = true;
+ }
+ }
+ if (compact) Compact();
}
// Kill everything in the table that may alias {object}.
void Kill(HValue* object) {
- for (int i = 0; i < kMaxTrackedObjects; i++) {
- if (objects_[i] == NULL) continue;
- if (aliasing_.MayAlias(objects_[i], object)) objects_[i] = NULL;
+ bool compact = false;
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* entry = &entries_[i];
+ ASSERT(entry->object_ != NULL);
+ if (phase_->aliasing_->MayAlias(entry->object_, object)) {
+ entry->object_ = NULL;
+ compact = true;
+ }
}
- ASSERT(Find(object) < 0);
+ if (compact) Compact();
+ ASSERT(Find(object) == NULL);
+ }
+
+ void Compact() {
+ // First, compact the array in place.
+ int max = size_, dest = 0, old_cursor = cursor_;
+ for (int i = 0; i < max; i++) {
+ if (entries_[i].object_ != NULL) {
+ if (dest != i) entries_[dest] = entries_[i];
+ dest++;
+ } else {
+ if (i < old_cursor) cursor_--;
+ size_--;
+ }
+ }
+ ASSERT(size_ == dest);
+ ASSERT(cursor_ <= size_);
+
+ // Preserve the age of the entries by moving the older entries to the end.
+ if (cursor_ == size_) return; // Cursor already points at end.
+ if (cursor_ != 0) {
+ // | L = oldest | R = newest | |
+ // ^ cursor ^ size ^ MAX
+ HCheckTableEntry tmp_entries[kMaxTrackedObjects];
+ int L = cursor_;
+ int R = size_ - cursor_;
+
+ OS::MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry));
+ OS::MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry));
+ OS::MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry));
+ }
+
+ cursor_ = size_; // Move cursor to end.
}
void Print() {
- for (int i = 0; i < kMaxTrackedObjects; i++) {
- if (objects_[i] == NULL) continue;
- PrintF(" checkmaps-table @%d: object #%d ", i, objects_[i]->id());
- if (checks_[i] != NULL) {
- PrintF("check #%d ", checks_[i]->id());
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* entry = &entries_[i];
+ ASSERT(entry->object_ != NULL);
+ PrintF(" checkmaps-table @%d: %s #%d ", i,
+ entry->object_->IsPhi() ? "phi" : "object", entry->object_->id());
+ if (entry->check_ != NULL) {
+ PrintF("check #%d ", entry->check_->id());
}
- MapSet list = known_maps_[i];
+ MapSet list = entry->maps_;
PrintF("%d maps { ", list->size());
for (int j = 0; j < list->size(); j++) {
if (j > 0) PrintF(", ");
@@ -211,47 +581,42 @@ class HCheckTable {
}
}
- void PrintStats() {
- if (redundant_ > 0) PrintF(" redundant = %2d\n", redundant_);
- if (removed_ > 0) PrintF(" removed = %2d\n", removed_);
- if (narrowed_ > 0) PrintF(" narrowed = %2d\n", narrowed_);
- if (loads_ > 0) PrintF(" loads = %2d\n", loads_);
- if (empty_ > 0) PrintF(" empty = %2d\n", empty_);
- if (compares_true_ > 0) PrintF(" cmp_true = %2d\n", compares_true_);
- if (compares_false_ > 0) PrintF(" cmp_false = %2d\n", compares_false_);
- if (transitions_ > 0) PrintF(" transitions = %2d\n", transitions_);
- }
-
- private:
- int Find(HValue* object) {
- for (int i = 0; i < kMaxTrackedObjects; i++) {
- if (objects_[i] == NULL) continue;
- if (aliasing_.MustAlias(objects_[i], object)) return i;
+ HCheckTableEntry* Find(HValue* object) {
+ for (int i = size_ - 1; i >= 0; i--) {
+ // Search from most-recently-inserted to least-recently-inserted.
+ HCheckTableEntry* entry = &entries_[i];
+ ASSERT(entry->object_ != NULL);
+ if (phase_->aliasing_->MustAlias(entry->object_, object)) return entry;
}
- return -1;
+ return NULL;
}
MapSet FindMaps(HValue* object) {
- int index = Find(object);
- return index < 0 ? NULL : known_maps_[index];
+ HCheckTableEntry* entry = Find(object);
+ return entry == NULL ? NULL : entry->maps_;
}
- void Insert(HValue* object, Unique<Map> map) {
- MapSet list = new(zone_) UniqueSet<Map>();
- list->Add(map, zone_);
- Insert(object, NULL, list);
+ void Insert(HValue* object,
+ HInstruction* check,
+ Unique<Map> map,
+ bool is_stable) {
+ MapSet list = new(phase_->zone()) UniqueSet<Map>();
+ list->Add(map, phase_->zone());
+ Insert(object, check, list, is_stable);
}
- void Insert(HValue* object, HCheckMaps* check, MapSet maps) {
- for (int i = 0; i < kMaxTrackedObjects; i++) {
- // TODO(titzer): drop old entries instead of disallowing new ones.
- if (objects_[i] == NULL) {
- objects_[i] = object;
- checks_[i] = check;
- known_maps_[i] = maps;
- return;
- }
- }
+ void Insert(HValue* object,
+ HInstruction* check,
+ MapSet maps,
+ bool is_stable) {
+ HCheckTableEntry* entry = &entries_[cursor_++];
+ entry->object_ = object;
+ entry->check_ = check;
+ entry->maps_ = maps;
+ entry->is_stable_ = is_stable;
+ // If the table becomes full, wrap around and overwrite older entries.
+ if (cursor_ == kMaxTrackedObjects) cursor_ = 0;
+ if (size_ < kMaxTrackedObjects) size_++;
}
bool IsMapAccess(HObjectAccess access) {
@@ -262,96 +627,109 @@ class HCheckTable {
return Unique<Map>::cast(HConstant::cast(value)->GetUnique());
}
- Zone* zone_;
- HValue* objects_[kMaxTrackedObjects];
- HValue* checks_[kMaxTrackedObjects];
- MapSet known_maps_[kMaxTrackedObjects];
- HAliasAnalyzer aliasing_;
- int redundant_;
- int removed_;
- int narrowed_;
- int loads_;
- int empty_;
- int compares_true_;
- int compares_false_;
- int transitions_;
+ friend class HCheckMapsEffects;
+ friend class HCheckEliminationPhase;
+
+ HCheckEliminationPhase* phase_;
+ HCheckTableEntry entries_[kMaxTrackedObjects];
+ int16_t cursor_; // Must be <= kMaxTrackedObjects
+ int16_t size_; // Must be <= kMaxTrackedObjects
+ // TODO(titzer): STATIC_ASSERT kMaxTrackedObjects < max(cursor_)
};
-void HCheckEliminationPhase::Run() {
- for (int i = 0; i < graph()->blocks()->length(); i++) {
- EliminateLocalChecks(graph()->blocks()->at(i));
+// Collects instructions that can cause effects that invalidate information
+// needed for check elimination.
+class HCheckMapsEffects : public ZoneObject {
+ public:
+ explicit HCheckMapsEffects(Zone* zone)
+ : stores_(5, zone) { }
+
+ inline bool Disabled() {
+ return false; // Effects are _not_ disabled.
}
-}
+ // Process a possibly side-effecting instruction.
+ void Process(HInstruction* instr, Zone* zone) {
+ if (instr->IsStoreNamedField()) {
+ stores_.Add(HStoreNamedField::cast(instr), zone);
+ } else {
+ flags_.Add(instr->ChangesFlags());
+ }
+ }
-// For code de-uglification.
-#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x
+ // Apply these effects to the given check elimination table.
+ void Apply(HCheckTable* table) {
+ if (flags_.Contains(kOsrEntries)) {
+ table->Reset();
+ return;
+ }
+ if (flags_.Contains(kMaps) || flags_.Contains(kElementsKind)) {
+ // Uncontrollable map modifications; kill everything.
+ table->KillUnstableEntries();
+ return;
+ }
+ // Kill maps for each store contained in these effects.
+ for (int i = 0; i < stores_.length(); i++) {
+ HStoreNamedField* s = stores_[i];
+ if (table->IsMapAccess(s->access()) || s->has_transition()) {
+ table->Kill(s->object()->ActualValue());
+ }
+ }
+ }
-// Eliminate checks local to a block.
-void HCheckEliminationPhase::EliminateLocalChecks(HBasicBlock* block) {
- HCheckTable table(zone());
- TRACE(("-- check-elim B%d ------------------------------------------------\n",
- block->block_id()));
+ // Union these effects with the other effects.
+ void Union(HCheckMapsEffects* that, Zone* zone) {
+ flags_.Add(that->flags_);
+ for (int i = 0; i < that->stores_.length(); i++) {
+ stores_.Add(that->stores_[i], zone);
+ }
+ }
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- bool changed = false;
- HInstruction* instr = it.Current();
+ private:
+ GVNFlagSet flags_;
+ ZoneList<HStoreNamedField*> stores_;
+};
- switch (instr->opcode()) {
- case HValue::kCheckMaps: {
- table.ReduceCheckMaps(HCheckMaps::cast(instr));
- changed = true;
- break;
- }
- case HValue::kCheckValue: {
- table.ReduceCheckValue(HCheckValue::cast(instr));
- changed = true;
- break;
- }
- case HValue::kLoadNamedField: {
- table.ReduceLoadNamedField(HLoadNamedField::cast(instr));
- changed = true;
- break;
- }
- case HValue::kStoreNamedField: {
- table.ReduceStoreNamedField(HStoreNamedField::cast(instr));
- changed = true;
- break;
- }
- case HValue::kCompareMap: {
- table.ReduceCompareMap(HCompareMap::cast(instr));
- changed = true;
- break;
- }
- case HValue::kTransitionElementsKind: {
- table.ReduceTransitionElementsKind(
- HTransitionElementsKind::cast(instr));
- changed = true;
- break;
- }
- case HValue::kCheckMapValue: {
- table.ReduceCheckMapValue(HCheckMapValue::cast(instr));
- changed = true;
- break;
- }
- default: {
- // If the instruction changes maps uncontrollably, kill the whole town.
- if (instr->CheckGVNFlag(kChangesMaps)) {
- table.Kill();
- changed = true;
- }
- }
- // Improvements possible:
- // - eliminate HCheckSmi and HCheckHeapObject
- }
- if (changed && FLAG_trace_check_elimination) table.Print();
+// The main routine of the analysis phase. Use the HFlowEngine for either a
+// local or a global analysis.
+void HCheckEliminationPhase::Run() {
+ HFlowEngine<HCheckTable, HCheckMapsEffects> engine(graph(), zone());
+ HCheckTable* table = new(zone()) HCheckTable(this);
+
+ if (GLOBAL) {
+ // Perform a global analysis.
+ engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
+ } else {
+ // Perform only local analysis.
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ table->Reset();
+ engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
+ }
}
- if (FLAG_trace_check_elimination) table.PrintStats();
+ if (FLAG_trace_check_elimination) PrintStats();
}
+// Are we eliminated yet?
+void HCheckEliminationPhase::PrintStats() {
+#if DEBUG
+ #define PRINT_STAT(x) if (x##_ > 0) PrintF(" %-16s = %2d\n", #x, x##_)
+#else
+ #define PRINT_STAT(x)
+#endif
+ PRINT_STAT(redundant);
+ PRINT_STAT(removed);
+ PRINT_STAT(removed_cho);
+ PRINT_STAT(narrowed);
+ PRINT_STAT(loads);
+ PRINT_STAT(empty);
+ PRINT_STAT(compares_true);
+ PRINT_STAT(compares_false);
+ PRINT_STAT(transitions);
+}
+
} } // namespace v8::internal
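
The rewritten HCheckTable above keeps at most kMaxTrackedObjects facts, overwrites older entries once full by wrapping a cursor, and searches entries starting from the most recently inserted slot. The simplified, self-contained model below shows just that bookkeeping shape; it is a sketch, not the V8 class, and reduces the per-entry payload to a single value.

// Simplified model of a bounded fact table: when full, new entries
// overwrite older ones via a wrapping cursor, as in HCheckTable::Insert.
template <typename Key, typename Value, int kCapacity>
class BoundedFactTable {
 public:
  BoundedFactTable() : cursor_(0), size_(0) {}

  void Insert(const Key& key, const Value& value) {
    entries_[cursor_].key = key;
    entries_[cursor_].value = value;
    if (++cursor_ == kCapacity) cursor_ = 0;  // Wrap and overwrite oldest.
    if (size_ < kCapacity) size_++;
  }

  // Scan the live entries from the highest index down; until the cursor
  // wraps, higher indices hold the newer facts.
  Value* Find(const Key& key) {
    for (int i = size_ - 1; i >= 0; i--) {
      if (entries_[i].key == key) return &entries_[i].value;
    }
    return nullptr;  // No fact recorded for this key.
  }

 private:
  struct Entry { Key key; Value value; };
  Entry entries_[kCapacity];
  int cursor_;  // Next slot to fill; always < kCapacity.
  int size_;    // Number of valid entries; always <= kCapacity.
};
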
diff --git a/deps/v8/src/hydrogen-check-elimination.h b/deps/v8/src/hydrogen-check-elimination.h
index fa01964f6f..b429b17462 100644
--- a/deps/v8/src/hydrogen-check-elimination.h
+++ b/deps/v8/src/hydrogen-check-elimination.h
@@ -29,6 +29,7 @@
#define V8_HYDROGEN_CHECK_ELIMINATION_H_
#include "hydrogen.h"
+#include "hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
@@ -38,12 +39,39 @@ namespace internal {
class HCheckEliminationPhase : public HPhase {
public:
explicit HCheckEliminationPhase(HGraph* graph)
- : HPhase("H_Check Elimination", graph) { }
+ : HPhase("H_Check Elimination", graph), aliasing_() {
+#ifdef DEBUG
+ redundant_ = 0;
+ removed_ = 0;
+ removed_cho_ = 0;
+ narrowed_ = 0;
+ loads_ = 0;
+ empty_ = 0;
+ compares_true_ = 0;
+ compares_false_ = 0;
+ transitions_ = 0;
+#endif
+ }
void Run();
+ friend class HCheckTable;
+
private:
- void EliminateLocalChecks(HBasicBlock* block);
+ void PrintStats();
+
+ HAliasAnalyzer* aliasing_;
+#ifdef DEBUG
+ int redundant_;
+ int removed_;
+ int removed_cho_;
+ int narrowed_;
+ int loads_;
+ int empty_;
+ int compares_true_;
+ int compares_false_;
+ int transitions_;
+#endif
};
diff --git a/deps/v8/src/hydrogen-dce.cc b/deps/v8/src/hydrogen-dce.cc
index e101ee5bcc..3a2eac40e5 100644
--- a/deps/v8/src/hydrogen-dce.cc
+++ b/deps/v8/src/hydrogen-dce.cc
@@ -64,7 +64,7 @@ void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
}
stream.Add(" -> ");
instr->PrintTo(&stream);
- PrintF("[MarkLive %s]\n", *stream.ToCString());
+ PrintF("[MarkLive %s]\n", stream.ToCString().get());
}
diff --git a/deps/v8/src/hydrogen-environment-liveness.cc b/deps/v8/src/hydrogen-environment-liveness.cc
index fad9755e5c..d7501ac49e 100644
--- a/deps/v8/src/hydrogen-environment-liveness.cc
+++ b/deps/v8/src/hydrogen-environment-liveness.cc
@@ -201,7 +201,7 @@ void HEnvironmentLivenessAnalysisPhase::Run() {
HBasicBlock* block = graph()->blocks()->at(block_id);
UpdateLivenessAtBlockEnd(block, &live);
- for (HInstruction* instr = block->last(); instr != NULL;
+ for (HInstruction* instr = block->end(); instr != NULL;
instr = instr->previous()) {
UpdateLivenessAtInstruction(instr, &live);
}
diff --git a/deps/v8/src/hydrogen-flow-engine.h b/deps/v8/src/hydrogen-flow-engine.h
index dfe43ec6c3..99a2f841a7 100644
--- a/deps/v8/src/hydrogen-flow-engine.h
+++ b/deps/v8/src/hydrogen-flow-engine.h
@@ -122,31 +122,37 @@ class HFlowEngine {
// Skip blocks not dominated by the root node.
if (SkipNonDominatedBlock(root, block)) continue;
- State* state = StateAt(block);
+ State* state = State::Finish(StateAt(block), block, zone_);
- if (block->IsLoopHeader()) {
- // Apply loop effects before analyzing loop body.
- ComputeLoopEffects(block)->Apply(state);
- } else {
- // Must have visited all predecessors before this block.
- CheckPredecessorCount(block);
- }
+ if (block->IsReachable()) {
+ ASSERT(state != NULL);
+ if (block->IsLoopHeader()) {
+ // Apply loop effects before analyzing loop body.
+ ComputeLoopEffects(block)->Apply(state);
+ } else {
+ // Must have visited all predecessors before this block.
+ CheckPredecessorCount(block);
+ }
- // Go through all instructions of the current block, updating the state.
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- state = state->Process(it.Current(), zone_);
+ // Go through all instructions of the current block, updating the state.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ state = state->Process(it.Current(), zone_);
+ }
}
// Propagate the block state forward to all successor blocks.
- for (int i = 0; i < block->end()->SuccessorCount(); i++) {
+ int max = block->end()->SuccessorCount();
+ for (int i = 0; i < max; i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
IncrementPredecessorCount(succ);
- if (StateAt(succ) == NULL) {
- // This is the first state to reach the successor.
- SetStateAt(succ, state->Copy(succ, zone_));
+
+ if (max == 1 && succ->predecessors()->length() == 1) {
+ // Optimization: successor can inherit this state.
+ SetStateAt(succ, state);
} else {
// Merge the current state with the state already at the successor.
- SetStateAt(succ, state->Merge(succ, StateAt(succ), zone_));
+ SetStateAt(succ,
+ State::Merge(StateAt(succ), succ, state, block, zone_));
}
}
}
@@ -178,6 +184,7 @@ class HFlowEngine {
i = member->loop_information()->GetLastBackEdge()->block_id();
} else {
// Process all the effects of the block.
+ if (member->IsUnreachable()) continue;
ASSERT(member->current_loop() == loop);
for (HInstructionIterator it(member); !it.Done(); it.Advance()) {
effects->Process(it.Current(), zone_);
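
The flow-engine changes above route per-block states through static Merge and Finish hooks that must tolerate missing states: a successor that has not been reached yet, or a predecessor that turned out to be unreachable. The toy, self-contained function below illustrates that merge shape with a plain set of integer "facts" standing in for the real check-table state; it is an analogy, not the HFlowEngine API.

#include <optional>
#include <set>

using Facts = std::set<int>;  // Facts known to hold on entry to a block.

// Merge the state arriving along one predecessor edge into the state
// already recorded at a join block. A missing incoming state (unreachable
// predecessor) changes nothing; a missing recorded state adopts a copy of
// the incoming one; a real join keeps only facts agreed on by both paths.
std::optional<Facts> MergeAtJoin(std::optional<Facts> recorded,
                                 const std::optional<Facts>& incoming) {
  if (!incoming.has_value()) return recorded;
  if (!recorded.has_value()) return incoming;
  Facts intersection;
  for (int fact : *recorded) {
    if (incoming->count(fact) > 0) intersection.insert(fact);
  }
  return intersection;
}
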
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index e3bf316f37..6bf5a1b68e 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -32,39 +32,39 @@
namespace v8 {
namespace internal {
-class HValueMap: public ZoneObject {
+class HInstructionMap V8_FINAL : public ZoneObject {
public:
- explicit HValueMap(Zone* zone)
+ HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
: array_size_(0),
lists_size_(0),
count_(0),
- present_flags_(0),
array_(NULL),
lists_(NULL),
- free_list_head_(kNil) {
+ free_list_head_(kNil),
+ side_effects_tracker_(side_effects_tracker) {
ResizeLists(kInitialSize, zone);
Resize(kInitialSize, zone);
}
- void Kill(GVNFlagSet flags);
+ void Kill(SideEffects side_effects);
- void Add(HValue* value, Zone* zone) {
- present_flags_.Add(value->gvn_flags());
- Insert(value, zone);
+ void Add(HInstruction* instr, Zone* zone) {
+ present_depends_on_.Add(side_effects_tracker_->ComputeDependsOn(instr));
+ Insert(instr, zone);
}
- HValue* Lookup(HValue* value) const;
+ HInstruction* Lookup(HInstruction* instr) const;
- HValueMap* Copy(Zone* zone) const {
- return new(zone) HValueMap(zone, this);
+ HInstructionMap* Copy(Zone* zone) const {
+ return new(zone) HInstructionMap(zone, this);
}
bool IsEmpty() const { return count_ == 0; }
private:
- // A linked list of HValue* values. Stored in arrays.
- struct HValueMapListElement {
- HValue* value;
+ // A linked list of HInstruction* values. Stored in arrays.
+ struct HInstructionMapListElement {
+ HInstruction* instr;
int next; // Index in the array of the next list element.
};
static const int kNil = -1; // The end of a linked list
@@ -72,34 +72,36 @@ class HValueMap: public ZoneObject {
// Must be a power of 2.
static const int kInitialSize = 16;
- HValueMap(Zone* zone, const HValueMap* other);
+ HInstructionMap(Zone* zone, const HInstructionMap* other);
void Resize(int new_size, Zone* zone);
void ResizeLists(int new_size, Zone* zone);
- void Insert(HValue* value, Zone* zone);
+ void Insert(HInstruction* instr, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
int lists_size_;
- int count_; // The number of values stored in the HValueMap.
- GVNFlagSet present_flags_; // All flags that are in any value in the
- // HValueMap.
- HValueMapListElement* array_; // Primary store - contains the first value
+ int count_; // The number of values stored in the HInstructionMap.
+ SideEffects present_depends_on_;
+ HInstructionMapListElement* array_;
+ // Primary store - contains the first value
// with a given hash. Colliding elements are stored in linked lists.
- HValueMapListElement* lists_; // The linked lists containing hash collisions.
+ HInstructionMapListElement* lists_;
+ // The linked lists containing hash collisions.
int free_list_head_; // Unused elements in lists_ are on the free list.
+ SideEffectsTracker* side_effects_tracker_;
};
-class HSideEffectMap BASE_EMBEDDED {
+class HSideEffectMap V8_FINAL BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
HSideEffectMap& operator= (const HSideEffectMap& other);
- void Kill(GVNFlagSet flags);
+ void Kill(SideEffects side_effects);
- void Store(GVNFlagSet flags, HInstruction* instr);
+ void Store(SideEffects side_effects, HInstruction* instr);
bool IsEmpty() const { return count_ == 0; }
@@ -152,35 +154,36 @@ void TraceGVN(const char* msg, ...) {
}
-HValueMap::HValueMap(Zone* zone, const HValueMap* other)
+HInstructionMap::HInstructionMap(Zone* zone, const HInstructionMap* other)
: array_size_(other->array_size_),
lists_size_(other->lists_size_),
count_(other->count_),
- present_flags_(other->present_flags_),
- array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
- lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_) {
+ present_depends_on_(other->present_depends_on_),
+ array_(zone->NewArray<HInstructionMapListElement>(other->array_size_)),
+ lists_(zone->NewArray<HInstructionMapListElement>(other->lists_size_)),
+ free_list_head_(other->free_list_head_),
+ side_effects_tracker_(other->side_effects_tracker_) {
OS::MemCopy(
- array_, other->array_, array_size_ * sizeof(HValueMapListElement));
+ array_, other->array_, array_size_ * sizeof(HInstructionMapListElement));
OS::MemCopy(
- lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+ lists_, other->lists_, lists_size_ * sizeof(HInstructionMapListElement));
}
-void HValueMap::Kill(GVNFlagSet flags) {
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
- if (!present_flags_.ContainsAnyOf(depends_flags)) return;
- present_flags_.RemoveAll();
+void HInstructionMap::Kill(SideEffects changes) {
+ if (!present_depends_on_.ContainsAnyOf(changes)) return;
+ present_depends_on_.RemoveAll();
for (int i = 0; i < array_size_; ++i) {
- HValue* value = array_[i].value;
- if (value != NULL) {
+ HInstruction* instr = array_[i].instr;
+ if (instr != NULL) {
// Clear list of collisions first, so we know if it becomes empty.
int kept = kNil; // List of kept elements.
int next;
for (int current = array_[i].next; current != kNil; current = next) {
next = lists_[current].next;
- HValue* value = lists_[current].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
+ HInstruction* instr = lists_[current].instr;
+ SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+ if (depends_on.ContainsAnyOf(changes)) {
// Drop it.
count_--;
lists_[current].next = free_list_head_;
@@ -189,40 +192,41 @@ void HValueMap::Kill(GVNFlagSet flags) {
// Keep it.
lists_[current].next = kept;
kept = current;
- present_flags_.Add(value->gvn_flags());
+ present_depends_on_.Add(depends_on);
}
}
array_[i].next = kept;
// Now possibly drop directly indexed element.
- value = array_[i].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
+ instr = array_[i].instr;
+ SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+ if (depends_on.ContainsAnyOf(changes)) { // Drop it.
count_--;
int head = array_[i].next;
if (head == kNil) {
- array_[i].value = NULL;
+ array_[i].instr = NULL;
} else {
- array_[i].value = lists_[head].value;
+ array_[i].instr = lists_[head].instr;
array_[i].next = lists_[head].next;
lists_[head].next = free_list_head_;
free_list_head_ = head;
}
} else {
- present_flags_.Add(value->gvn_flags()); // Keep it.
+ present_depends_on_.Add(depends_on); // Keep it.
}
}
}
}
-HValue* HValueMap::Lookup(HValue* value) const {
- uint32_t hash = static_cast<uint32_t>(value->Hashcode());
+HInstruction* HInstructionMap::Lookup(HInstruction* instr) const {
+ uint32_t hash = static_cast<uint32_t>(instr->Hashcode());
uint32_t pos = Bound(hash);
- if (array_[pos].value != NULL) {
- if (array_[pos].value->Equals(value)) return array_[pos].value;
+ if (array_[pos].instr != NULL) {
+ if (array_[pos].instr->Equals(instr)) return array_[pos].instr;
int next = array_[pos].next;
while (next != kNil) {
- if (lists_[next].value->Equals(value)) return lists_[next].value;
+ if (lists_[next].instr->Equals(instr)) return lists_[next].instr;
next = lists_[next].next;
}
}
@@ -230,7 +234,7 @@ HValue* HValueMap::Lookup(HValue* value) const {
}
-void HValueMap::Resize(int new_size, Zone* zone) {
+void HInstructionMap::Resize(int new_size, Zone* zone) {
ASSERT(new_size > count_);
// Hashing the values into the new array has no more collisions than in the
// old hash map, so we can use the existing lists_ array, if we are careful.
@@ -240,33 +244,33 @@ void HValueMap::Resize(int new_size, Zone* zone) {
ResizeLists(lists_size_ << 1, zone);
}
- HValueMapListElement* new_array =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
+ HInstructionMapListElement* new_array =
+ zone->NewArray<HInstructionMapListElement>(new_size);
+ memset(new_array, 0, sizeof(HInstructionMapListElement) * new_size);
- HValueMapListElement* old_array = array_;
+ HInstructionMapListElement* old_array = array_;
int old_size = array_size_;
int old_count = count_;
count_ = 0;
- // Do not modify present_flags_. It is currently correct.
+ // Do not modify present_depends_on_. It is currently correct.
array_size_ = new_size;
array_ = new_array;
if (old_array != NULL) {
// Iterate over all the elements in lists, rehashing them.
for (int i = 0; i < old_size; ++i) {
- if (old_array[i].value != NULL) {
+ if (old_array[i].instr != NULL) {
int current = old_array[i].next;
while (current != kNil) {
- Insert(lists_[current].value, zone);
+ Insert(lists_[current].instr, zone);
int next = lists_[current].next;
lists_[current].next = free_list_head_;
free_list_head_ = current;
current = next;
}
- // Rehash the directly stored value.
- Insert(old_array[i].value, zone);
+ // Rehash the directly stored instruction.
+ Insert(old_array[i].instr, zone);
}
}
}
@@ -275,21 +279,22 @@ void HValueMap::Resize(int new_size, Zone* zone) {
}
-void HValueMap::ResizeLists(int new_size, Zone* zone) {
+void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
ASSERT(new_size > lists_size_);
- HValueMapListElement* new_lists =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
+ HInstructionMapListElement* new_lists =
+ zone->NewArray<HInstructionMapListElement>(new_size);
+ memset(new_lists, 0, sizeof(HInstructionMapListElement) * new_size);
- HValueMapListElement* old_lists = lists_;
+ HInstructionMapListElement* old_lists = lists_;
int old_size = lists_size_;
lists_size_ = new_size;
lists_ = new_lists;
if (old_lists != NULL) {
- OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+ OS::MemCopy(
+ lists_, old_lists, old_size * sizeof(HInstructionMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
@@ -298,15 +303,15 @@ void HValueMap::ResizeLists(int new_size, Zone* zone) {
}
-void HValueMap::Insert(HValue* value, Zone* zone) {
- ASSERT(value != NULL);
+void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
+ ASSERT(instr != NULL);
// Resizing when half of the hashtable is filled up.
if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
ASSERT(count_ < array_size_);
count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
- if (array_[pos].value == NULL) {
- array_[pos].value = value;
+ uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode()));
+ if (array_[pos].instr == NULL) {
+ array_[pos].instr = instr;
array_[pos].next = kNil;
} else {
if (free_list_head_ == kNil) {
@@ -315,9 +320,9 @@ void HValueMap::Insert(HValue* value, Zone* zone) {
int new_element_pos = free_list_head_;
ASSERT(new_element_pos != kNil);
free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].value = value;
+ lists_[new_element_pos].instr = instr;
lists_[new_element_pos].next = array_[pos].next;
- ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
+ ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
array_[pos].next = new_element_pos;
}
}
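A minimal standalone sketch (not part of this diff) of the growth policy used by HInstructionMap::Insert above: the backing array is doubled as soon as it becomes half full, keeping collision chains short. The initial capacity of 16 and the plain counter are assumptions for illustration only.

#include <cassert>

int main() {
  int array_size = 16;   // assumed initial capacity, for illustration
  int count = 0;
  for (int i = 0; i < 1000; ++i) {
    // Resize when half of the hashtable is filled up (same policy as above).
    if (count >= array_size >> 1) array_size <<= 1;
    assert(count < array_size);
    ++count;
    assert(2 * count <= array_size);  // load factor never exceeds 1/2
  }
  return 0;
}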
@@ -341,10 +346,9 @@ HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
}
-void HSideEffectMap::Kill(GVNFlagSet flags) {
+void HSideEffectMap::Kill(SideEffects side_effects) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
+ if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] != NULL) count_--;
data_[i] = NULL;
}
@@ -352,10 +356,9 @@ void HSideEffectMap::Kill(GVNFlagSet flags) {
}
-void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
+void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
+ if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] == NULL) count_++;
data_[i] = instr;
}
@@ -363,45 +366,148 @@ void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
}
+SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) {
+ SideEffects result(instr->ChangesFlags());
+ if (result.ContainsFlag(kInobjectFields)) {
+ int index;
+ if (instr->IsStoreNamedField() &&
+ ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) {
+ result.RemoveFlag(kInobjectFields);
+ result.AddSpecial(index);
+ } else {
+ result.AddAllSpecial();
+ }
+ }
+ return result;
+}
+
+
+SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) {
+ SideEffects result(instr->DependsOnFlags());
+ if (result.ContainsFlag(kInobjectFields)) {
+ int index;
+ if (instr->IsLoadNamedField() &&
+ ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) {
+ result.RemoveFlag(kInobjectFields);
+ result.AddSpecial(index);
+ } else {
+ result.AddAllSpecial();
+ }
+ }
+ return result;
+}
+
+
+void SideEffectsTracker::PrintSideEffectsTo(StringStream* stream,
+ SideEffects side_effects) const {
+ const char* separator = "";
+ stream->Add("[");
+ for (int bit = 0; bit < kNumberOfFlags; ++bit) {
+ GVNFlag flag = GVNFlagFromInt(bit);
+ if (side_effects.ContainsFlag(flag)) {
+ stream->Add(separator);
+ separator = ", ";
+ switch (flag) {
+#define DECLARE_FLAG(Type) \
+ case k##Type: \
+ stream->Add(#Type); \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ for (int index = 0; index < num_inobject_fields_; ++index) {
+ if (side_effects.ContainsSpecial(index)) {
+ stream->Add(separator);
+ separator = ", ";
+ inobject_fields_[index].PrintTo(stream);
+ }
+ }
+ stream->Add("]");
+}
+
+
+bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access,
+ int* index) {
+ for (int i = 0; i < num_inobject_fields_; ++i) {
+ if (access.Equals(inobject_fields_[i])) {
+ *index = i;
+ return true;
+ }
+ }
+ if (num_inobject_fields_ < SideEffects::kNumberOfSpecials) {
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Tracking inobject field access ");
+ access.PrintTo(&stream);
+ stream.Add(" (mapped to special index %d)\n", num_inobject_fields_);
+ stream.OutputToStdOut();
+ }
+ *index = num_inobject_fields_;
+ inobject_fields_[num_inobject_fields_++] = access;
+ return true;
+ }
+ return false;
+}
+
+
HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
- : HPhase("H_Global value numbering", graph),
- removed_side_effects_(false),
- block_side_effects_(graph->blocks()->length(), zone()),
- loop_side_effects_(graph->blocks()->length(), zone()),
- visited_on_paths_(graph->blocks()->length(), zone()) {
- ASSERT(!AllowHandleAllocation::IsAllowed());
- block_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
- zone());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
- zone());
- }
-
-void HGlobalValueNumberingPhase::Analyze() {
- removed_side_effects_ = false;
- ComputeBlockSideEffects();
- if (FLAG_loop_invariant_code_motion) {
- LoopInvariantCodeMotion();
- }
- AnalyzeGraph();
+ : HPhase("H_Global value numbering", graph),
+ removed_side_effects_(false),
+ block_side_effects_(graph->blocks()->length(), zone()),
+ loop_side_effects_(graph->blocks()->length(), zone()),
+ visited_on_paths_(graph->blocks()->length(), zone()) {
+ ASSERT(!AllowHandleAllocation::IsAllowed());
+ block_side_effects_.AddBlock(
+ SideEffects(), graph->blocks()->length(), zone());
+ loop_side_effects_.AddBlock(
+ SideEffects(), graph->blocks()->length(), zone());
}
-void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
- // The Analyze phase of GVN can be called multiple times. Clear loop side
- // effects before computing them to erase the contents from previous Analyze
- // passes.
- for (int i = 0; i < loop_side_effects_.length(); ++i) {
- loop_side_effects_[i].RemoveAll();
+void HGlobalValueNumberingPhase::Run() {
+ ASSERT(!removed_side_effects_);
+ for (int i = FLAG_gvn_iterations; i > 0; --i) {
+ // Compute the side effects.
+ ComputeBlockSideEffects();
+
+ // Perform loop invariant code motion if requested.
+ if (FLAG_loop_invariant_code_motion) LoopInvariantCodeMotion();
+
+ // Perform the actual value numbering.
+ AnalyzeGraph();
+
+ // Continue GVN if we removed any side effects.
+ if (!removed_side_effects_) break;
+ removed_side_effects_ = false;
+
+ // Clear all side effects.
+ ASSERT_EQ(block_side_effects_.length(), graph()->blocks()->length());
+ ASSERT_EQ(loop_side_effects_.length(), graph()->blocks()->length());
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ block_side_effects_[i].RemoveAll();
+ loop_side_effects_[i].RemoveAll();
+ }
+ visited_on_paths_.Clear();
}
+}
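A minimal standalone sketch of the bounded fixpoint loop that the new Run() implements: rerun the analysis while it keeps making progress, but never more than a fixed number of iterations (FLAG_gvn_iterations in the real code). The budget of 3 and the toy "pass" below are assumptions for illustration only.

#include <cassert>

int main() {
  const int kGvnIterations = 3;  // stands in for FLAG_gvn_iterations
  int work_left = 20;            // stands in for removable side effects
  for (int i = kGvnIterations; i > 0; --i) {
    int before = work_left;
    work_left /= 2;                  // one analysis pass
    if (work_left == before) break;  // no progress: fixpoint reached early
  }
  assert(work_left == 2);  // 20 -> 10 -> 5 -> 2 after three passes
  return 0;
}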
+
+
+void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
HBasicBlock* block = graph()->blocks()->at(i);
- GVNFlagSet side_effects;
+ SideEffects side_effects;
if (block->IsReachable() && !block->IsDeoptimizing()) {
int id = block->block_id();
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- side_effects.Add(instr->ChangesFlags());
+ side_effects.Add(side_effects_tracker_.ComputeChanges(instr));
}
block_side_effects_[id].Add(side_effects);
@@ -412,120 +518,38 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
// Propagate loop side effects upwards.
if (block->HasParentLoopHeader()) {
- int header_id = block->parent_loop_header()->block_id();
- loop_side_effects_[header_id].Add(block->IsLoopHeader()
- ? loop_side_effects_[id]
- : side_effects);
+ HBasicBlock* with_parent = block;
+ if (block->IsLoopHeader()) side_effects = loop_side_effects_[id];
+ do {
+ HBasicBlock* parent_block = with_parent->parent_loop_header();
+ loop_side_effects_[parent_block->block_id()].Add(side_effects);
+ with_parent = parent_block;
+ } while (with_parent->HasParentLoopHeader());
}
}
}
}
-SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
- char underlying_buffer[kLastFlag * 128];
- Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
-#if DEBUG
- int offset = 0;
- const char* separator = "";
- const char* comma = ", ";
- buffer[0] = 0;
- uint32_t set_depends_on = 0;
- uint32_t set_changes = 0;
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if ((flags.ToIntegral() & (1 << bit)) != 0) {
- if (bit % 2 == 0) {
- set_changes++;
- } else {
- set_depends_on++;
- }
- }
- }
- bool positive_changes = set_changes < (kLastFlag / 2);
- bool positive_depends_on = set_depends_on < (kLastFlag / 2);
- if (set_changes > 0) {
- if (positive_changes) {
- offset += OS::SNPrintF(buffer + offset, "changes [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "changes all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kChanges##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
- if (set_depends_on > 0) {
- separator = "";
- if (set_changes > 0) {
- offset += OS::SNPrintF(buffer + offset, ", ");
- }
- if (positive_depends_on) {
- offset += OS::SNPrintF(buffer + offset, "depends on [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "depends on all except [");
- }
- for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kDependsOn##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
-#else
- OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
-#endif
- size_t string_len = strlen(underlying_buffer) + 1;
- ASSERT(string_len <= sizeof(underlying_buffer));
- char* result = new char[strlen(underlying_buffer) + 1];
- OS::MemCopy(result, underlying_buffer, string_len);
- return SmartArrayPointer<char>(result);
-}
-
-
void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
graph()->use_optimistic_licm() ? "yes" : "no");
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph()->blocks()->at(i);
if (block->IsLoopHeader()) {
- GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
- TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(side_effects));
-
- GVNFlagSet accumulated_first_time_depends;
- GVNFlagSet accumulated_first_time_changes;
+ SideEffects side_effects = loop_side_effects_[block->block_id()];
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Try loop invariant motion for block B%d changes ",
+ block->block_id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, side_effects);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
HBasicBlock* last = block->loop_information()->GetLastBackEdge();
for (int j = block->block_id(); j <= last->block_id(); ++j) {
- ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects,
- &accumulated_first_time_depends,
- &accumulated_first_time_changes);
+ ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects);
}
}
}
@@ -535,25 +559,37 @@ void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
void HGlobalValueNumberingPhase::ProcessLoopBlock(
HBasicBlock* block,
HBasicBlock* loop_header,
- GVNFlagSet loop_kills,
- GVNFlagSet* first_time_depends,
- GVNFlagSet* first_time_changes) {
+ SideEffects loop_kills) {
HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
- TRACE_GVN_2("Loop invariant motion for B%d %s\n",
- block->block_id(),
- *GetGVNFlagsString(depends_flags));
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Loop invariant code motion for B%d depends on ",
+ block->block_id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
- bool hoisted = false;
if (instr->CheckFlag(HValue::kUseGVN)) {
- TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
- instr->id(),
- instr->Mnemonic(),
- *GetGVNFlagsString(instr->gvn_flags()),
- *GetGVNFlagsString(loop_kills));
- bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
+ SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+ SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr);
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Checking instruction i%d (%s) changes ",
+ instr->id(), instr->Mnemonic());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
+ stream.Add(", depends on ");
+ side_effects_tracker_.PrintSideEffectsTo(&stream, depends_on);
+ stream.Add(". Loop changes ");
+ side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
+ bool can_hoist = !depends_on.ContainsAnyOf(loop_kills);
if (can_hoist && !graph()->use_optimistic_licm()) {
can_hoist = block->IsLoopSuccessorDominator();
}
@@ -567,31 +603,15 @@ void HGlobalValueNumberingPhase::ProcessLoopBlock(
}
if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
- TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
+ TRACE_GVN_2("Hoisting loop invariant instruction i%d to block B%d\n",
+ instr->id(), pre_header->block_id());
// Move the instruction out of the loop.
instr->Unlink();
instr->InsertBefore(pre_header->end());
if (instr->HasSideEffects()) removed_side_effects_ = true;
- hoisted = true;
}
}
}
- if (!hoisted) {
- // If an instruction is not hoisted, we have to account for its side
- // effects when hoisting later HTransitionElementsKind instructions.
- GVNFlagSet previous_depends = *first_time_depends;
- GVNFlagSet previous_changes = *first_time_changes;
- first_time_depends->Add(instr->DependsOnFlags());
- first_time_changes->Add(instr->ChangesFlags());
- if (!(previous_depends == *first_time_depends)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_depends));
- }
- if (!(previous_changes == *first_time_changes)) {
- TRACE_GVN_1("Updated first-time accumulated %s\n",
- *GetGVNFlagsString(*first_time_changes));
- }
- }
instr = next;
}
}
@@ -611,10 +631,10 @@ bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
}
-GVNFlagSet
+SideEffects
HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
- GVNFlagSet side_effects;
+ SideEffects side_effects;
for (int i = 0; i < dominated->predecessors()->length(); ++i) {
HBasicBlock* block = dominated->predecessors()->at(i);
if (dominator->block_id() < block->block_id() &&
@@ -643,13 +663,13 @@ class GvnBasicBlockState: public ZoneObject {
public:
static GvnBasicBlockState* CreateEntry(Zone* zone,
HBasicBlock* entry_block,
- HValueMap* entry_map) {
+ HInstructionMap* entry_map) {
return new(zone)
GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
}
HBasicBlock* block() { return block_; }
- HValueMap* map() { return map_; }
+ HInstructionMap* map() { return map_; }
HSideEffectMap* dominators() { return &dominators_; }
GvnBasicBlockState* next_in_dominator_tree_traversal(
@@ -676,7 +696,7 @@ class GvnBasicBlockState: public ZoneObject {
private:
void Initialize(HBasicBlock* block,
- HValueMap* map,
+ HInstructionMap* map,
HSideEffectMap* dominators,
bool copy_map,
Zone* zone) {
@@ -692,7 +712,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState(GvnBasicBlockState* previous,
HBasicBlock* block,
- HValueMap* map,
+ HInstructionMap* map,
HSideEffectMap* dominators,
Zone* zone)
: previous_(previous), next_(NULL) {
@@ -739,7 +759,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState* previous_;
GvnBasicBlockState* next_;
HBasicBlock* block_;
- HValueMap* map_;
+ HInstructionMap* map_;
HSideEffectMap dominators_;
int dominated_index_;
int length_;
@@ -752,13 +772,14 @@ class GvnBasicBlockState: public ZoneObject {
// GvnBasicBlockState instances.
void HGlobalValueNumberingPhase::AnalyzeGraph() {
HBasicBlock* entry_block = graph()->entry_block();
- HValueMap* entry_map = new(zone()) HValueMap(zone());
+ HInstructionMap* entry_map =
+ new(zone()) HInstructionMap(zone(), &side_effects_tracker_);
GvnBasicBlockState* current =
GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
while (current != NULL) {
HBasicBlock* block = current->block();
- HValueMap* map = current->map();
+ HInstructionMap* map = current->map();
HSideEffectMap* dominators = current->dominators();
TRACE_GVN_2("Analyzing block B%d%s\n",
@@ -777,38 +798,44 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
HValue* other = dominators->at(i);
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(depends_on_flag) &&
- (other != NULL)) {
+ GVNFlag flag = GVNFlagFromInt(i);
+ if (instr->DependsOnFlags().Contains(flag) && other != NULL) {
TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
i,
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
- instr->HandleSideEffectDominator(changes_flag, other);
+ if (instr->HandleSideEffectDominator(flag, other)) {
+ removed_side_effects_ = true;
+ }
}
}
}
// Instruction was unlinked during graph traversal.
if (!instr->IsLinked()) continue;
- GVNFlagSet flags = instr->ChangesFlags();
- if (!flags.IsEmpty()) {
+ SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+ if (!changes.IsEmpty()) {
// Clear all instructions in the map that are affected by side effects.
// Store instruction as the dominating one for tracked side effects.
- map->Kill(flags);
- dominators->Store(flags, instr);
- TRACE_GVN_2("Instruction %d %s\n", instr->id(),
- *GetGVNFlagsString(flags));
+ map->Kill(changes);
+ dominators->Store(changes, instr);
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Instruction i%d changes ", instr->id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
}
if (instr->CheckFlag(HValue::kUseGVN)) {
ASSERT(!instr->HasObservableSideEffects());
- HValue* other = map->Lookup(instr);
+ HInstruction* other = map->Lookup(instr);
if (other != NULL) {
ASSERT(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
+ TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n",
instr->id(),
instr->Mnemonic(),
other->id(),
@@ -828,7 +855,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (next != NULL) {
HBasicBlock* dominated = next->block();
- HValueMap* successor_map = next->map();
+ HInstructionMap* successor_map = next->map();
HSideEffectMap* successor_dominators = next->dominators();
// Kill everything killed on any path between this block and the
@@ -839,7 +866,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
dominator_block->block_id() + 1 < dominated->block_id()) {
visited_on_paths_.Clear();
- GVNFlagSet side_effects_on_all_paths =
+ SideEffects side_effects_on_all_paths =
CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
dominated);
successor_map->Kill(side_effects_on_all_paths);
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
index fdbad99c6b..cb83354a7e 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -36,25 +36,77 @@
namespace v8 {
namespace internal {
+// This class extends GVNFlagSet with additional "special" dynamic side effects,
+// which can be used to represent side effects that cannot be expressed using
+// the GVNFlags of an HInstruction. These special side effects are tracked by a
+// SideEffectsTracker (see below).
+class SideEffects V8_FINAL {
+ public:
+ static const int kNumberOfSpecials = 64 - kNumberOfFlags;
+
+ SideEffects() : bits_(0) {
+ ASSERT(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
+ }
+ explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
+ bool IsEmpty() const { return bits_ == 0; }
+ bool ContainsFlag(GVNFlag flag) const {
+ return (bits_ & MaskFlag(flag)) != 0;
+ }
+ bool ContainsSpecial(int special) const {
+ return (bits_ & MaskSpecial(special)) != 0;
+ }
+ bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; }
+ void Add(SideEffects set) { bits_ |= set.bits_; }
+ void AddSpecial(int special) { bits_ |= MaskSpecial(special); }
+ void AddAllSpecial() { bits_ |= ~static_cast<uint64_t>(0) << kNumberOfFlags; }
+ void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
+ void RemoveAll() { bits_ = 0; }
+ uint64_t ToIntegral() const { return bits_; }
+ void PrintTo(StringStream* stream) const;
+
+ private:
+ uint64_t MaskFlag(GVNFlag flag) const {
+ return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
+ }
+ uint64_t MaskSpecial(int special) const {
+ ASSERT(special >= 0);
+ ASSERT(special < kNumberOfSpecials);
+ return static_cast<uint64_t>(1) << static_cast<unsigned>(
+ special + kNumberOfFlags);
+ }
+
+ uint64_t bits_;
+};
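A minimal standalone sketch (not part of this diff) of the bit layout the SideEffects class above relies on: the named GVN flags occupy the low bits of a uint64_t and the dynamically assigned "special" slots occupy the remaining high bits. The flag count of 13 is an arbitrary assumption for illustration; the real kNumberOfFlags is derived from the GVN flag lists.

#include <cassert>
#include <cstdint>

const int kFlags = 13;              // assumed number of named GVN flags
const int kSpecials = 64 - kFlags;  // remaining bits track "special" effects

uint64_t MaskFlag(int flag) { return uint64_t{1} << flag; }
uint64_t MaskSpecial(int special) {
  assert(special >= 0 && special < kSpecials);
  return uint64_t{1} << (special + kFlags);
}

int main() {
  uint64_t bits = 0;
  bits |= MaskFlag(3);     // a named flag, e.g. an "InobjectFields"-style bit
  bits |= MaskSpecial(0);  // first individually tracked inobject field
  assert((bits & MaskFlag(3)) != 0);
  assert((bits & MaskSpecial(0)) != 0);
  assert((bits & MaskSpecial(1)) == 0);  // unrelated fields stay disjoint
  return 0;
}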
+
+
+// Tracks inobject field loads/stores in a fine grained fashion, and represents
+// them using the "special" dynamic side effects of the SideEffects class (see
+// above). This way unrelated inobject field stores don't prevent hoisting and
+// merging of inobject field loads.
+class SideEffectsTracker V8_FINAL BASE_EMBEDDED {
+ public:
+ SideEffectsTracker() : num_inobject_fields_(0) {}
+ SideEffects ComputeChanges(HInstruction* instr);
+ SideEffects ComputeDependsOn(HInstruction* instr);
+ void PrintSideEffectsTo(StringStream* stream, SideEffects side_effects) const;
+
+ private:
+ bool ComputeInobjectField(HObjectAccess access, int* index);
+
+ HObjectAccess inobject_fields_[SideEffects::kNumberOfSpecials];
+ int num_inobject_fields_;
+};
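A minimal standalone sketch of the index-assignment idea behind SideEffectsTracker::ComputeInobjectField: each distinct field access gets its own stable special slot, up to a fixed capacity, so stores to unrelated fields never alias. The FieldAccess struct and the capacity of 51 are assumptions for illustration only.

#include <cassert>

struct FieldAccess { int offset; };  // hypothetical stand-in for HObjectAccess

const int kMaxSpecials = 51;         // assumed capacity (64 - number of flags)

struct Tracker {
  FieldAccess fields_[kMaxSpecials];
  int count_ = 0;

  // Returns true and a stable special index for this access, or false once
  // capacity is exhausted (the real code then falls back to the coarse
  // kInobjectFields flag).
  bool ComputeIndex(FieldAccess access, int* index) {
    for (int i = 0; i < count_; ++i) {
      if (fields_[i].offset == access.offset) { *index = i; return true; }
    }
    if (count_ == kMaxSpecials) return false;
    *index = count_;
    fields_[count_++] = access;
    return true;
  }
};

int main() {
  Tracker tracker;
  int a, b, c;
  assert(tracker.ComputeIndex({8}, &a));
  assert(tracker.ComputeIndex({16}, &b));
  assert(tracker.ComputeIndex({8}, &c));
  assert(a == c && a != b);  // same field -> same slot; different fields differ
  return 0;
}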
+
+
// Perform common subexpression elimination and loop-invariant code motion.
-class HGlobalValueNumberingPhase : public HPhase {
+class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
public:
explicit HGlobalValueNumberingPhase(HGraph* graph);
- void Run() {
- Analyze();
- // Trigger a second analysis pass to further eliminate duplicate values
- // that could only be discovered by removing side-effect-generating
- // instructions during the first pass.
- if (FLAG_smi_only_arrays && removed_side_effects_) {
- Analyze();
- // TODO(danno): Turn this into a fixpoint iteration.
- }
- }
+ void Run();
private:
- void Analyze();
- GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
+ SideEffects CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
void AnalyzeGraph();
@@ -62,19 +114,18 @@ class HGlobalValueNumberingPhase : public HPhase {
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
- GVNFlagSet loop_kills,
- GVNFlagSet* accumulated_first_time_depends,
- GVNFlagSet* accumulated_first_time_changes);
+ SideEffects loop_kills);
bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+ SideEffectsTracker side_effects_tracker_;
bool removed_side_effects_;
// A map of block IDs to their side effects.
- ZoneList<GVNFlagSet> block_side_effects_;
+ ZoneList<SideEffects> block_side_effects_;
// A map of loop header block IDs to their loop's side effects.
- ZoneList<GVNFlagSet> loop_side_effects_;
+ ZoneList<SideEffects> loop_side_effects_;
// Used when collecting side effects on paths from dominator to
// dominated.
@@ -83,7 +134,6 @@ class HGlobalValueNumberingPhase : public HPhase {
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
-
} } // namespace v8::internal
#endif // V8_HYDROGEN_GVN_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index ee3468e8aa..5795385728 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -35,6 +35,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -54,14 +56,6 @@ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
-int HValue::LoopWeight() const {
- const int w = FLAG_loop_weight;
- static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
- return weights[Min(block()->LoopNestingDepth(),
- static_cast<int>(ARRAY_SIZE(weights)-1))];
-}
-
-
Isolate* HValue::isolate() const {
ASSERT(block() != NULL);
return block()->isolate();
@@ -106,7 +100,7 @@ Representation HValue::RepresentationFromUses() {
id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
(use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
}
- use_count[rep.kind()] += use->LoopWeight();
+ use_count[rep.kind()] += 1;
}
if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
int tagged_count = use_count[Representation::kTagged];
@@ -612,11 +606,11 @@ void HValue::PrintChangesTo(StringStream* stream) {
stream->Add("*");
} else {
bool add_comma = false;
-#define PRINT_DO(type) \
- if (changes_flags.Contains(kChanges##type)) { \
- if (add_comma) stream->Add(","); \
- add_comma = true; \
- stream->Add(#type); \
+#define PRINT_DO(Type) \
+ if (changes_flags.Contains(k##Type)) { \
+ if (add_comma) stream->Add(","); \
+ add_comma = true; \
+ stream->Add(#Type); \
}
GVN_TRACKED_FLAG_LIST(PRINT_DO);
GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
@@ -688,6 +682,19 @@ void HValue::ComputeInitialRange(Zone* zone) {
}
+void HSourcePosition::PrintTo(FILE* out) {
+ if (IsUnknown()) {
+ PrintF(out, "<?>");
+ } else {
+ if (FLAG_hydrogen_track_positions) {
+ PrintF(out, "<%d:%d>", inlining_id(), position());
+ } else {
+ PrintF(out, "<0:%d>", raw());
+ }
+ }
+}
+
+
void HInstruction::PrintTo(StringStream* stream) {
PrintMnemonicTo(stream);
PrintDataTo(stream);
@@ -697,6 +704,9 @@ void HInstruction::PrintTo(StringStream* stream) {
if (CheckFlag(HValue::kHasNoObservableSideEffects)) {
stream->Add(" [noOSE]");
}
+ if (CheckFlag(HValue::kIsDead)) {
+ stream->Add(" [dead]");
+ }
}
@@ -741,8 +751,7 @@ void HInstruction::InsertBefore(HInstruction* next) {
next_ = next;
previous_ = prev;
SetBlock(next->block());
- if (position() == RelocInfo::kNoPosition &&
- next->position() != RelocInfo::kNoPosition) {
+ if (!has_position() && next->has_position()) {
set_position(next->position());
}
}
@@ -779,8 +788,7 @@ void HInstruction::InsertAfter(HInstruction* previous) {
if (block->last() == previous) {
block->set_last(this);
}
- if (position() == RelocInfo::kNoPosition &&
- previous->position() != RelocInfo::kNoPosition) {
+ if (!has_position() && previous->has_position()) {
set_position(previous->position());
}
}
@@ -849,6 +857,37 @@ void HUnaryCall::PrintDataTo(StringStream* stream) {
}
+void HCallJSFunction::PrintDataTo(StringStream* stream) {
+ function()->PrintNameTo(stream);
+ stream->Add(" ");
+ stream->Add("#%d", argument_count());
+}
+
+
+HCallJSFunction* HCallJSFunction::New(
+ Zone* zone,
+ HValue* context,
+ HValue* function,
+ int argument_count,
+ bool pass_argument_count) {
+ bool has_stack_check = false;
+ if (function->IsConstant()) {
+ HConstant* fun_const = HConstant::cast(function);
+ Handle<JSFunction> jsfun =
+ Handle<JSFunction>::cast(fun_const->handle(zone->isolate()));
+ has_stack_check = !jsfun.is_null() &&
+ (jsfun->code()->kind() == Code::FUNCTION ||
+ jsfun->code()->kind() == Code::OPTIMIZED_FUNCTION);
+ }
+
+ return new(zone) HCallJSFunction(
+ function, argument_count, pass_argument_count,
+ has_stack_check);
+}
+
+
+
+
void HBinaryCall::PrintDataTo(StringStream* stream) {
first()->PrintNameTo(stream);
stream->Add(" ");
@@ -947,38 +986,38 @@ void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
}
-void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) {
- stream->Add("base: ");
- base_index()->PrintNameTo(stream);
- stream->Add(", check: ");
- base_index()->PrintNameTo(stream);
-}
+Range* HBoundsCheck::InferRange(Zone* zone) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32() && length()->HasRange()) {
+ int upper = length()->range()->upper() - (allow_equality() ? 0 : 1);
+ int lower = 0;
+ Range* result = new(zone) Range(lower, upper);
+ if (index()->HasRange()) {
+ result->Intersect(index()->range());
+ }
-void HCallConstantFunction::PrintDataTo(StringStream* stream) {
- if (IsApplyFunction()) {
- stream->Add("optimized apply ");
- } else {
- stream->Add("%o ", function()->shared()->DebugName());
+ // In case of Smi representation, clamp result to Smi::kMaxValue.
+ if (r.IsSmi()) result->ClampToSmi();
+ return result;
}
- stream->Add("#%d", argument_count());
-}
-
-
-void HCallNamed::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- HUnaryCall::PrintDataTo(stream);
+ return HValue::InferRange(zone);
}
-void HCallGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", *name());
- HUnaryCall::PrintDataTo(stream);
+void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) {
+ stream->Add("base: ");
+ base_index()->PrintNameTo(stream);
+ stream->Add(", check: ");
+ base_index()->PrintNameTo(stream);
}
-void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("%o ", target()->shared()->DebugName());
+void HCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < OperandCount(); i++) {
+ OperandAt(i)->PrintNameTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d", argument_count());
}
@@ -1091,6 +1130,11 @@ void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map().handle());
HControlInstruction::PrintDataTo(stream);
+ if (known_successor_index() == 0) {
+ stream->Add(" [true]");
+ } else if (known_successor_index() == 1) {
+ stream->Add(" [false]");
+ }
}
@@ -1100,9 +1144,6 @@ const char* HUnaryMathOperation::OpName() const {
case kMathRound: return "round";
case kMathAbs: return "abs";
case kMathLog: return "log";
- case kMathSin: return "sin";
- case kMathCos: return "cos";
- case kMathTan: return "tan";
case kMathExp: return "exp";
case kMathSqrt: return "sqrt";
case kMathPowHalf: return "pow-half";
@@ -1177,6 +1218,20 @@ void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (value()->representation().IsSpecialization()) {
+ if (compares_number_type()) {
+ *block = FirstSuccessor();
+ } else {
+ *block = SecondSuccessor();
+ }
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
void HCheckMapValue::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
@@ -1248,6 +1303,26 @@ HValue* HBitwise::Canonicalize() {
}
+Representation HAdd::RepresentationFromInputs() {
+ Representation left_rep = left()->representation();
+ if (left_rep.IsExternal()) {
+ return Representation::External();
+ }
+ return HArithmeticBinaryOperation::RepresentationFromInputs();
+}
+
+
+Representation HAdd::RequiredInputRepresentation(int index) {
+ if (index == 2) {
+ Representation left_rep = left()->representation();
+ if (left_rep.IsExternal()) {
+ return Representation::Integer32();
+ }
+ }
+ return HArithmeticBinaryOperation::RequiredInputRepresentation(index);
+}
+
+
static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
return arg1->representation().IsSpecialization() &&
arg2->EqualsInteger32Constant(identity);
@@ -1321,6 +1396,23 @@ void HTypeof::PrintDataTo(StringStream* stream) {
}
+HInstruction* HForceRepresentation::New(Zone* zone, HValue* context,
+ HValue* value, Representation required_representation) {
+ if (FLAG_fold_constants && value->IsConstant()) {
+ HConstant* c = HConstant::cast(value);
+ if (c->HasNumberValue()) {
+ double double_res = c->DoubleValue();
+ if (IsInt32Double(double_res)) {
+ return HConstant::New(zone, context,
+ static_cast<int32_t>(double_res),
+ required_representation);
+ }
+ }
+ }
+ return new(zone) HForceRepresentation(value, required_representation);
+}
+
+
void HForceRepresentation::PrintDataTo(StringStream* stream) {
stream->Add("%s ", representation().Mnemonic());
value()->PrintNameTo(stream);
@@ -1337,81 +1429,51 @@ void HChange::PrintDataTo(StringStream* stream) {
}
-static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- }
- // A change from an integer32 can be replaced by the integer32 value.
- if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
HValue* HUnaryMathOperation::Canonicalize() {
if (op() == kMathRound || op() == kMathFloor) {
HValue* val = value();
if (val->IsChange()) val = HChange::cast(val)->value();
-
- // If the input is smi or integer32 then we replace the instruction with its
- // input.
if (val->representation().IsSmiOrInteger32()) {
- if (!val->representation().Equals(representation())) {
- HChange* result = new(block()->zone()) HChange(
- val, representation(), false, false);
- result->InsertBefore(this);
- return result;
- }
- return val;
+ if (val->representation().Equals(representation())) return val;
+ return Prepend(new(block()->zone()) HChange(
+ val, representation(), false, false));
}
}
+ if (op() == kMathFloor && value()->IsDiv() && value()->UseCount() == 1) {
+ HDiv* hdiv = HDiv::cast(value());
+
+ HValue* left = hdiv->left();
+ if (left->representation().IsInteger32()) {
+ // A value with an integer representation does not need to be transformed.
+ } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32()) {
+ // A change from an integer32 can be replaced by the integer32 value.
+ left = HChange::cast(left)->value();
+ } else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
+ left = Prepend(new(block()->zone()) HChange(
+ left, Representation::Integer32(), false, false));
+ } else {
+ return this;
+ }
- if (op() == kMathFloor) {
- HValue* val = value();
- if (val->IsDiv() && (val->UseCount() == 1)) {
- HDiv* hdiv = HDiv::cast(val);
- HValue* left = hdiv->left();
- HValue* right = hdiv->right();
- // Try to simplify left and right values of the division.
- HValue* new_left = SimplifiedDividendForMathFloorOfDiv(left);
- if (new_left == NULL &&
- hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
- new_left = new(block()->zone()) HChange(
- left, Representation::Integer32(), false, false);
- HChange::cast(new_left)->InsertBefore(this);
- }
- HValue* new_right =
- LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
- if (new_right == NULL &&
-#if V8_TARGET_ARCH_ARM
- CpuFeatures::IsSupported(SUDIV) &&
-#endif
- hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
- new_right = new(block()->zone()) HChange(
- right, Representation::Integer32(), false, false);
- HChange::cast(new_right)->InsertBefore(this);
- }
-
- // Return if left or right are not optimizable.
- if ((new_left == NULL) || (new_right == NULL)) return this;
-
- // Insert the new values in the graph.
- if (new_left->IsInstruction() &&
- !HInstruction::cast(new_left)->IsLinked()) {
- HInstruction::cast(new_left)->InsertBefore(this);
- }
- if (new_right->IsInstruction() &&
- !HInstruction::cast(new_right)->IsLinked()) {
- HInstruction::cast(new_right)->InsertBefore(this);
- }
- HMathFloorOfDiv* instr =
- HMathFloorOfDiv::New(block()->zone(), context(), new_left, new_right);
- instr->InsertBefore(this);
- return instr;
+ HValue* right = hdiv->right();
+ if (right->IsInteger32Constant()) {
+ right = Prepend(HConstant::cast(right)->CopyToRepresentation(
+ Representation::Integer32(), right->block()->zone()));
+ } else if (right->representation().IsInteger32()) {
+ // A value with an integer representation does not need to be transformed.
+ } else if (right->IsChange() &&
+ HChange::cast(right)->from().IsInteger32()) {
+ // A change from an integer32 can be replaced by the integer32 value.
+ right = HChange::cast(right)->value();
+ } else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
+ right = Prepend(new(block()->zone()) HChange(
+ right, Representation::Integer32(), false, false));
+ } else {
+ return this;
}
+
+ return Prepend(HMathFloorOfDiv::New(
+ block()->zone(), context(), left, right));
}
return this;
}
@@ -1456,7 +1518,7 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
*tag = kStringTag;
return;
case IS_INTERNALIZED_STRING:
- *mask = kIsNotInternalizedMask;
+ *mask = kIsNotStringMask | kIsNotInternalizedMask;
*tag = kInternalizedTag;
return;
default:
@@ -1465,21 +1527,22 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
}
-void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
+bool HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
- ASSERT(side_effect == kChangesMaps);
+ ASSERT(side_effect == kMaps);
// TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
// type information is rich enough we should generalize this to any HType
// for which the map is known.
if (HasNoUses() && dominator->IsStoreNamedField()) {
HStoreNamedField* store = HStoreNamedField::cast(dominator);
- if (!store->has_transition() || store->object() != value()) return;
+ if (!store->has_transition() || store->object() != value()) return false;
HConstant* transition = HConstant::cast(store->transition());
if (map_set_.Contains(transition->GetUnique())) {
DeleteAndReplaceWith(NULL);
- return;
+ return true;
}
}
+ return false;
}
@@ -1574,7 +1637,7 @@ Range* HChange::InferRange(Zone* zone) {
input_range != NULL &&
input_range->IsInSmiRange()))) {
set_type(HType::Smi());
- ClearGVNFlag(kChangesNewSpacePromotion);
+ ClearChangesFlag(kNewSpacePromotion);
}
Range* result = (input_range != NULL)
? input_range->Copy(zone)
@@ -1597,7 +1660,7 @@ Range* HConstant::InferRange(Zone* zone) {
}
-int HPhi::position() const {
+HSourcePosition HPhi::position() const {
return block()->first()->position();
}
@@ -1699,10 +1762,7 @@ Range* HDiv::InferRange(Zone* zone) {
result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
(a->CanBeMinusZero() ||
(a->CanBeZero() && b->CanBeNegative())));
- if (!a->Includes(kMinInt) ||
- !b->Includes(-1) ||
- CheckFlag(kAllUsesTruncatingToInt32)) {
- // It is safe to clear kCanOverflow when kAllUsesTruncatingToInt32.
+ if (!a->Includes(kMinInt) || !b->Includes(-1)) {
ClearFlag(HValue::kCanOverflow);
}
@@ -2283,7 +2343,7 @@ void HPhi::InitRealUses(int phi_id) {
HValue* value = it.value();
if (!value->IsPhi()) {
Representation rep = value->observed_input_representation(it.index());
- non_phi_uses_[rep.kind()] += value->LoopWeight();
+ non_phi_uses_[rep.kind()] += 1;
if (FLAG_trace_representation) {
PrintF("#%d Phi is used by real #%d %s as %s\n",
id(), value->id(), value->Mnemonic(), rep.Mnemonic());
@@ -2421,7 +2481,7 @@ void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name = function()->debug_name()->ToCString();
- stream->Add("%s, id=%d", *name, function()->id().ToInt());
+ stream->Add("%s, id=%d", name.get(), function()->id().ToInt());
}
@@ -2570,6 +2630,41 @@ void HConstant::Initialize(Representation r) {
}
+bool HConstant::ImmortalImmovable() const {
+ if (has_int32_value_) {
+ return false;
+ }
+ if (has_double_value_) {
+ if (IsSpecialDouble()) {
+ return true;
+ }
+ return false;
+ }
+ if (has_external_reference_value_) {
+ return false;
+ }
+
+ ASSERT(!object_.handle().is_null());
+ Heap* heap = isolate()->heap();
+ ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
+ ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
+ return
+#define IMMORTAL_IMMOVABLE_ROOT(name) \
+ object_.IsKnownGlobal(heap->name()) ||
+ IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
+#undef IMMORTAL_IMMOVABLE_ROOT
+#define INTERNALIZED_STRING(name, value) \
+ object_.IsKnownGlobal(heap->name()) ||
+ INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
+#undef INTERNALIZED_STRING
+#define STRING_TYPE(NAME, size, name, Name) \
+ object_.IsKnownGlobal(heap->name##_map()) ||
+ STRING_TYPE_LIST(STRING_TYPE)
+#undef STRING_TYPE
+ false;
+}
+
+
bool HConstant::EmitAtUses() {
ASSERT(IsLinked());
if (block()->graph()->has_osr() &&
@@ -2850,8 +2945,17 @@ Range* HShl::InferRange(Zone* zone) {
Range* HLoadNamedField::InferRange(Zone* zone) {
- if (access().representation().IsByte()) {
- return new(zone) Range(0, 255);
+ if (access().representation().IsInteger8()) {
+ return new(zone) Range(kMinInt8, kMaxInt8);
+ }
+ if (access().representation().IsUInteger8()) {
+ return new(zone) Range(kMinUInt8, kMaxUInt8);
+ }
+ if (access().representation().IsInteger16()) {
+ return new(zone) Range(kMinInt16, kMaxInt16);
+ }
+ if (access().representation().IsUInteger16()) {
+ return new(zone) Range(kMinUInt16, kMaxUInt16);
}
if (access().IsStringLength()) {
return new(zone) Range(0, String::kMaxLength);
@@ -2862,16 +2966,15 @@ Range* HLoadNamedField::InferRange(Zone* zone) {
Range* HLoadKeyed::InferRange(Zone* zone) {
switch (elements_kind()) {
- case EXTERNAL_PIXEL_ELEMENTS:
- return new(zone) Range(0, 255);
- case EXTERNAL_BYTE_ELEMENTS:
- return new(zone) Range(-128, 127);
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return new(zone) Range(0, 255);
- case EXTERNAL_SHORT_ELEMENTS:
- return new(zone) Range(-32768, 32767);
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return new(zone) Range(0, 65535);
+ case EXTERNAL_INT8_ELEMENTS:
+ return new(zone) Range(kMinInt8, kMaxInt8);
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ return new(zone) Range(kMinUInt8, kMaxUInt8);
+ case EXTERNAL_INT16_ELEMENTS:
+ return new(zone) Range(kMinInt16, kMaxInt16);
+ case EXTERNAL_UINT16_ELEMENTS:
+ return new(zone) Range(kMinUInt16, kMaxUInt16);
default:
return HValue::InferRange(zone);
}
@@ -2930,6 +3033,24 @@ void HCompareHoleAndBranch::InferRepresentation(
}
+bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (value()->representation().IsSmiOrInteger32()) {
+ // A Smi or Integer32 cannot contain minus zero.
+ *block = SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+void HCompareMinusZeroAndBranch::InferRepresentation(
+ HInferRepresentationPhase* h_infer) {
+ ChangeRepresentation(value()->representation());
+}
+
+
+
void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id());
}
@@ -2982,6 +3103,11 @@ void HParameter::PrintDataTo(StringStream* stream) {
void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
access_.PrintTo(stream);
+
+ if (HasDependency()) {
+ stream->Add(" ");
+ dependency()->PrintNameTo(stream);
+ }
}
@@ -2992,7 +3118,7 @@ HCheckMaps* HCheckMaps::New(Zone* zone,
CompilationInfo* info,
HValue* typecheck) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
- check_map->Add(map, zone);
+ check_map->Add(map, info, zone);
if (map->CanOmitMapChecks() &&
value->IsConstant() &&
HConstant::cast(value)->HasMap(map)) {
@@ -3010,7 +3136,7 @@ HCheckMaps* HCheckMaps::New(Zone* zone,
void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
}
@@ -3132,10 +3258,8 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
key_load->elements_kind());
map_check->InsertBefore(this);
index->InsertBefore(this);
- HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
- object(), index);
- load->InsertBefore(this);
- return load;
+ return Prepend(new(block()->zone()) HLoadFieldByIndex(
+ object(), index));
}
}
}
@@ -3148,7 +3272,7 @@ void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
ASSERT(name()->IsString());
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" = ");
value()->PrintNameTo(stream);
}
@@ -3249,12 +3373,6 @@ void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
}
-void HStoreGlobalGeneric::PrintDataTo(StringStream* stream) {
- stream->Add("%o = ", *name());
- value()->PrintNameTo(stream);
-}
-
-
void HLoadContextSlot::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add("[%d]", slot_index());
@@ -3305,11 +3423,11 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
}
-void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
+bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
Zone* zone = block()->zone();
- if (!FLAG_use_allocation_folding) return;
+ if (!FLAG_use_allocation_folding) return false;
// Try to fold allocations together with their dominating allocations.
if (!dominator->IsAllocate()) {
@@ -3317,7 +3435,16 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
PrintF("#%d (%s) cannot fold into #%d (%s)\n",
id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
}
- return;
+ return false;
+ }
+
+ // Check whether we are folding within the same block for local folding.
+ if (FLAG_use_local_allocation_folding && dominator->block() != block()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return false;
}
HAllocate* dominator_allocate = HAllocate::cast(dominator);
@@ -3331,12 +3458,12 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
PrintF("#%d (%s) cannot fold into #%d (%s), dynamic allocation size\n",
id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
}
- return;
+ return false;
}
dominator_allocate = GetFoldableDominator(dominator_allocate);
if (dominator_allocate == NULL) {
- return;
+ return false;
}
ASSERT((IsNewSpaceAllocation() &&
@@ -3365,13 +3492,15 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
- if (new_dominator_size > Page::kMaxNonCodeHeapObjectSize) {
+ // Since we clear the first word after folded memory, we cannot use the
+ // whole Page::kMaxRegularHeapObjectSize memory.
+ if (new_dominator_size > Page::kMaxRegularHeapObjectSize - kPointerSize) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
id(), Mnemonic(), dominator_allocate->id(),
dominator_allocate->Mnemonic(), new_dominator_size);
}
- return;
+ return false;
}
HInstruction* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
@@ -3396,14 +3525,21 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
dominator_allocate->ClearNextMapWord(original_object_size);
#endif
- dominator_allocate->clear_next_map_word_ = clear_next_map_word_;
+ dominator_allocate->UpdateClearNextMapWord(MustClearNextMapWord());
// After that replace the dominated allocate instruction.
+ HInstruction* inner_offset = HConstant::CreateAndInsertBefore(
+ zone,
+ context(),
+ dominator_size_constant,
+ Representation::None(),
+ this);
+
HInstruction* dominated_allocate_instr =
HInnerAllocatedObject::New(zone,
context(),
dominator_allocate,
- dominator_size_constant,
+ inner_offset,
type());
dominated_allocate_instr->InsertBefore(this);
DeleteAndReplaceWith(dominated_allocate_instr);
@@ -3412,6 +3548,7 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
id(), Mnemonic(), dominator_allocate->id(),
dominator_allocate->Mnemonic());
}
+ return true;
}
@@ -3499,11 +3636,9 @@ void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
ASSERT(filler_free_space_size_ == NULL);
Zone* zone = block()->zone();
- int32_t dominator_size =
- HConstant::cast(dominating_allocate_->size())->GetInteger32Constant();
HInstruction* free_space_instr =
HInnerAllocatedObject::New(zone, context(), dominating_allocate_,
- dominator_size, type());
+ dominating_allocate_->size(), type());
free_space_instr->InsertBefore(this);
HConstant* filler_map = HConstant::New(
zone,
@@ -3523,8 +3658,9 @@ void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
zone, context(), free_space_size, Representation::Smi(), store_map);
// Must force Smi representation for x64 (see comment above).
HObjectAccess access =
- HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset,
- Representation::Smi());
+ HObjectAccess::ForMapAndOffset(isolate()->factory()->free_space_map(),
+ FreeSpace::kSizeOffset,
+ Representation::Smi());
HStoreNamedField* store_size = HStoreNamedField::New(zone, context(),
free_space_instr, access, filler_size);
store_size->SetFlag(HValue::kHasNoObservableSideEffects);
@@ -3534,12 +3670,13 @@ void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
void HAllocate::ClearNextMapWord(int offset) {
- if (clear_next_map_word_) {
+ if (MustClearNextMapWord()) {
Zone* zone = block()->zone();
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(offset);
+ HObjectAccess access =
+ HObjectAccess::ForObservableJSObjectOffset(offset);
HStoreNamedField* clear_next_map =
HStoreNamedField::New(zone, context(), this, access,
- block()->graph()->GetConstantNull());
+ block()->graph()->GetConstant0());
clear_next_map->ClearAllSideEffects();
clear_next_map->InsertAfter(this);
}
@@ -3693,7 +3830,7 @@ HInstruction* HInstr::New( \
HConstant* c_right = HConstant::cast(right); \
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
- if (TypeInfo::IsInt32Double(double_res)) { \
+ if (IsInt32Double(double_res)) { \
return H_CONSTANT_INT(double_res); \
} \
return H_CONSTANT_DOUBLE(double_res); \
@@ -3714,7 +3851,9 @@ HInstruction* HStringAdd::New(Zone* zone,
HValue* context,
HValue* left,
HValue* right,
- StringAddFlags flags) {
+ PretenureFlag pretenure_flag,
+ StringAddFlags flags,
+ Handle<AllocationSite> allocation_site) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_right = HConstant::cast(right);
HConstant* c_left = HConstant::cast(left);
@@ -3724,7 +3863,23 @@ HInstruction* HStringAdd::New(Zone* zone,
return HConstant::New(zone, context, concat);
}
}
- return new(zone) HStringAdd(context, left, right, flags);
+ return new(zone) HStringAdd(
+ context, left, right, pretenure_flag, flags, allocation_site);
+}
+
+
+void HStringAdd::PrintDataTo(StringStream* stream) {
+ if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ stream->Add("_CheckBoth");
+ } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_LEFT) {
+ stream->Add("_CheckLeft");
+ } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) {
+ stream->Add("_CheckRight");
+ }
+ stream->Add(" (");
+ if (pretenure_flag() == NOT_TENURED) stream->Add("N");
+ else if (pretenure_flag() == TENURED) stream->Add("D");
+ stream->Add(")");
}
@@ -3759,10 +3914,6 @@ HInstruction* HUnaryMathOperation::New(
}
if (std::isinf(d)) { // +Infinity and -Infinity.
switch (op) {
- case kMathSin:
- case kMathCos:
- case kMathTan:
- return H_CONSTANT_DOUBLE(OS::nan_value());
case kMathExp:
return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
case kMathLog:
@@ -3780,16 +3931,10 @@ HInstruction* HUnaryMathOperation::New(
}
}
switch (op) {
- case kMathSin:
- return H_CONSTANT_DOUBLE(fast_sin(d));
- case kMathCos:
- return H_CONSTANT_DOUBLE(fast_cos(d));
- case kMathTan:
- return H_CONSTANT_DOUBLE(fast_tan(d));
case kMathExp:
return H_CONSTANT_DOUBLE(fast_exp(d));
case kMathLog:
- return H_CONSTANT_DOUBLE(fast_log(d));
+ return H_CONSTANT_DOUBLE(std::log(d));
case kMathSqrt:
return H_CONSTANT_DOUBLE(fast_sqrt(d));
case kMathPowHalf:
@@ -3802,9 +3947,9 @@ HInstruction* HUnaryMathOperation::New(
// Doubles are represented as Significant * 2 ^ Exponent. If the
// Exponent is not negative, the double value is already an integer.
if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d);
- return H_CONSTANT_DOUBLE(floor(d + 0.5));
+ return H_CONSTANT_DOUBLE(std::floor(d + 0.5));
case kMathFloor:
- return H_CONSTANT_DOUBLE(floor(d));
+ return H_CONSTANT_DOUBLE(std::floor(d));
default:
UNREACHABLE();
break;
@@ -3867,8 +4012,7 @@ HInstruction* HMathMinMax::New(
HInstruction* HMod::New(Zone* zone,
HValue* context,
HValue* left,
- HValue* right,
- Maybe<int> fixed_right_arg) {
+ HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -3887,7 +4031,7 @@ HInstruction* HMod::New(Zone* zone,
}
}
}
- return new(zone) HMod(context, left, right, fixed_right_arg);
+ return new(zone) HMod(context, left, right);
}
@@ -3900,7 +4044,7 @@ HInstruction* HDiv::New(
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
if (c_right->DoubleValue() != 0) {
double double_res = c_left->DoubleValue() / c_right->DoubleValue();
- if (TypeInfo::IsInt32Double(double_res)) {
+ if (IsInt32Double(double_res)) {
return H_CONSTANT_INT(double_res);
}
return H_CONSTANT_DOUBLE(double_res);
@@ -3985,6 +4129,26 @@ HInstruction* HShr::New(
}
+HInstruction* HSeqStringGetChar::New(Zone* zone,
+ HValue* context,
+ String::Encoding encoding,
+ HValue* string,
+ HValue* index) {
+ if (FLAG_fold_constants && string->IsConstant() && index->IsConstant()) {
+ HConstant* c_string = HConstant::cast(string);
+ HConstant* c_index = HConstant::cast(index);
+ if (c_string->HasStringValue() && c_index->HasInteger32Value()) {
+ Handle<String> s = c_string->StringValue();
+ int32_t i = c_index->Integer32Value();
+ ASSERT_LE(0, i);
+ ASSERT_LT(i, s->length());
+ return H_CONSTANT_INT(s->Get(i));
+ }
+ }
+ return new(zone) HSeqStringGetChar(encoding, string, index);
+}
+
+
#undef H_CONSTANT_INT
#undef H_CONSTANT_DOUBLE
@@ -4136,7 +4300,7 @@ HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
}
-HObjectAccess HObjectAccess::ForJSObjectOffset(int offset,
+HObjectAccess HObjectAccess::ForMapAndOffset(Handle<Map> map, int offset,
Representation representation) {
ASSERT(offset >= 0);
Portion portion = kInobject;
@@ -4146,7 +4310,34 @@ HObjectAccess HObjectAccess::ForJSObjectOffset(int offset,
} else if (offset == JSObject::kMapOffset) {
portion = kMaps;
}
- return HObjectAccess(portion, offset, representation);
+ bool existing_inobject_property = true;
+ if (!map.is_null()) {
+ existing_inobject_property = (offset <
+ map->instance_size() - map->unused_property_fields() * kPointerSize);
+ }
+ return HObjectAccess(portion, offset, representation, Handle<String>::null(),
+ false, existing_inobject_property);
+}
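A small standalone example of the existing_inobject_property computation above: an offset refers to an already-existing in-object field only if it lies below instance_size minus the space reserved for unused (not-yet-added) property fields. The concrete sizes below are made up for illustration.

#include <cassert>

const int kPointerSize = 8;  // assumed 64-bit pointer size

bool IsExistingInobjectProperty(int offset, int instance_size,
                                int unused_property_fields) {
  return offset < instance_size - unused_property_fields * kPointerSize;
}

int main() {
  // A 64-byte object with 2 still-unused in-object slots: only offsets below
  // 64 - 2 * 8 = 48 name fields that already exist.
  assert(IsExistingInobjectProperty(40, 64, 2));
  assert(!IsExistingInobjectProperty(48, 64, 2));
  return 0;
}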
+
+
+HObjectAccess HObjectAccess::ForAllocationSiteOffset(int offset) {
+ switch (offset) {
+ case AllocationSite::kTransitionInfoOffset:
+ return HObjectAccess(kInobject, offset, Representation::Tagged());
+ case AllocationSite::kNestedSiteOffset:
+ return HObjectAccess(kInobject, offset, Representation::Tagged());
+ case AllocationSite::kPretenureDataOffset:
+ return HObjectAccess(kInobject, offset, Representation::Smi());
+ case AllocationSite::kPretenureCreateCountOffset:
+ return HObjectAccess(kInobject, offset, Representation::Smi());
+ case AllocationSite::kDependentCodeOffset:
+ return HObjectAccess(kInobject, offset, Representation::Tagged());
+ case AllocationSite::kWeakNextOffset:
+ return HObjectAccess(kInobject, offset, Representation::Tagged());
+ default:
+ UNREACHABLE();
+ }
+ return HObjectAccess(kInobject, offset);
}
@@ -4177,20 +4368,22 @@ HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
Representation representation) {
ASSERT(offset >= 0);
- return HObjectAccess(kBackingStore, offset, representation);
+ return HObjectAccess(kBackingStore, offset, representation,
+ Handle<String>::null(), false, false);
}
HObjectAccess HObjectAccess::ForField(Handle<Map> map,
- LookupResult *lookup, Handle<String> name) {
- ASSERT(lookup->IsField() || lookup->IsTransitionToField(*map));
+ LookupResult* lookup,
+ Handle<String> name) {
+ ASSERT(lookup->IsField() || lookup->IsTransitionToField());
int index;
Representation representation;
if (lookup->IsField()) {
index = lookup->GetLocalFieldIndexFromMap(*map);
representation = lookup->representation();
} else {
- Map* transition = lookup->GetTransitionMapFromMap(*map);
+ Map* transition = lookup->GetTransitionTarget();
int descriptor = transition->LastAdded();
index = transition->instance_descriptors()->GetFieldIndex(descriptor) -
map->inobject_properties();
@@ -4202,11 +4395,12 @@ HObjectAccess HObjectAccess::ForField(Handle<Map> map,
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
int offset = (index * kPointerSize) + map->instance_size();
- return HObjectAccess(kInobject, offset, representation);
+ return HObjectAccess(kInobject, offset, representation, name, false, true);
} else {
// Non-negative property indices are in the properties array.
int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return HObjectAccess(kBackingStore, offset, representation, name);
+ return HObjectAccess(kBackingStore, offset, representation, name,
+ false, false);
}
}
@@ -4218,56 +4412,80 @@ HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
}
-void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
+void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
// set the appropriate GVN flags for a given load or store instruction
- if (is_store) {
+ if (access_type == STORE) {
// track dominating allocations in order to eliminate write barriers
- instr->SetGVNFlag(kDependsOnNewSpacePromotion);
+ instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion);
instr->SetFlag(HValue::kTrackSideEffectDominators);
} else {
// try to GVN loads, but don't hoist above map changes
instr->SetFlag(HValue::kUseGVN);
- instr->SetGVNFlag(kDependsOnMaps);
+ instr->SetDependsOnFlag(::v8::internal::kMaps);
}
switch (portion()) {
case kArrayLengths:
- instr->SetGVNFlag(is_store
- ? kChangesArrayLengths : kDependsOnArrayLengths);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kArrayLengths);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kArrayLengths);
+ }
break;
case kStringLengths:
- instr->SetGVNFlag(is_store
- ? kChangesStringLengths : kDependsOnStringLengths);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kStringLengths);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kStringLengths);
+ }
break;
case kInobject:
- instr->SetGVNFlag(is_store
- ? kChangesInobjectFields : kDependsOnInobjectFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kInobjectFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kInobjectFields);
+ }
break;
case kDouble:
- instr->SetGVNFlag(is_store
- ? kChangesDoubleFields : kDependsOnDoubleFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kDoubleFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kDoubleFields);
+ }
break;
case kBackingStore:
- instr->SetGVNFlag(is_store
- ? kChangesBackingStoreFields : kDependsOnBackingStoreFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kBackingStoreFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields);
+ }
break;
case kElementsPointer:
- instr->SetGVNFlag(is_store
- ? kChangesElementsPointer : kDependsOnElementsPointer);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kElementsPointer);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kElementsPointer);
+ }
break;
case kMaps:
- instr->SetGVNFlag(is_store
- ? kChangesMaps : kDependsOnMaps);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kMaps);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kMaps);
+ }
break;
case kExternalMemory:
- instr->SetGVNFlag(is_store
- ? kChangesExternalMemory : kDependsOnExternalMemory);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kExternalMemory);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kExternalMemory);
+ }
break;
}
}
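A minimal standalone sketch (not V8 code; the mirror types below are hypothetical) of how the two sets that SetGVNFlags populates can be consulted once changes and depends-on flags are kept separately:

#include <bitset>
#include <cassert>

// Hypothetical mirror of the single GVNFlag enum plus the two per-instruction
// sets (changes_flags_ / depends_on_flags_) introduced by this patch.
enum GVNFlag { kMaps, kInobjectFields, kNewSpacePromotion, kNumberOfFlags };
typedef std::bitset<kNumberOfFlags> GVNFlagSet;

struct FakeInstr {
  GVNFlagSet changes;     // what the instruction invalidates
  GVNFlagSet depends_on;  // what the instruction relies on
};

int main() {
  FakeInstr load, store;
  load.depends_on.set(kMaps);          // cf. SetDependsOnFlag(kMaps) for loads
  store.changes.set(kInobjectFields);  // cf. SetChangesFlag(kInobjectFields)

  // An earlier load may be reused across the store only if the store does not
  // change anything the load depends on.
  bool invalidated = (load.depends_on & store.changes).any();
  assert(!invalidated);
  return 0;
}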
-void HObjectAccess::PrintTo(StringStream* stream) {
+void HObjectAccess::PrintTo(StringStream* stream) const {
stream->Add(".");
switch (portion()) {
@@ -4283,11 +4501,15 @@ void HObjectAccess::PrintTo(StringStream* stream) {
break;
case kDouble: // fall through
case kInobject:
- if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+ if (!name_.is_null()) {
+ stream->Add(String::cast(*name_)->ToCString().get());
+ }
stream->Add("[in-object]");
break;
case kBackingStore:
- if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+ if (!name_.is_null()) {
+ stream->Add(String::cast(*name_)->ToCString().get());
+ }
stream->Add("[backing-store]");
break;
case kExternalMemory:
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index da5cdeced4..4976f7b90c 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -46,6 +46,7 @@ namespace internal {
// Forward declarations.
class HBasicBlock;
+class HDiv;
class HEnvironment;
class HInferRepresentationPhase;
class HInstruction;
@@ -77,12 +78,9 @@ class LChunkBuilder;
V(BoundsCheck) \
V(BoundsCheckBaseIndexInformation) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallWithDescriptor) \
+ V(CallJSFunction) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -100,6 +98,7 @@ class LChunkBuilder;
V(CompareNumericAndBranch) \
V(CompareHoleAndBranch) \
V(CompareGeneric) \
+ V(CompareMinusZeroAndBranch) \
V(CompareObjectEqAndBranch) \
V(CompareMap) \
V(Constant) \
@@ -110,7 +109,6 @@ class LChunkBuilder;
V(Deoptimize) \
V(Div) \
V(DummyUse) \
- V(ElementsKind) \
V(EnterInlined) \
V(EnvironmentMarker) \
V(ForceRepresentation) \
@@ -118,8 +116,6 @@ class LChunkBuilder;
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -134,7 +130,6 @@ class LChunkBuilder;
V(IsUndetectableAndBranch) \
V(LeaveInlined) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -150,15 +145,14 @@ class LChunkBuilder;
V(Mod) \
V(Mul) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
- V(Random) \
V(RegExpLiteral) \
V(Return) \
V(Ror) \
V(Sar) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(Shl) \
V(Shr) \
@@ -167,7 +161,6 @@ class LChunkBuilder;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -178,7 +171,6 @@ class LChunkBuilder;
V(StringCompareAndBranch) \
V(Sub) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
@@ -187,7 +179,6 @@ class LChunkBuilder;
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(UseConst) \
- V(ValueOf) \
V(WrapReceiver)
#define GVN_TRACKED_FLAG_LIST(V) \
@@ -208,7 +199,9 @@ class LChunkBuilder;
V(GlobalVars) \
V(InobjectFields) \
V(OsrEntries) \
- V(ExternalMemory)
+ V(ExternalMemory) \
+ V(StringChars) \
+ V(TypedArrayElements)
#define DECLARE_ABSTRACT_INSTRUCTION(type) \
@@ -231,6 +224,9 @@ class LChunkBuilder;
}
+enum PropertyAccessType { LOAD, STORE };
+
+
class Range V8_FINAL : public ZoneObject {
public:
Range()
@@ -480,23 +476,28 @@ class HUseIterator V8_FINAL BASE_EMBEDDED {
};
-// There must be one corresponding kDepends flag for every kChanges flag and
-// the order of the kChanges flags must be exactly the same as of the kDepends
-// flags. All tracked flags should appear before untracked ones.
+// All tracked flags should appear before untracked ones.
enum GVNFlag {
// Declare global value numbering flags.
-#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
+#define DECLARE_FLAG(Type) k##Type,
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
- kAfterLastFlag,
- kLastFlag = kAfterLastFlag - 1,
-#define COUNT_FLAG(type) + 1
- kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
+#define COUNT_FLAG(Type) + 1
+ kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG),
+ kNumberOfUntrackedSideEffects = 0 GVN_UNTRACKED_FLAG_LIST(COUNT_FLAG),
#undef COUNT_FLAG
+ kNumberOfFlags = kNumberOfTrackedSideEffects + kNumberOfUntrackedSideEffects
};
+static inline GVNFlag GVNFlagFromInt(int i) {
+ ASSERT(i >= 0);
+ ASSERT(i < kNumberOfFlags);
+ return static_cast<GVNFlag>(i);
+}
+
+
class DecompositionResult V8_FINAL BASE_EMBEDDED {
public:
DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
@@ -542,7 +543,62 @@ class DecompositionResult V8_FINAL BASE_EMBEDDED {
};
-typedef EnumSet<GVNFlag> GVNFlagSet;
+typedef EnumSet<GVNFlag, int32_t> GVNFlagSet;
+
+
+// This class encapsulates encoding and decoding of source positions from
+// which hydrogen values originated.
+// When FLAG_hydrogen_track_positions is set this object encodes the
+// identifier of the inlining and the absolute offset from the start of the
+// inlined function.
+// When the flag is not set we simply track the absolute offset from the
+// script start.
+class HSourcePosition {
+ public:
+ HSourcePosition(const HSourcePosition& other) : value_(other.value_) { }
+
+ static HSourcePosition Unknown() {
+ return HSourcePosition(RelocInfo::kNoPosition);
+ }
+
+ bool IsUnknown() const { return value_ == RelocInfo::kNoPosition; }
+
+ int position() const { return PositionField::decode(value_); }
+ void set_position(int position) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<int>(PositionField::update(value_, position));
+ } else {
+ value_ = position;
+ }
+ }
+
+ int inlining_id() const { return InliningIdField::decode(value_); }
+ void set_inlining_id(int inlining_id) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<int>(InliningIdField::update(value_, inlining_id));
+ }
+ }
+
+ int raw() const { return value_; }
+
+ void PrintTo(FILE* f);
+
+ private:
+ typedef BitField<int, 0, 9> InliningIdField;
+
+ // Offset from the start of the inlined function.
+ typedef BitField<int, 9, 22> PositionField;
+
+ // Only HPositionInfo can use this constructor.
+ explicit HSourcePosition(int value) : value_(value) { }
+
+ friend class HPositionInfo;
+
+ // If FLAG_hydrogen_track_positions is set, this contains the bitfields
+ // InliningIdField and PositionField.
+ // Otherwise it contains the absolute offset from the script start.
+ int value_;
+};
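A small standalone sketch of the bit layout used above when FLAG_hydrogen_track_positions is on (assumed mirror, not V8 code): InliningIdField occupies bits 0..8 and PositionField bits 9..30, so a freshly encoded pair packs as (position << 9) | inlining_id.

#include <cassert>
#include <cstdint>

// Hypothetical mirror of HSourcePosition's BitField layout.
static int32_t EncodeTrackedPosition(int32_t position, int32_t inlining_id) {
  assert(position >= 0 && position < (1 << 22));       // PositionField: 22 bits
  assert(inlining_id >= 0 && inlining_id < (1 << 9));  // InliningIdField: 9 bits
  return (position << 9) | inlining_id;
}

int main() {
  int32_t raw = EncodeTrackedPosition(42, 3);
  assert((raw >> 9) == 42);      // cf. PositionField::decode
  assert((raw & 0x1FF) == 3);    // cf. InliningIdField::decode
  return 0;
}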
class HValue : public ZoneObject {
@@ -574,7 +630,7 @@ class HValue : public ZoneObject {
kIsDead,
// Instructions that are allowed to produce full range unsigned integer
// values are marked with kUint32 flag. If arithmetic shift or a load from
- // EXTERNAL_UNSIGNED_INT_ELEMENTS array is not marked with this flag
+ // EXTERNAL_UINT32_ELEMENTS array is not marked with this flag
// it will deoptimize if result does not fit into signed integer range.
// HGraph::ComputeSafeUint32Operations is responsible for setting this
// flag.
@@ -593,18 +649,6 @@ class HValue : public ZoneObject {
STATIC_ASSERT(kLastFlag < kBitsPerInt);
- static const int kChangesToDependsFlagsLeftShift = 1;
-
- static GVNFlag ChangesFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2);
- }
- static GVNFlag DependsOnFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2 + 1);
- }
- static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
- return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
- }
-
static HValue* cast(HValue* value) { return value; }
enum Opcode {
@@ -638,11 +682,15 @@ class HValue : public ZoneObject {
flags_(0) {}
virtual ~HValue() {}
- virtual int position() const { return RelocInfo::kNoPosition; }
+ virtual HSourcePosition position() const {
+ return HSourcePosition::Unknown();
+ }
+ virtual HSourcePosition operand_position(int index) const {
+ return position();
+ }
HBasicBlock* block() const { return block_; }
void SetBlock(HBasicBlock* block);
- int LoopWeight() const;
// Note: Never call this method for an unlinked value.
Isolate* isolate() const;
@@ -780,43 +828,38 @@ class HValue : public ZoneObject {
// of uses is non-empty.
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
- GVNFlagSet gvn_flags() const { return gvn_flags_; }
- void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
- void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
- bool CheckGVNFlag(GVNFlag f) const { return gvn_flags_.Contains(f); }
- void SetAllSideEffects() { gvn_flags_.Add(AllSideEffectsFlagSet()); }
+ GVNFlagSet ChangesFlags() const { return changes_flags_; }
+ GVNFlagSet DependsOnFlags() const { return depends_on_flags_; }
+ void SetChangesFlag(GVNFlag f) { changes_flags_.Add(f); }
+ void SetDependsOnFlag(GVNFlag f) { depends_on_flags_.Add(f); }
+ void ClearChangesFlag(GVNFlag f) { changes_flags_.Remove(f); }
+ void ClearDependsOnFlag(GVNFlag f) { depends_on_flags_.Remove(f); }
+ bool CheckChangesFlag(GVNFlag f) const {
+ return changes_flags_.Contains(f);
+ }
+ bool CheckDependsOnFlag(GVNFlag f) const {
+ return depends_on_flags_.Contains(f);
+ }
+ void SetAllSideEffects() { changes_flags_.Add(AllSideEffectsFlagSet()); }
void ClearAllSideEffects() {
- gvn_flags_.Remove(AllSideEffectsFlagSet());
+ changes_flags_.Remove(AllSideEffectsFlagSet());
}
bool HasSideEffects() const {
- return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
+ return changes_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
}
bool HasObservableSideEffects() const {
return !CheckFlag(kHasNoObservableSideEffects) &&
- gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
- }
-
- GVNFlagSet DependsOnFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllDependsOnFlagSet());
- return result;
+ changes_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
}
GVNFlagSet SideEffectFlags() const {
- GVNFlagSet result = gvn_flags_;
+ GVNFlagSet result = ChangesFlags();
result.Intersect(AllSideEffectsFlagSet());
return result;
}
- GVNFlagSet ChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
- return result;
- }
-
GVNFlagSet ObservableChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
+ GVNFlagSet result = ChangesFlags();
result.Intersect(AllObservableSideEffectsFlagSet());
return result;
}
@@ -882,9 +925,11 @@ class HValue : public ZoneObject {
// This function must be overridden for instructions which have the
// kTrackSideEffectDominators flag set, to track instructions that are
// dominating side effects.
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ // It returns true if it removed an instruction which had side effects.
+ virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
UNREACHABLE();
+ return false;
}
// Check if this instruction has some reason that prevents elimination.
@@ -955,20 +1000,9 @@ class HValue : public ZoneObject {
representation_ = r;
}
- static GVNFlagSet AllDependsOnFlagSet() {
+ static GVNFlagSet AllFlagSet() {
GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kDependsOn##type);
- GVN_TRACKED_FLAG_LIST(ADD_FLAG)
- GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
-#undef ADD_FLAG
- return result;
- }
-
- static GVNFlagSet AllChangesFlagSet() {
- GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kChanges##type);
+#define ADD_FLAG(Type) result.Add(k##Type);
GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
@@ -977,19 +1011,19 @@ class HValue : public ZoneObject {
// A flag mask to mark an instruction as having arbitrary side effects.
static GVNFlagSet AllSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesOsrEntries);
+ GVNFlagSet result = AllFlagSet();
+ result.Remove(kOsrEntries);
return result;
}
// A flag mask of all side effects that can make observable changes in
// an executing program (i.e. are not safe to repeat, move or remove);
static GVNFlagSet AllObservableSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesNewSpacePromotion);
- result.Remove(kChangesElementsKind);
- result.Remove(kChangesElementsPointer);
- result.Remove(kChangesMaps);
+ GVNFlagSet result = AllFlagSet();
+ result.Remove(kNewSpacePromotion);
+ result.Remove(kElementsKind);
+ result.Remove(kElementsPointer);
+ result.Remove(kMaps);
return result;
}
@@ -1010,7 +1044,8 @@ class HValue : public ZoneObject {
HUseListNode* use_list_;
Range* range_;
int flags_;
- GVNFlagSet gvn_flags_;
+ GVNFlagSet changes_flags_;
+ GVNFlagSet depends_on_flags_;
private:
virtual bool IsDeletable() const { return false; }
@@ -1102,6 +1137,99 @@ class HValue : public ZoneObject {
}
+// A helper class to represent per-operand position information attached to
+// an HInstruction in compact form. Uses tagging to distinguish between the
+// case when only the instruction's position is available and the case when
+// the operands' positions are also available.
+// In the first case it contains the instruction's position as a tagged value.
+// In the second case it points to an array which contains the instruction's
+// position and the operands' positions.
+class HPositionInfo {
+ public:
+ explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
+
+ HSourcePosition position() const {
+ if (has_operand_positions()) {
+ return operand_positions()[kInstructionPosIndex];
+ }
+ return HSourcePosition(static_cast<int>(UntagPosition(data_)));
+ }
+
+ void set_position(HSourcePosition pos) {
+ if (has_operand_positions()) {
+ operand_positions()[kInstructionPosIndex] = pos;
+ } else {
+ data_ = TagPosition(pos.raw());
+ }
+ }
+
+ void ensure_storage_for_operand_positions(Zone* zone, int operand_count) {
+ if (has_operand_positions()) {
+ return;
+ }
+
+ const int length = kFirstOperandPosIndex + operand_count;
+ HSourcePosition* positions =
+ zone->NewArray<HSourcePosition>(length);
+ for (int i = 0; i < length; i++) {
+ positions[i] = HSourcePosition::Unknown();
+ }
+
+ const HSourcePosition pos = position();
+ data_ = reinterpret_cast<intptr_t>(positions);
+ set_position(pos);
+
+ ASSERT(has_operand_positions());
+ }
+
+ HSourcePosition operand_position(int idx) const {
+ if (!has_operand_positions()) {
+ return position();
+ }
+ return *operand_position_slot(idx);
+ }
+
+ void set_operand_position(int idx, HSourcePosition pos) {
+ *operand_position_slot(idx) = pos;
+ }
+
+ private:
+ static const intptr_t kInstructionPosIndex = 0;
+ static const intptr_t kFirstOperandPosIndex = 1;
+
+ HSourcePosition* operand_position_slot(int idx) const {
+ ASSERT(has_operand_positions());
+ return &(operand_positions()[kFirstOperandPosIndex + idx]);
+ }
+
+ bool has_operand_positions() const {
+ return !IsTaggedPosition(data_);
+ }
+
+ HSourcePosition* operand_positions() const {
+ ASSERT(has_operand_positions());
+ return reinterpret_cast<HSourcePosition*>(data_);
+ }
+
+ static const intptr_t kPositionTag = 1;
+ static const intptr_t kPositionShift = 1;
+ static bool IsTaggedPosition(intptr_t val) {
+ return (val & kPositionTag) != 0;
+ }
+ static intptr_t UntagPosition(intptr_t val) {
+ ASSERT(IsTaggedPosition(val));
+ return val >> kPositionShift;
+ }
+ static intptr_t TagPosition(intptr_t val) {
+ const intptr_t result = (val << kPositionShift) | kPositionTag;
+ ASSERT(UntagPosition(result) == val);
+ return result;
+ }
+
+ intptr_t data_;
+};
+
+
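A standalone sketch of the tagging trick HPositionInfo relies on (assumed mirror, not V8 code): a bare position is stored as (pos << 1) | 1, while a pointer to the zone-allocated operand-position array keeps its low bit clear on typical alignments, so bit 0 distinguishes the two cases.

#include <cassert>
#include <cstdint>

static intptr_t TagPosition(intptr_t pos)    { return (pos << 1) | 1; }
static bool     IsTaggedPosition(intptr_t d) { return (d & 1) != 0; }
static intptr_t UntagPosition(intptr_t d)    { return d >> 1; }

int main() {
  // Scalar case: the position itself, shifted and tagged.
  intptr_t data = TagPosition(1234);
  assert(IsTaggedPosition(data));
  assert(UntagPosition(data) == 1234);

  // Array case: an aligned pointer stand-in for the operand-position array;
  // its low bit is clear, so has_operand_positions() would report true.
  static intptr_t positions[3] = {0, 0, 0};
  data = reinterpret_cast<intptr_t>(positions);
  assert(!IsTaggedPosition(data));
  return 0;
}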
class HInstruction : public HValue {
public:
HInstruction* next() const { return next_; }
@@ -1112,16 +1240,42 @@ class HInstruction : public HValue {
bool IsLinked() const { return block() != NULL; }
void Unlink();
+
void InsertBefore(HInstruction* next);
+
+ template<class T> T* Prepend(T* instr) {
+ instr->InsertBefore(this);
+ return instr;
+ }
+
void InsertAfter(HInstruction* previous);
+ template<class T> T* Append(T* instr) {
+ instr->InsertAfter(this);
+ return instr;
+ }
+
// The position is a write-once variable.
- virtual int position() const V8_OVERRIDE { return position_; }
- bool has_position() const { return position_ != RelocInfo::kNoPosition; }
- void set_position(int position) {
+ virtual HSourcePosition position() const V8_OVERRIDE {
+ return HSourcePosition(position_.position());
+ }
+ bool has_position() const {
+ return !position().IsUnknown();
+ }
+ void set_position(HSourcePosition position) {
ASSERT(!has_position());
- ASSERT(position != RelocInfo::kNoPosition);
- position_ = position;
+ ASSERT(!position.IsUnknown());
+ position_.set_position(position);
+ }
+
+ virtual HSourcePosition operand_position(int index) const V8_OVERRIDE {
+ const HSourcePosition pos = position_.operand_position(index);
+ return pos.IsUnknown() ? position() : pos;
+ }
+ void set_operand_position(Zone* zone, int index, HSourcePosition pos) {
+ ASSERT(0 <= index && index < OperandCount());
+ position_.ensure_storage_for_operand_positions(zone, OperandCount());
+ position_.set_operand_position(index, pos);
}
bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
@@ -1132,7 +1286,7 @@ class HInstruction : public HValue {
virtual void Verify() V8_OVERRIDE;
#endif
- virtual bool IsCall() { return false; }
+ virtual bool HasStackCheck() { return false; }
DECLARE_ABSTRACT_INSTRUCTION(Instruction)
@@ -1142,7 +1296,7 @@ class HInstruction : public HValue {
next_(NULL),
previous_(NULL),
position_(RelocInfo::kNoPosition) {
- SetGVNFlag(kDependsOnOsrEntries);
+ SetDependsOnFlag(kOsrEntries);
}
virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
@@ -1157,7 +1311,7 @@ class HInstruction : public HValue {
HInstruction* next_;
HInstruction* previous_;
- int position_;
+ HPositionInfo position_;
friend class HBasicBlock;
};
@@ -1321,11 +1475,11 @@ class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
class HDeoptimize V8_FINAL : public HTemplateControlInstruction<1, 0> {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- const char* reason,
- Deoptimizer::BailoutType type,
- HBasicBlock* unreachable_continuation) {
+ static HDeoptimize* New(Zone* zone,
+ HValue* context,
+ const char* reason,
+ Deoptimizer::BailoutType type,
+ HBasicBlock* unreachable_continuation) {
return new(zone) HDeoptimize(reason, type, unreachable_continuation);
}
@@ -1410,18 +1564,39 @@ class HBranch V8_FINAL : public HUnaryControlInstruction {
class HCompareMap V8_FINAL : public HUnaryControlInstruction {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
- DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
+ DECLARE_INSTRUCTION_FACTORY_P3(HCompareMap, HValue*, Handle<Map>,
+ CompilationInfo*);
+ DECLARE_INSTRUCTION_FACTORY_P5(HCompareMap, HValue*, Handle<Map>,
+ CompilationInfo*,
HBasicBlock*, HBasicBlock*);
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ if (known_successor_index() != kNoKnownSuccessorIndex) {
+ *block = SuccessorAt(known_successor_index());
+ return true;
+ }
+ *block = NULL;
+ return false;
+ }
+
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ static const int kNoKnownSuccessorIndex = -1;
+ int known_successor_index() const { return known_successor_index_; }
+ void set_known_successor_index(int known_successor_index) {
+ known_successor_index_ = known_successor_index;
+ }
+
Unique<Map> map() const { return map_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
+ bool is_stable() const {
+ return is_stable_;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CompareMap)
protected:
@@ -1430,13 +1605,22 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
private:
HCompareMap(HValue* value,
Handle<Map> map,
+ CompilationInfo* info,
HBasicBlock* true_target = NULL,
HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
- map_(Unique<Map>(map)) {
+ known_successor_index_(kNoKnownSuccessorIndex), map_(Unique<Map>(map)) {
ASSERT(!map.is_null());
+ is_stable_ = map->is_stable();
+
+ if (is_stable_) {
+ map->AddDependentCompilationInfo(
+ DependentCode::kPrototypeCheckGroup, info);
+ }
}
+ int known_successor_index_;
+ bool is_stable_;
Unique<Map> map_;
};
@@ -1524,28 +1708,6 @@ class HUnaryOperation : public HTemplateInstruction<1> {
};
-class HThrow V8_FINAL : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HThrow, HValue*);
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw)
-
- private:
- HThrow(HValue* context, HValue* value) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- SetAllSideEffects();
- }
-};
-
-
class HUseConst V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
@@ -1563,7 +1725,8 @@ class HUseConst V8_FINAL : public HUnaryOperation {
class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HForceRepresentation, HValue*, Representation);
+ static HInstruction* New(Zone* zone, HValue* context, HValue* value,
+ Representation required_representation);
HValue* value() { return OperandAt(0); }
@@ -1607,7 +1770,7 @@ class HChange V8_FINAL : public HUnaryOperation {
set_type(HType::Smi());
} else {
set_type(HType::TaggedNumber());
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
}
@@ -1853,7 +2016,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
private:
HStackCheck(HValue* context, Type type) : type_(type) {
SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
Type type_;
@@ -1861,8 +2024,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
enum InliningKind {
- NORMAL_RETURN, // Normal function/method call and return.
- DROP_EXTRA_ON_RETURN, // Drop an extra value from the environment on return.
+ NORMAL_RETURN, // Drop the function from the environment on return.
CONSTRUCT_CALL_RETURN, // Either use allocated receiver or return value.
GETTER_CALL_RETURN, // Returning from a getter, need to restore context.
SETTER_CALL_RETURN // Use the RHS of the assignment as the return value.
@@ -1881,11 +2043,10 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
FunctionLiteral* function,
InliningKind inlining_kind,
Variable* arguments_var,
- HArgumentsObject* arguments_object,
- bool undefined_receiver) {
+ HArgumentsObject* arguments_object) {
return new(zone) HEnterInlined(closure, arguments_count, function,
inlining_kind, arguments_var,
- arguments_object, undefined_receiver, zone);
+ arguments_object, zone);
}
void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
@@ -1899,7 +2060,6 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
void set_arguments_pushed() { arguments_pushed_ = true; }
FunctionLiteral* function() const { return function_; }
InliningKind inlining_kind() const { return inlining_kind_; }
- bool undefined_receiver() const { return undefined_receiver_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
@@ -1917,7 +2077,6 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
InliningKind inlining_kind,
Variable* arguments_var,
HArgumentsObject* arguments_object,
- bool undefined_receiver,
Zone* zone)
: closure_(closure),
arguments_count_(arguments_count),
@@ -1926,7 +2085,6 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
inlining_kind_(inlining_kind),
arguments_var_(arguments_var),
arguments_object_(arguments_object),
- undefined_receiver_(undefined_receiver),
return_targets_(2, zone) {
}
@@ -1937,7 +2095,6 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
InliningKind inlining_kind_;
Variable* arguments_var_;
HArgumentsObject* arguments_object_;
- bool undefined_receiver_;
ZoneList<HBasicBlock*> return_targets_;
};
@@ -2008,29 +2165,6 @@ class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
};
-class HOuterContext V8_FINAL : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HOuterContext, HValue*);
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext);
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
-
- private:
- explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
class HDeclareGlobals V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals,
@@ -2063,53 +2197,6 @@ class HDeclareGlobals V8_FINAL : public HUnaryOperation {
};
-class HGlobalObject V8_FINAL : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(HGlobalObject);
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
-
- private:
- explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
-class HGlobalReceiver V8_FINAL : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HGlobalReceiver, HValue*);
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
-
- private:
- explicit HGlobalReceiver(HValue* global_object)
- : HUnaryOperation(global_object) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
template <int V>
class HCall : public HTemplateInstruction<V> {
public:
@@ -2131,8 +2218,6 @@ class HCall : public HTemplateInstruction<V> {
return -argument_count();
}
- virtual bool IsCall() V8_FINAL V8_OVERRIDE { return true; }
-
private:
int argument_count_;
};
@@ -2176,174 +2261,208 @@ class HBinaryCall : public HCall<2> {
};
-class HInvokeFunction V8_FINAL : public HBinaryCall {
+class HCallJSFunction V8_FINAL : public HCall<1> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
-
- HInvokeFunction(HValue* context,
- HValue* function,
- Handle<JSFunction> known_function,
- int argument_count)
- : HBinaryCall(context, function, argument_count),
- known_function_(known_function) {
- formal_parameter_count_ = known_function.is_null()
- ? 0 : known_function->shared()->formal_parameter_count();
- }
-
- static HInvokeFunction* New(Zone* zone,
+ static HCallJSFunction* New(Zone* zone,
HValue* context,
HValue* function,
- Handle<JSFunction> known_function,
- int argument_count) {
- return new(zone) HInvokeFunction(context, function,
- known_function, argument_count);
+ int argument_count,
+ bool pass_argument_count);
+
+ HValue* function() { return OperandAt(0); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
+ ASSERT(index == 0);
+ return Representation::Tagged();
}
- HValue* context() { return first(); }
- HValue* function() { return second(); }
- Handle<JSFunction> known_function() { return known_function_; }
- int formal_parameter_count() const { return formal_parameter_count_; }
+ bool pass_argument_count() const { return pass_argument_count_; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
+ virtual bool HasStackCheck() V8_FINAL V8_OVERRIDE {
+ return has_stack_check_;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction)
private:
- HInvokeFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
+ // The argument count includes the receiver.
+ HCallJSFunction(HValue* function,
+ int argument_count,
+ bool pass_argument_count,
+ bool has_stack_check)
+ : HCall<1>(argument_count),
+ pass_argument_count_(pass_argument_count),
+ has_stack_check_(has_stack_check) {
+ SetOperandAt(0, function);
}
- Handle<JSFunction> known_function_;
- int formal_parameter_count_;
+ bool pass_argument_count_;
+ bool has_stack_check_;
};
-class HCallConstantFunction V8_FINAL : public HCall<0> {
+class HCallWithDescriptor V8_FINAL : public HInstruction {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCallConstantFunction,
- Handle<JSFunction>,
- int);
-
- Handle<JSFunction> function() const { return function_; }
- int formal_parameter_count() const { return formal_parameter_count_; }
-
- bool IsApplyFunction() const {
- return function_->code() ==
- function_->GetIsolate()->builtins()->builtin(Builtins::kFunctionApply);
+ static HCallWithDescriptor* New(Zone* zone, HValue* context,
+ HValue* target,
+ int argument_count,
+ const CallInterfaceDescriptor* descriptor,
+ Vector<HValue*>& operands) {
+ ASSERT(operands.length() == descriptor->environment_length());
+ HCallWithDescriptor* res =
+ new(zone) HCallWithDescriptor(target, argument_count,
+ descriptor, operands, zone);
+ return res;
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::None();
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return values_.length(); }
+ virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE {
+ return values_[index];
}
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
-
- private:
- HCallConstantFunction(Handle<JSFunction> function, int argument_count)
- : HCall<0>(argument_count),
- function_(function),
- formal_parameter_count_(function->shared()->formal_parameter_count()) {}
-
- Handle<JSFunction> function_;
- int formal_parameter_count_;
-};
-
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ int par_index = index - 1;
+ ASSERT(par_index < descriptor_->environment_length());
+ return descriptor_->GetParameterRepresentation(par_index);
+ }
+ }
-class HCallKeyed V8_FINAL : public HBinaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallKeyed, HValue*, int);
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor)
- HValue* context() { return first(); }
- HValue* key() { return second(); }
+ virtual HType CalculateInferredType() V8_FINAL V8_OVERRIDE {
+ return HType::Tagged();
+ }
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
+ virtual int argument_count() const {
+ return argument_count_;
+ }
- private:
- HCallKeyed(HValue* context, HValue* key, int argument_count)
- : HBinaryCall(context, key, argument_count) {
+ virtual int argument_delta() const V8_OVERRIDE {
+ return -argument_count_;
}
-};
+ const CallInterfaceDescriptor* descriptor() const {
+ return descriptor_;
+ }
-class HCallNamed V8_FINAL : public HUnaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNamed, Handle<String>, int);
+ HValue* target() {
+ return OperandAt(0);
+ }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- HValue* context() { return value(); }
- Handle<String> name() const { return name_; }
+ private:
+ // The argument count includes the receiver.
+ HCallWithDescriptor(HValue* target,
+ int argument_count,
+ const CallInterfaceDescriptor* descriptor,
+ Vector<HValue*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ values_(descriptor->environment_length() + 1, zone) {
+ argument_count_ = argument_count;
+ AddOperand(target, zone);
+ for (int i = 0; i < operands.length(); i++) {
+ AddOperand(operands[i], zone);
+ }
+ this->set_representation(Representation::Tagged());
+ this->SetAllSideEffects();
+ }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed)
+ void AddOperand(HValue* v, Zone* zone) {
+ values_.Add(NULL, zone);
+ SetOperandAt(values_.length() - 1, v);
+ }
- private:
- HCallNamed(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
+ void InternalSetOperandAt(int index,
+ HValue* value) V8_FINAL V8_OVERRIDE {
+ values_[index] = value;
}
- Handle<String> name_;
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<HValue*> values_;
+ int argument_count_;
};
-class HCallFunction V8_FINAL : public HBinaryCall {
+class HInvokeFunction V8_FINAL : public HBinaryCall {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
-
- HValue* context() { return first(); }
- HValue* function() { return second(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction)
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
- private:
- HCallFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
+ HInvokeFunction(HValue* context,
+ HValue* function,
+ Handle<JSFunction> known_function,
+ int argument_count)
+ : HBinaryCall(context, function, argument_count),
+ known_function_(known_function) {
+ formal_parameter_count_ = known_function.is_null()
+ ? 0 : known_function->shared()->formal_parameter_count();
+ has_stack_check_ = !known_function.is_null() &&
+ (known_function->code()->kind() == Code::FUNCTION ||
+ known_function->code()->kind() == Code::OPTIMIZED_FUNCTION);
}
-};
-
-class HCallGlobal V8_FINAL : public HUnaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallGlobal, Handle<String>, int);
+ static HInvokeFunction* New(Zone* zone,
+ HValue* context,
+ HValue* function,
+ Handle<JSFunction> known_function,
+ int argument_count) {
+ return new(zone) HInvokeFunction(context, function,
+ known_function, argument_count);
+ }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ HValue* context() { return first(); }
+ HValue* function() { return second(); }
+ Handle<JSFunction> known_function() { return known_function_; }
+ int formal_parameter_count() const { return formal_parameter_count_; }
- HValue* context() { return value(); }
- Handle<String> name() const { return name_; }
+ virtual bool HasStackCheck() V8_FINAL V8_OVERRIDE {
+ return has_stack_check_;
+ }
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
private:
- HCallGlobal(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
+ HInvokeFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count),
+ has_stack_check_(false) {
}
- Handle<String> name_;
+ Handle<JSFunction> known_function_;
+ int formal_parameter_count_;
+ bool has_stack_check_;
};
-class HCallKnownGlobal V8_FINAL : public HCall<0> {
+class HCallFunction V8_FINAL : public HBinaryCall {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCallKnownGlobal, Handle<JSFunction>, int);
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(
+ HCallFunction, HValue*, int, CallFunctionFlags);
- Handle<JSFunction> target() const { return target_; }
- int formal_parameter_count() const { return formal_parameter_count_; }
+ HValue* context() { return first(); }
+ HValue* function() { return second(); }
+ CallFunctionFlags function_flags() const { return function_flags_; }
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::None();
- }
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction)
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal)
+ virtual int argument_delta() const V8_OVERRIDE { return -argument_count(); }
private:
- HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
- : HCall<0>(argument_count),
- target_(target),
- formal_parameter_count_(target->shared()->formal_parameter_count()) { }
-
- Handle<JSFunction> target_;
- int formal_parameter_count_;
+ HCallFunction(HValue* context,
+ HValue* function,
+ int argument_count,
+ CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS)
+ : HBinaryCall(context, function, argument_count), function_flags_(flags) {
+ }
+ CallFunctionFlags function_flags_;
};
@@ -2364,10 +2483,9 @@ class HCallNew V8_FINAL : public HBinaryCall {
class HCallNewArray V8_FINAL : public HBinaryCall {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray,
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallNewArray,
HValue*,
int,
- Handle<Cell>,
ElementsKind);
HValue* context() { return first(); }
@@ -2375,23 +2493,17 @@ class HCallNewArray V8_FINAL : public HBinaryCall {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Cell> property_cell() const {
- return type_cell_;
- }
-
ElementsKind elements_kind() const { return elements_kind_; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
private:
HCallNewArray(HValue* context, HValue* constructor, int argument_count,
- Handle<Cell> type_cell, ElementsKind elements_kind)
+ ElementsKind elements_kind)
: HBinaryCall(context, constructor, argument_count),
- elements_kind_(elements_kind),
- type_cell_(type_cell) {}
+ elements_kind_(elements_kind) {}
ElementsKind elements_kind_;
- Handle<Cell> type_cell_;
};
@@ -2452,35 +2564,13 @@ class HMapEnumLength V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value, HType::Smi()) {
set_representation(Representation::Smi());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
+ SetDependsOnFlag(kMaps);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HElementsKind V8_FINAL : public HUnaryOperation {
- public:
- explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnElementsKind);
- }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind)
-
- protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
-
- private:
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
@@ -2507,9 +2597,6 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
case kMathPowHalf:
case kMathLog:
case kMathExp:
- case kMathSin:
- case kMathCos:
- case kMathTan:
return Representation::Double();
case kMathAbs:
return representation();
@@ -2551,16 +2638,9 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
SetFlag(kFlexibleRepresentation);
// TODO(svenpanne) This flag is actually only needed if representation()
// is tagged, and not when it is an unboxed double or unboxed integer.
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
break;
case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
- set_representation(Representation::Double());
- // These operations use the TranscendentalCache, so they may allocate.
- SetGVNFlag(kChangesNewSpacePromotion);
- break;
case kMathExp:
case kMathSqrt:
case kMathPowHalf:
@@ -2575,6 +2655,9 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ HValue* SimplifiedDividendForMathFloorOfDiv(HDiv* hdiv);
+ HValue* SimplifiedDivisorForMathFloorOfDiv(HDiv* hdiv);
+
BuiltinFunctionId op_;
};
@@ -2604,7 +2687,7 @@ class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
SetFlag(kUseGVN);
// TODO(bmeurer): We'll need kDependsOnRoots once we add the
// corresponding HStoreRoot instruction.
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -2613,49 +2696,18 @@ class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
};
-class HLoadExternalArrayPointer V8_FINAL : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HLoadExternalArrayPointer, HValue*);
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType() V8_OVERRIDE {
- return HType::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
-
- protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
-
- private:
- explicit HLoadExternalArrayPointer(HValue* value)
- : HUnaryOperation(value) {
- set_representation(Representation::External());
- // The result of this instruction is idempotent as long as its inputs don't
- // change. The external array of a specialized array elements object cannot
- // change once set, so it's no necessary to introduce any additional
- // dependencies on top of the inputs.
- SetFlag(kUseGVN);
- }
-
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
public:
static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
Handle<Map> map, CompilationInfo* info,
- HValue *typecheck = NULL);
+ HValue* typecheck = NULL);
static HCheckMaps* New(Zone* zone, HValue* context,
HValue* value, SmallMapList* maps,
- HValue *typecheck = NULL) {
+ CompilationInfo* info,
+ HValue* typecheck = NULL) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
for (int i = 0; i < maps->length(); i++) {
- check_map->Add(maps->at(i), zone);
+ check_map->Add(maps->at(i), info, zone);
}
return check_map;
}
@@ -2666,19 +2718,31 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
+ HValue* typecheck() { return OperandAt(1); }
Unique<Map> first_map() const { return map_set_.at(0); }
UniqueSet<Map> map_set() const { return map_set_; }
+ void set_map_set(UniqueSet<Map>* maps, Zone *zone) {
+ map_set_.Clear();
+ for (int i = 0; i < maps->size(); i++) {
+ map_set_.Add(maps->at(i), zone);
+ }
+ }
+
bool has_migration_target() const {
return has_migration_target_;
}
+ bool is_stable() const {
+ return is_stable_;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
@@ -2689,30 +2753,38 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
virtual int RedefinedOperandIndex() { return 0; }
private:
- void Add(Handle<Map> map, Zone* zone) {
+ void Add(Handle<Map> map, CompilationInfo* info, Zone* zone) {
map_set_.Add(Unique<Map>(map), zone);
+ is_stable_ = is_stable_ && map->is_stable();
+ if (is_stable_) {
+ map->AddDependentCompilationInfo(
+ DependentCode::kPrototypeCheckGroup, info);
+ } else {
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
+ }
+
if (!has_migration_target_ && map->is_migration_target()) {
has_migration_target_ = true;
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
}
// Clients should use one of the static New* methods above.
HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
: HTemplateInstruction<2>(value->type()),
- omit_(false), has_migration_target_(false) {
+ omit_(false), has_migration_target_(false), is_stable_(true) {
SetOperandAt(0, value);
// Use the object value for the dependency if NULL is passed.
SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
}
bool omit_;
bool has_migration_target_;
+ bool is_stable_;
UniqueSet<Map> map_set_;
};
@@ -3147,7 +3219,7 @@ class HPhi V8_FINAL : public HValue {
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
- virtual int position() const V8_OVERRIDE;
+ virtual HSourcePosition position() const V8_OVERRIDE;
int merged_index() const { return merged_index_; }
@@ -3312,7 +3384,7 @@ class HCapturedObject V8_FINAL : public HDematerializedObject {
void ReuseSideEffectsFromStore(HInstruction* store) {
ASSERT(store->HasObservableSideEffects());
ASSERT(store->IsStoreNamedField());
- gvn_flags_.Add(store->gvn_flags());
+ changes_flags_.Add(store->ChangesFlags());
}
// Replay effects of this instruction on the given environment.
@@ -3345,10 +3417,8 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
int32_t value,
Representation representation,
HInstruction* instruction) {
- HConstant* new_constant =
- HConstant::New(zone, context, value, representation);
- new_constant->InsertAfter(instruction);
- return new_constant;
+ return instruction->Append(HConstant::New(
+ zone, context, value, representation));
}
static HConstant* CreateAndInsertBefore(Zone* zone,
@@ -3356,21 +3426,17 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
int32_t value,
Representation representation,
HInstruction* instruction) {
- HConstant* new_constant =
- HConstant::New(zone, context, value, representation);
- new_constant->InsertBefore(instruction);
- return new_constant;
+ return instruction->Prepend(HConstant::New(
+ zone, context, value, representation));
}
static HConstant* CreateAndInsertBefore(Zone* zone,
Unique<Object> unique,
bool is_not_in_new_space,
HInstruction* instruction) {
- HConstant* new_constant = new(zone) HConstant(unique,
- Representation::Tagged(), HType::Tagged(), false, is_not_in_new_space,
- false, false);
- new_constant->InsertBefore(instruction);
- return new_constant;
+ return instruction->Prepend(new(zone) HConstant(
+ unique, Representation::Tagged(), HType::Tagged(), false,
+ is_not_in_new_space, false, false));
}
Handle<Object> handle(Isolate* isolate) {
@@ -3402,33 +3468,7 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return is_not_in_new_space_;
}
- bool ImmortalImmovable() const {
- if (has_int32_value_) {
- return false;
- }
- if (has_double_value_) {
- if (IsSpecialDouble()) {
- return true;
- }
- return false;
- }
- if (has_external_reference_value_) {
- return false;
- }
-
- ASSERT(!object_.handle().is_null());
- Heap* heap = isolate()->heap();
- ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
- ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
- return
- object_.IsKnownGlobal(heap->undefined_value()) ||
- object_.IsKnownGlobal(heap->null_value()) ||
- object_.IsKnownGlobal(heap->true_value()) ||
- object_.IsKnownGlobal(heap->false_value()) ||
- object_.IsKnownGlobal(heap->the_hole_value()) ||
- object_.IsKnownGlobal(heap->empty_string()) ||
- object_.IsKnownGlobal(heap->empty_fixed_array());
- }
+ bool ImmortalImmovable() const;
bool IsCell() const {
return is_cell_;
@@ -3684,6 +3724,13 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return representation();
}
+ void SetOperandPositions(Zone* zone,
+ HSourcePosition left_pos,
+ HSourcePosition right_pos) {
+ set_operand_position(zone, 1, left_pos);
+ set_operand_position(zone, 2, right_pos);
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
private:
@@ -3698,6 +3745,8 @@ class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -3708,15 +3757,21 @@ class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ bool known_function() const { return known_function_; }
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
private:
HWrapReceiver(HValue* receiver, HValue* function) {
+ known_function_ = function->IsConstant() &&
+ HConstant::cast(function)->handle(function->isolate())->IsJSFunction();
set_representation(Representation::Tagged());
SetOperandAt(0, receiver);
SetOperandAt(1, function);
+ SetFlag(kUseGVN);
}
+
+ bool known_function_;
};
@@ -3893,6 +3948,8 @@ class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
protected:
friend class HBoundsCheckBaseIndexInformation;
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
bool skip_check_;
HValue* base_;
@@ -3962,7 +4019,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4039,7 +4096,7 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4050,7 +4107,14 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
}
+ bool RightIsPowerOf2() {
+ if (!right()->IsInteger32Constant()) return false;
+ int32_t value = right()->GetInteger32Constant();
+ return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
+
private:
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -4117,6 +4181,13 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ void SetOperandPositions(Zone* zone,
+ HSourcePosition left_pos,
+ HSourcePosition right_pos) {
+ set_operand_position(zone, 0, left_pos);
+ set_operand_position(zone, 1, right_pos);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
private:
@@ -4165,6 +4236,28 @@ class HCompareHoleAndBranch V8_FINAL : public HUnaryControlInstruction {
};
+class HCompareMinusZeroAndBranch V8_FINAL : public HUnaryControlInstruction {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HCompareMinusZeroAndBranch, HValue*);
+
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return representation();
+ }
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch)
+
+ private:
+ explicit HCompareMinusZeroAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) {
+ }
+};
+
+
class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
public:
HCompareObjectEqAndBranch(HValue* left,
@@ -4240,6 +4333,9 @@ class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+ protected:
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
HIsStringAndBranch(HValue* value,
HBasicBlock* true_target = NULL,
@@ -4262,6 +4358,7 @@ class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual int RedefinedOperandIndex() { return 0; }
private:
HIsSmiAndBranch(HValue* value,
@@ -4326,7 +4423,7 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
SetOperandAt(1, left);
SetOperandAt(2, right);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
Token::Value token_;
@@ -4445,20 +4542,27 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
Handle<String> type_literal() { return type_literal_; }
+ bool compares_number_type() { return compares_number_type_; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ return Representation::None();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
private:
HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
: HUnaryControlInstruction(value, NULL, NULL),
- type_literal_(type_literal) { }
+ type_literal_(type_literal) {
+ Heap* heap = type_literal->GetHeap();
+ compares_number_type_ = type_literal->Equals(heap->number_string());
+ }
Handle<String> type_literal_;
+ bool compares_number_type_ : 1;
};
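
The constructor above now precomputes whether the literal is "number" and stores the answer in a single bit, so later queries do not have to re-compare the string. A hedged sketch of the same precompute-once pattern with a hypothetical class, not V8 API:

#include <string>

// Hypothetical: decide the comparison kind once at construction, mirroring
// compares_number_type_ above.
class TypeofIs {
 public:
  explicit TypeofIs(const std::string& literal)
      : compares_number_type_(literal == "number") {}
  bool compares_number_type() const { return compares_number_type_; }
 private:
  bool compares_number_type_ : 1;
};
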
@@ -4544,7 +4648,7 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, right);
set_representation(Representation::Double());
SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -4553,28 +4657,6 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
};
-class HRandom V8_FINAL : public HTemplateInstruction<1> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HRandom, HValue*);
-
- HValue* global_object() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Random)
-
- private:
- explicit HRandom(HValue* global_object) {
- SetOperandAt(0, global_object);
- set_representation(Representation::Double());
- }
-
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
class HAdd V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
@@ -4584,8 +4666,9 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
+ // We also do not commute (pointer + offset).
virtual bool IsCommutative() const V8_OVERRIDE {
- return !representation().IsTagged();
+ return !representation().IsTagged() && !representation().IsExternal();
}
virtual HValue* EnsureAndPropagateNotMinusZero(
@@ -4607,7 +4690,7 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
if (to.IsTagged()) {
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
ClearFlag(kAllowUndefinedAsNaN);
}
if (to.IsTagged() &&
@@ -4621,6 +4704,10 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
}
}
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
@@ -4730,20 +4817,7 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
- HValue* right,
- Maybe<int> fixed_right_arg);
-
- Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
-
- bool HasPowerOf2Divisor() {
- if (right()->IsConstant() &&
- HConstant::cast(right())->HasInteger32Value()) {
- int32_t value = HConstant::cast(right())->Integer32Value();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
- }
-
- return false;
- }
+ HValue* right);
virtual HValue* EnsureAndPropagateNotMinusZero(
BitVector* visited) V8_OVERRIDE;
@@ -4767,15 +4841,10 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
private:
HMod(HValue* context,
HValue* left,
- HValue* right,
- Maybe<int> fixed_right_arg)
- : HArithmeticBinaryOperation(context, left, right),
- fixed_right_arg_(fixed_right_arg) {
+ HValue* right) : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
}
-
- const Maybe<int> fixed_right_arg_;
};
@@ -4786,15 +4855,6 @@ class HDiv V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- bool HasPowerOf2Divisor() {
- if (right()->IsInteger32Constant()) {
- int32_t value = right()->GetInteger32Constant();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
- }
-
- return false;
- }
-
virtual HValue* EnsureAndPropagateNotMinusZero(
BitVector* visited) V8_OVERRIDE;
@@ -5087,8 +5147,8 @@ class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
private:
explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
- SetGVNFlag(kChangesOsrEntries);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kOsrEntries);
+ SetChangesFlag(kNewSpacePromotion);
}
BailoutId ast_id_;
@@ -5146,13 +5206,6 @@ class HCallStub V8_FINAL : public HUnaryCall {
HValue* context() { return value(); }
- void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
- transcendental_type_ = transcendental_type;
- }
- TranscendentalCache::Type transcendental_type() {
- return transcendental_type_;
- }
-
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CallStub)
@@ -5160,12 +5213,10 @@ class HCallStub V8_FINAL : public HUnaryCall {
private:
HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
: HUnaryCall(context, argument_count),
- major_key_(major_key),
- transcendental_type_(TranscendentalCache::kNumberOfCaches) {
+ major_key_(major_key) {
}
CodeStub::Major major_key_;
- TranscendentalCache::Type transcendental_type_;
};
@@ -5239,7 +5290,7 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
: cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnGlobalVars);
+ SetDependsOnFlag(kGlobalVars);
}
virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
@@ -5287,14 +5338,22 @@ class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
class HAllocate V8_FINAL : public HTemplateInstruction<2> {
public:
+ static bool CompatibleInstanceTypes(InstanceType type1,
+ InstanceType type2) {
+ return ComputeFlags(TENURED, type1) == ComputeFlags(TENURED, type2) &&
+ ComputeFlags(NOT_TENURED, type1) == ComputeFlags(NOT_TENURED, type2);
+ }
+
static HAllocate* New(Zone* zone,
HValue* context,
HValue* size,
HType type,
PretenureFlag pretenure_flag,
- InstanceType instance_type) {
+ InstanceType instance_type,
+ Handle<AllocationSite> allocation_site =
+ Handle<AllocationSite>::null()) {
return new(zone) HAllocate(context, size, type, pretenure_flag,
- instance_type);
+ instance_type, allocation_site);
}
// Maximum instance size for which allocations will be inlined.
@@ -5343,11 +5402,15 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
}
+ bool MustClearNextMapWord() const {
+ return (flags_ & CLEAR_NEXT_MAP_WORD) != 0;
+ }
+
void MakeDoubleAligned() {
flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
}
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -5360,30 +5423,45 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
ALLOCATE_IN_OLD_DATA_SPACE = 1 << 1,
ALLOCATE_IN_OLD_POINTER_SPACE = 1 << 2,
ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
- PREFILL_WITH_FILLER = 1 << 4
+ PREFILL_WITH_FILLER = 1 << 4,
+ CLEAR_NEXT_MAP_WORD = 1 << 5
};
HAllocate(HValue* context,
HValue* size,
HType type,
PretenureFlag pretenure_flag,
- InstanceType instance_type)
+ InstanceType instance_type,
+ Handle<AllocationSite> allocation_site =
+ Handle<AllocationSite>::null())
: HTemplateInstruction<2>(type),
+ flags_(ComputeFlags(pretenure_flag, instance_type)),
dominating_allocate_(NULL),
- filler_free_space_size_(NULL),
- clear_next_map_word_(false) {
+ filler_free_space_size_(NULL) {
SetOperandAt(0, context);
SetOperandAt(1, size);
set_representation(Representation::Tagged());
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kChangesNewSpacePromotion);
- SetGVNFlag(kDependsOnNewSpacePromotion);
- flags_ = pretenure_flag == TENURED
+ SetChangesFlag(kNewSpacePromotion);
+ SetDependsOnFlag(kNewSpacePromotion);
+
+ if (FLAG_trace_pretenuring) {
+ PrintF("HAllocate with AllocationSite %p %s\n",
+ allocation_site.is_null()
+ ? static_cast<void*>(NULL)
+ : static_cast<void*>(*allocation_site),
+ pretenure_flag == TENURED ? "tenured" : "not tenured");
+ }
+ }
+
+ static Flags ComputeFlags(PretenureFlag pretenure_flag,
+ InstanceType instance_type) {
+ Flags flags = pretenure_flag == TENURED
? (Heap::TargetSpaceId(instance_type) == OLD_POINTER_SPACE
? ALLOCATE_IN_OLD_POINTER_SPACE : ALLOCATE_IN_OLD_DATA_SPACE)
: ALLOCATE_IN_NEW_SPACE;
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
- flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
+ flags = static_cast<Flags>(flags | ALLOCATE_DOUBLE_ALIGNED);
}
// We have to fill the allocated object with one word fillers if we do
// not use allocation folding since some allocations may depend on each
@@ -5391,10 +5469,19 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
// allocations may leave such objects behind in a not completely initialized
// state.
if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
- flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
+ flags = static_cast<Flags>(flags | PREFILL_WITH_FILLER);
}
- clear_next_map_word_ = pretenure_flag == NOT_TENURED &&
- AllocationSite::CanTrack(instance_type);
+ if (pretenure_flag == NOT_TENURED &&
+ AllocationSite::CanTrack(instance_type)) {
+ flags = static_cast<Flags>(flags | CLEAR_NEXT_MAP_WORD);
+ }
+ return flags;
+ }
+
+ void UpdateClearNextMapWord(bool clear_next_map_word) {
+ flags_ = static_cast<Flags>(clear_next_map_word
+ ? flags_ | CLEAR_NEXT_MAP_WORD
+ : flags_ & ~CLEAR_NEXT_MAP_WORD);
}
void UpdateSize(HValue* size) {
@@ -5420,7 +5507,6 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
Handle<Map> known_initial_map_;
HAllocate* dominating_allocate_;
HStoreNamedField* filler_free_space_size_;
- bool clear_next_map_word_;
};
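
Moving the flag computation into the static ComputeFlags helper is what lets CompatibleInstanceTypes decide whether two instance types can share an allocation: they are compatible exactly when they would produce the same flag word under both tenuring decisions. A self-contained sketch of that pattern with invented flag names, not the real HAllocate::Flags values:

// Hedged sketch: flags derived purely from (pretenure, alignment need), so
// equality of the derived flags is what "compatible for folding" means.
enum Flags {
  ALLOCATE_IN_NEW_SPACE = 1 << 0,
  ALLOCATE_IN_OLD_SPACE = 1 << 1,
  DOUBLE_ALIGNED        = 1 << 2
};
enum Pretenure { NOT_TENURED, TENURED };

static Flags ComputeFlags(Pretenure pretenure, bool needs_double_alignment) {
  int flags = (pretenure == TENURED) ? ALLOCATE_IN_OLD_SPACE
                                     : ALLOCATE_IN_NEW_SPACE;
  if (needs_double_alignment) flags |= DOUBLE_ALIGNED;
  return static_cast<Flags>(flags);
}

static bool CompatibleAllocations(bool align1, bool align2) {
  return ComputeFlags(TENURED, align1) == ComputeFlags(TENURED, align2) &&
         ComputeFlags(NOT_TENURED, align1) == ComputeFlags(NOT_TENURED, align2);
}
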
@@ -5450,21 +5536,21 @@ class HStoreCodeEntry V8_FINAL: public HTemplateInstruction<2> {
};
-class HInnerAllocatedObject V8_FINAL: public HTemplateInstruction<1> {
+class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<2> {
public:
static HInnerAllocatedObject* New(Zone* zone,
HValue* context,
HValue* value,
- int offset,
+ HValue* offset,
HType type = HType::Tagged()) {
return new(zone) HInnerAllocatedObject(value, offset, type);
}
HValue* base_object() { return OperandAt(0); }
- int offset() { return offset_; }
+ HValue* offset() { return OperandAt(1); }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ return index == 0 ? Representation::Tagged() : Representation::Integer32();
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -5472,15 +5558,16 @@ class HInnerAllocatedObject V8_FINAL: public HTemplateInstruction<1> {
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
private:
- HInnerAllocatedObject(HValue* value, int offset, HType type = HType::Tagged())
- : HTemplateInstruction<1>(type), offset_(offset) {
+ HInnerAllocatedObject(HValue* value,
+ HValue* offset,
+ HType type = HType::Tagged())
+ : HTemplateInstruction<2>(type) {
ASSERT(value->IsAllocate());
SetOperandAt(0, value);
+ SetOperandAt(1, offset);
set_type(type);
set_representation(Representation::Tagged());
}
-
- int offset_;
};
@@ -5492,11 +5579,10 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
+ HValue* value,
HValue* new_space_dominator) {
- if (object->IsInnerAllocatedObject()) {
- return ReceiverObjectNeedsWriteBarrier(
- HInnerAllocatedObject::cast(object)->base_object(),
- new_space_dominator);
+ while (object->IsInnerAllocatedObject()) {
+ object = HInnerAllocatedObject::cast(object)->base_object();
}
if (object->IsConstant() && HConstant::cast(object)->IsCell()) {
return false;
@@ -5508,7 +5594,17 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
}
if (object != new_space_dominator) return true;
if (object->IsAllocate()) {
- return !HAllocate::cast(object)->IsNewSpaceAllocation();
+ // Stores to new space allocations require no write barriers if the object
+ // is the new space dominator.
+ if (HAllocate::cast(object)->IsNewSpaceAllocation()) {
+ return false;
+ }
+ // Likewise we don't need a write barrier if we store a value that
+ // originates from the same allocation (via allocation folding).
+ while (value->IsInnerAllocatedObject()) {
+ value = HInnerAllocatedObject::cast(value)->base_object();
+ }
+ return object != value;
}
return true;
}
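
The rewritten ReceiverObjectNeedsWriteBarrier walks inner-allocated objects up to their base allocation for both the receiver and the stored value, so a store whose value lives inside the same folded allocation never emits a barrier. A toy model of that walk, with a hypothetical node type standing in for HValue:

// Hypothetical node: either a base allocation or an inner pointer into one.
struct Node {
  Node* base;               // non-null for inner-allocated objects
  bool is_new_space_alloc;  // meaningful only for base allocations
};

static Node* BaseAllocation(Node* n) {
  while (n->base != nullptr) n = n->base;  // strip inner-object wrappers
  return n;
}

// Sketch of the rule above: no barrier when the receiver is the dominating
// new-space allocation, or when value and receiver share that allocation.
static bool NeedsWriteBarrier(Node* object, Node* value, Node* dominator) {
  object = BaseAllocation(object);
  if (object != dominator) return true;
  if (object->is_new_space_alloc) return false;
  return BaseAllocation(value) != object;
}
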
@@ -5545,7 +5641,7 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value),
cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
details_(details) {
- SetGVNFlag(kChangesGlobalVars);
+ SetChangesFlag(kGlobalVars);
}
Unique<PropertyCell> cell_;
@@ -5553,52 +5649,6 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
};
-class HStoreGlobalGeneric : public HTemplateInstruction<3> {
- public:
- inline static HStoreGlobalGeneric* New(Zone* zone,
- HValue* context,
- HValue* global_object,
- Handle<Object> name,
- HValue* value,
- StrictModeFlag strict_mode_flag) {
- return new(zone) HStoreGlobalGeneric(context, global_object,
- name, value, strict_mode_flag);
- }
-
- HValue* context() { return OperandAt(0); }
- HValue* global_object() { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
- HValue* value() { return OperandAt(2); }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric)
-
- private:
- HStoreGlobalGeneric(HValue* context,
- HValue* global_object,
- Handle<Object> name,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : name_(name),
- strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, context);
- SetOperandAt(1, global_object);
- SetOperandAt(2, value);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- Handle<Object> name_;
- StrictModeFlag strict_mode_flag_;
-};
-
-
class HLoadContextSlot V8_FINAL : public HUnaryOperation {
public:
enum Mode {
@@ -5630,7 +5680,7 @@ class HLoadContextSlot V8_FINAL : public HUnaryOperation {
}
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnContextSlots);
+ SetDependsOnFlag(kContextSlots);
}
int slot_index() const { return slot_index_; }
@@ -5714,7 +5764,7 @@ class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
: slot_index_(slot_index), mode_(mode) {
SetOperandAt(0, context);
SetOperandAt(1, value);
- SetGVNFlag(kChangesContextSlots);
+ SetChangesFlag(kContextSlots);
}
int slot_index_;
@@ -5750,8 +5800,19 @@ class HObjectAccess V8_FINAL {
return name_;
}
+ inline bool immutable() const {
+ return ImmutableField::decode(value_);
+ }
+
+ // Returns true if access is being made to an in-object property that
+ // was already added to the object.
+ inline bool existing_inobject_property() const {
+ return ExistingInobjectPropertyField::decode(value_);
+ }
+
inline HObjectAccess WithRepresentation(Representation representation) {
- return HObjectAccess(portion(), offset(), representation, name());
+ return HObjectAccess(portion(), offset(), representation, name(),
+ immutable(), existing_inobject_property());
}
static HObjectAccess ForHeapNumberValue() {
@@ -5792,13 +5853,11 @@ class HObjectAccess V8_FINAL {
? Representation::Smi() : Representation::Tagged());
}
- static HObjectAccess ForAllocationSiteOffset(int offset) {
- ASSERT(offset >= HeapObject::kHeaderSize && offset < AllocationSite::kSize);
- return HObjectAccess(kInobject, offset);
- }
+ static HObjectAccess ForAllocationSiteOffset(int offset);
static HObjectAccess ForAllocationSiteList() {
- return HObjectAccess(kExternalMemory, 0, Representation::Tagged());
+ return HObjectAccess(kExternalMemory, 0, Representation::Tagged(),
+ Handle<String>::null(), false, false);
}
static HObjectAccess ForFixedArrayLength() {
@@ -5808,6 +5867,12 @@ class HObjectAccess V8_FINAL {
FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
}
+ static HObjectAccess ForStringHashField() {
+ return HObjectAccess(kInobject,
+ String::kHashFieldOffset,
+ Representation::Integer32());
+ }
+
static HObjectAccess ForStringLength() {
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
return HObjectAccess(
@@ -5816,6 +5881,14 @@ class HObjectAccess V8_FINAL {
FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
}
+ static HObjectAccess ForConsStringFirst() {
+ return HObjectAccess(kInobject, ConsString::kFirstOffset);
+ }
+
+ static HObjectAccess ForConsStringSecond() {
+ return HObjectAccess(kInobject, ConsString::kSecondOffset);
+ }
+
static HObjectAccess ForPropertiesPointer() {
return HObjectAccess(kInobject, JSObject::kPropertiesOffset);
}
@@ -5844,6 +5917,10 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
}
+ static HObjectAccess ForFirstOsrAstIdSlot() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kFirstOsrAstIdSlot);
+ }
+
static HObjectAccess ForOptimizedCodeMap() {
return HObjectAccess(kInobject,
SharedFunctionInfo::kOptimizedCodeMapOffset);
@@ -5860,7 +5937,13 @@ class HObjectAccess V8_FINAL {
static HObjectAccess ForMapInstanceSize() {
return HObjectAccess(kInobject,
Map::kInstanceSizeOffset,
- Representation::Byte());
+ Representation::UInteger8());
+ }
+
+ static HObjectAccess ForMapInstanceType() {
+ return HObjectAccess(kInobject,
+ Map::kInstanceTypeOffset,
+ Representation::UInteger8());
}
static HObjectAccess ForPropertyCellValue() {
@@ -5876,16 +5959,29 @@ class HObjectAccess V8_FINAL {
}
static HObjectAccess ForCounter() {
- return HObjectAccess(kExternalMemory, 0, Representation::Integer32());
+ return HObjectAccess(kExternalMemory, 0, Representation::Integer32(),
+ Handle<String>::null(), false, false);
}
// Create an access to an offset in a fixed array header.
static HObjectAccess ForFixedArrayHeader(int offset);
// Create an access to an in-object property in a JSObject.
- static HObjectAccess ForJSObjectOffset(int offset,
+ // This kind of access must be used when the object |map| is known and
+ // in-object properties are being accessed. Accesses of the in-object
+ // properties can have different semantics depending on whether corresponding
+ // property was added to the map or not.
+ static HObjectAccess ForMapAndOffset(Handle<Map> map, int offset,
Representation representation = Representation::Tagged());
+ // Create an access to an in-object property in a JSObject.
+ // This kind of access can be used for accessing object header fields or
+ // in-object properties if the map of the object is not known.
+ static HObjectAccess ForObservableJSObjectOffset(int offset,
+ Representation representation = Representation::Tagged()) {
+ return ForMapAndOffset(Handle<Map>::null(), offset, representation);
+ }
+
// Create an access to an in-object property in a JSArray.
static HObjectAccess ForJSArrayOffset(int offset);
@@ -5902,14 +5998,58 @@ class HObjectAccess V8_FINAL {
// Create an access for the payload of a Cell or JSGlobalPropertyCell.
static HObjectAccess ForCellPayload(Isolate* isolate);
- void PrintTo(StringStream* stream);
+ static HObjectAccess ForJSTypedArrayLength() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSTypedArray::kLengthOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferBackingStore() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBuffer::kBackingStoreOffset, Representation::External());
+ }
+
+ static HObjectAccess ForExternalArrayExternalPointer() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ ExternalArray::kExternalPointerOffset, Representation::External());
+ }
+
+ static HObjectAccess ForJSArrayBufferViewWeakNext() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBufferView::kWeakNextOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferWeakFirstView() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBuffer::kWeakFirstViewOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferViewBuffer() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBufferView::kBufferOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferViewByteOffset() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBufferView::kByteOffsetOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferViewByteLength() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBufferView::kByteLengthOffset);
+ }
+
+ static HObjectAccess ForGlobalObjectNativeContext() {
+ return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset);
+ }
+
+ void PrintTo(StringStream* stream) const;
inline bool Equals(HObjectAccess that) const {
return value_ == that.value_; // portion and offset must match
}
protected:
- void SetGVNFlags(HValue *instr, bool is_store);
+ void SetGVNFlags(HValue *instr, PropertyAccessType access_type);
private:
// internal use only; different parts of an object or array
@@ -5924,28 +6064,41 @@ class HObjectAccess V8_FINAL {
kExternalMemory // some field in external memory
};
+ HObjectAccess() : value_(0) {}
+
HObjectAccess(Portion portion, int offset,
Representation representation = Representation::Tagged(),
- Handle<String> name = Handle<String>::null())
+ Handle<String> name = Handle<String>::null(),
+ bool immutable = false,
+ bool existing_inobject_property = true)
: value_(PortionField::encode(portion) |
RepresentationField::encode(representation.kind()) |
+ ImmutableField::encode(immutable ? 1 : 0) |
+ ExistingInobjectPropertyField::encode(
+ existing_inobject_property ? 1 : 0) |
OffsetField::encode(offset)),
name_(name) {
// assert that the fields decode correctly
ASSERT(this->offset() == offset);
ASSERT(this->portion() == portion);
+ ASSERT(this->immutable() == immutable);
+ ASSERT(this->existing_inobject_property() == existing_inobject_property);
ASSERT(RepresentationField::decode(value_) == representation.kind());
+ ASSERT(!this->existing_inobject_property() || IsInobject());
}
class PortionField : public BitField<Portion, 0, 3> {};
- class RepresentationField : public BitField<Representation::Kind, 3, 3> {};
- class OffsetField : public BitField<int, 6, 26> {};
+ class RepresentationField : public BitField<Representation::Kind, 3, 4> {};
+ class ImmutableField : public BitField<bool, 7, 1> {};
+ class ExistingInobjectPropertyField : public BitField<bool, 8, 1> {};
+ class OffsetField : public BitField<int, 9, 23> {};
- uint32_t value_; // encodes portion, representation, and offset
+ uint32_t value_; // encodes portion, representation, immutable, and offset
Handle<String> name_;
friend class HLoadNamedField;
friend class HStoreNamedField;
+ friend class SideEffectsTracker;
inline Portion portion() const {
return PortionField::decode(value_);
@@ -5953,12 +6106,17 @@ class HObjectAccess V8_FINAL {
};
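
Widening RepresentationField to four bits and adding the immutable and existing-in-object-property bits pushes the offset field down to bits 9..31, which is why the constructor asserts that every component decodes back to what was encoded. A standalone sketch of the same layout discipline; the field widths are copied from the hunk above, but the BitField template is re-sketched here rather than taken from V8:

#include <cassert>
#include <cstdint>

// Minimal BitField in the spirit of V8's: encode/decode a value at a fixed
// bit position inside a 32-bit word.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
};

typedef BitField<int, 0, 3>  PortionField;                    // 3 bits
typedef BitField<int, 3, 4>  RepresentationField;             // widened to 4
typedef BitField<bool, 7, 1> ImmutableField;
typedef BitField<bool, 8, 1> ExistingInobjectPropertyField;
typedef BitField<int, 9, 23> OffsetField;                     // 23 bits left

int main() {
  uint32_t v = PortionField::encode(2) | RepresentationField::encode(5) |
               ImmutableField::encode(true) | OffsetField::encode(128);
  assert(PortionField::decode(v) == 2);
  assert(ImmutableField::decode(v));
  assert(OffsetField::decode(v) == 128);
  return 0;
}
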
-class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
+class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HLoadNamedField, HValue*, HObjectAccess);
+ DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*, HValue*,
+ HObjectAccess);
HValue* object() { return OperandAt(0); }
- bool HasTypeCheck() { return object()->IsCheckMaps(); }
+ HValue* dependency() {
+ ASSERT(HasDependency());
+ return OperandAt(1);
+ }
+ bool HasDependency() const { return OperandAt(0) != OperandAt(1); }
HObjectAccess access() const { return access_; }
Representation field_representation() const {
return access_.representation();
@@ -5987,16 +6145,26 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
}
private:
- HLoadNamedField(HValue* object, HObjectAccess access) : access_(access) {
+ HLoadNamedField(HValue* object,
+ HValue* dependency,
+ HObjectAccess access) : access_(access) {
ASSERT(object != NULL);
SetOperandAt(0, object);
+ SetOperandAt(1, dependency != NULL ? dependency : object);
Representation representation = access.representation();
- if (representation.IsByte()) {
+ if (representation.IsInteger8() ||
+ representation.IsUInteger8() ||
+ representation.IsInteger16() ||
+ representation.IsUInteger16()) {
set_representation(Representation::Integer32());
} else if (representation.IsSmi()) {
set_type(HType::Smi());
- set_representation(representation);
+ if (SmiValuesAre32Bits()) {
+ set_representation(Representation::Integer32());
+ } else {
+ set_representation(representation);
+ }
} else if (representation.IsDouble() ||
representation.IsExternal() ||
representation.IsInteger32()) {
@@ -6008,7 +6176,7 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
} else {
set_representation(Representation::Tagged());
}
- access.SetGVNFlags(this, false);
+ access.SetGVNFlags(this, LOAD);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -6067,7 +6235,7 @@ class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
: HUnaryOperation(function) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
};
@@ -6105,6 +6273,12 @@ class HLoadKeyed V8_FINAL
bool is_external() const {
return IsExternalArrayElementsKind(elements_kind());
}
+ bool is_fixed_typed_array() const {
+ return IsFixedTypedArrayElementsKind(elements_kind());
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* dependency() {
@@ -6133,9 +6307,10 @@ class HLoadKeyed V8_FINAL
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- // kind_fast: tagged[int32] (none)
- // kind_double: tagged[int32] (none)
- // kind_external: external[int32] (none)
+ // kind_fast: tagged[int32] (none)
+ // kind_double: tagged[int32] (none)
+ // kind_fixed_typed_array: tagged[int32] (none)
+ // kind_external: external[int32] (none)
if (index == 0) {
return is_external() ? Representation::External()
: Representation::Tagged();
@@ -6185,7 +6360,7 @@ class HLoadKeyed V8_FINAL
SetOperandAt(1, key);
SetOperandAt(2, dependency != NULL ? dependency : obj);
- if (!is_external()) {
+ if (!is_typed_elements()) {
// I can detect the case between storing double (holey and fast) and
// smi/object by looking at elements_kind_.
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
@@ -6196,27 +6371,39 @@ class HLoadKeyed V8_FINAL
(!IsHoleyElementsKind(elements_kind) ||
mode == NEVER_RETURN_HOLE)) {
set_type(HType::Smi());
- set_representation(Representation::Smi());
+ if (SmiValuesAre32Bits() && !RequiresHoleCheck()) {
+ set_representation(Representation::Integer32());
+ } else {
+ set_representation(Representation::Smi());
+ }
} else {
set_representation(Representation::Tagged());
}
- SetGVNFlag(kDependsOnArrayElements);
+ SetDependsOnFlag(kArrayElements);
} else {
set_representation(Representation::Double());
- SetGVNFlag(kDependsOnDoubleArrayElements);
+ SetDependsOnFlag(kDoubleArrayElements);
}
} else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
set_representation(Representation::Double());
} else {
set_representation(Representation::Integer32());
}
- SetGVNFlag(kDependsOnExternalMemory);
+ if (is_external()) {
+ SetDependsOnFlag(kExternalMemory);
+ } else if (is_fixed_typed_array()) {
+ SetDependsOnFlag(kTypedArrayElements);
+ } else {
+ UNREACHABLE();
+ }
// Native code could change the specialized array.
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
SetFlag(kUseGVN);
@@ -6288,10 +6475,23 @@ class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
};
+// Indicates whether the store is a store to an entry that was previously
+// initialized or not.
+enum StoreFieldOrKeyedMode {
+ // The entry could be either previously initialized or not.
+ INITIALIZING_STORE,
+ // At the time of this store it is guaranteed that the entry is already
+ // initialized.
+ STORE_TO_INITIALIZED_ENTRY
+};
+
+
class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
HObjectAccess, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P4(HStoreNamedField, HValue*,
+ HObjectAccess, HValue*, StoreFieldOrKeyedMode);
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
@@ -6306,20 +6506,31 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
// object must be external in case of external memory access
return Representation::External();
} else if (index == 1) {
- if (field_representation().IsByte() ||
+ if (field_representation().IsInteger8() ||
+ field_representation().IsUInteger8() ||
+ field_representation().IsInteger16() ||
+ field_representation().IsUInteger16() ||
field_representation().IsInteger32()) {
return Representation::Integer32();
- } else if (field_representation().IsDouble() ||
- field_representation().IsSmi()) {
+ } else if (field_representation().IsDouble()) {
return field_representation();
+ } else if (field_representation().IsSmi()) {
+ if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
+ return Representation::Integer32();
+ }
+ return field_representation();
+ } else if (field_representation().IsExternal()) {
+ return Representation::External();
}
}
return Representation::Tagged();
}
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
+ if (!FLAG_use_write_barrier_elimination) return false;
new_space_dominator_ = dominator;
+ return false;
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -6335,6 +6546,7 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
HObjectAccess access() const { return access_; }
HValue* new_space_dominator() const { return new_space_dominator_; }
bool has_transition() const { return has_transition_; }
+ StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
Handle<Map> transition_map() const {
if (has_transition()) {
@@ -6353,6 +6565,16 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
}
SetOperandAt(2, map_constant);
has_transition_ = true;
+ is_stable_ = map->is_stable();
+
+ if (is_stable_) {
+ map->AddDependentCompilationInfo(
+ DependentCode::kPrototypeCheckGroup, info);
+ }
+ }
+
+ bool is_stable() const {
+ return is_stable_;
}
bool NeedsWriteBarrier() {
@@ -6364,12 +6586,14 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
if (field_representation().IsInteger32()) return false;
if (field_representation().IsExternal()) return false;
return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
+ ReceiverObjectNeedsWriteBarrier(object(), value(),
+ new_space_dominator());
}
bool NeedsWriteBarrierForMap() {
if (IsSkipWriteBarrier()) return false;
- return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
+ return ReceiverObjectNeedsWriteBarrier(object(), transition(),
+ new_space_dominator());
}
Representation field_representation() const {
@@ -6383,21 +6607,30 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
private:
HStoreNamedField(HValue* obj,
HObjectAccess access,
- HValue* val)
+ HValue* val,
+ StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
: access_(access),
new_space_dominator_(NULL),
write_barrier_mode_(UPDATE_WRITE_BARRIER),
- has_transition_(false) {
+ has_transition_(false),
+ is_stable_(false),
+ store_mode_(store_mode) {
+ // Stores to a non existing in-object property are allowed only to the
+ // newly allocated objects (via HAllocate or HInnerAllocatedObject).
+ ASSERT(!access.IsInobject() || access.existing_inobject_property() ||
+ obj->IsAllocate() || obj->IsInnerAllocatedObject());
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetOperandAt(2, obj);
- access.SetGVNFlags(this, true);
+ access.SetGVNFlags(this, STORE);
}
HObjectAccess access_;
HValue* new_space_dominator_;
WriteBarrierMode write_barrier_mode_ : 1;
bool has_transition_ : 1;
+ bool is_stable_ : 1;
+ StoreFieldOrKeyedMode store_mode_ : 1;
};
@@ -6444,12 +6677,15 @@ class HStoreKeyed V8_FINAL
public:
DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
ElementsKind);
+ DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*,
+ ElementsKind, StoreFieldOrKeyedMode);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- // kind_fast: tagged[int32] = tagged
- // kind_double: tagged[int32] = double
- // kind_smi : tagged[int32] = smi
- // kind_external: external[int32] = (double | int32)
+ // kind_fast: tagged[int32] = tagged
+ // kind_double: tagged[int32] = double
+ // kind_smi : tagged[int32] = smi
+ // kind_fixed_typed_array: tagged[int32] = (double | int32)
+ // kind_external: external[int32] = (double | int32)
if (index == 0) {
return is_external() ? Representation::External()
: Representation::Tagged();
@@ -6462,31 +6698,45 @@ class HStoreKeyed V8_FINAL
if (IsDoubleOrFloatElementsKind(elements_kind())) {
return Representation::Double();
}
-
+ if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
+ return Representation::Integer32();
+ }
if (IsFastSmiElementsKind(elements_kind())) {
return Representation::Smi();
}
- return is_external() ? Representation::Integer32()
- : Representation::Tagged();
+ return is_external() || is_fixed_typed_array()
+ ? Representation::Integer32()
+ : Representation::Tagged();
}
bool is_external() const {
return IsExternalArrayElementsKind(elements_kind());
}
+ bool is_fixed_typed_array() const {
+ return IsFixedTypedArrayElementsKind(elements_kind());
+ }
+
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+
virtual Representation observed_input_representation(int index) V8_OVERRIDE {
if (index < 2) return RequiredInputRepresentation(index);
if (IsUninitialized()) {
return Representation::None();
}
- if (IsFastSmiElementsKind(elements_kind())) {
- return Representation::Smi();
- }
if (IsDoubleOrFloatElementsKind(elements_kind())) {
return Representation::Double();
}
- if (is_external()) {
+ if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
+ return Representation::Integer32();
+ }
+ if (IsFastSmiElementsKind(elements_kind())) {
+ return Representation::Smi();
+ }
+ if (is_typed_elements()) {
return Representation::Integer32();
}
// For fast object elements kinds, don't assume anything.
@@ -6499,6 +6749,7 @@ class HStoreKeyed V8_FINAL
bool value_is_smi() const {
return IsFastSmiElementsKind(elements_kind_);
}
+ StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
@@ -6518,10 +6769,11 @@ class HStoreKeyed V8_FINAL
return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
}
- virtual void HandleSideEffectDominator(GVNFlag side_effect,
+ virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
new_space_dominator_ = dominator;
+ return false;
}
HValue* new_space_dominator() const { return new_space_dominator_; }
@@ -6531,7 +6783,8 @@ class HStoreKeyed V8_FINAL
return false;
} else {
return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(elements(), new_space_dominator());
+ ReceiverObjectNeedsWriteBarrier(elements(), value(),
+ new_space_dominator());
}
}
@@ -6543,34 +6796,44 @@ class HStoreKeyed V8_FINAL
private:
HStoreKeyed(HValue* obj, HValue* key, HValue* val,
- ElementsKind elements_kind)
+ ElementsKind elements_kind,
+ StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
: elements_kind_(elements_kind),
index_offset_(0),
is_dehoisted_(false),
is_uninitialized_(false),
+ store_mode_(store_mode),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
+ ASSERT(store_mode != STORE_TO_INITIALIZED_ENTRY ||
+ elements_kind == FAST_SMI_ELEMENTS);
+
if (IsFastObjectElementsKind(elements_kind)) {
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnNewSpacePromotion);
+ SetDependsOnFlag(kNewSpacePromotion);
}
if (is_external()) {
- SetGVNFlag(kChangesExternalMemory);
+ SetChangesFlag(kExternalMemory);
SetFlag(kAllowUndefinedAsNaN);
} else if (IsFastDoubleElementsKind(elements_kind)) {
- SetGVNFlag(kChangesDoubleArrayElements);
+ SetChangesFlag(kDoubleArrayElements);
} else if (IsFastSmiElementsKind(elements_kind)) {
- SetGVNFlag(kChangesArrayElements);
+ SetChangesFlag(kArrayElements);
+ } else if (is_fixed_typed_array()) {
+ SetChangesFlag(kTypedArrayElements);
+ SetFlag(kAllowUndefinedAsNaN);
} else {
- SetGVNFlag(kChangesArrayElements);
+ SetChangesFlag(kArrayElements);
}
// EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
- if (elements_kind >= EXTERNAL_BYTE_ELEMENTS &&
- elements_kind <= EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ if ((elements_kind >= EXTERNAL_INT8_ELEMENTS &&
+ elements_kind <= EXTERNAL_UINT32_ELEMENTS) ||
+ (elements_kind >= UINT8_ELEMENTS &&
+ elements_kind <= INT32_ELEMENTS)) {
SetFlag(kTruncatingToInt32);
}
}
@@ -6579,6 +6842,7 @@ class HStoreKeyed V8_FINAL
uint32_t index_offset_;
bool is_dehoisted_ : 1;
bool is_uninitialized_ : 1;
+ StoreFieldOrKeyedMode store_mode_: 1;
HValue* new_space_dominator_;
};
@@ -6666,10 +6930,10 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(0, object);
SetOperandAt(1, context);
SetFlag(kUseGVN);
- SetGVNFlag(kChangesElementsKind);
+ SetChangesFlag(kElementsKind);
if (!IsSimpleMapChangeTransition(from_kind_, to_kind_)) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kElementsPointer);
+ SetChangesFlag(kNewSpacePromotion);
}
set_representation(Representation::Tagged());
}
@@ -6687,45 +6951,55 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
HValue* context,
HValue* left,
HValue* right,
- StringAddFlags flags = STRING_ADD_CHECK_NONE);
+ PretenureFlag pretenure_flag = NOT_TENURED,
+ StringAddFlags flags = STRING_ADD_CHECK_BOTH,
+ Handle<AllocationSite> allocation_site =
+ Handle<AllocationSite>::null());
StringAddFlags flags() const { return flags_; }
+ PretenureFlag pretenure_flag() const { return pretenure_flag_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(StringAdd)
protected:
- virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ return flags_ == HStringAdd::cast(other)->flags_ &&
+ pretenure_flag_ == HStringAdd::cast(other)->pretenure_flag_;
+ }
private:
- HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags)
- : HBinaryOperation(context, left, right, HType::String()), flags_(flags) {
+ HStringAdd(HValue* context,
+ HValue* left,
+ HValue* right,
+ PretenureFlag pretenure_flag,
+ StringAddFlags flags,
+ Handle<AllocationSite> allocation_site)
+ : HBinaryOperation(context, left, right, HType::String()),
+ flags_(flags), pretenure_flag_(pretenure_flag) {
set_representation(Representation::Tagged());
- if (MightHaveSideEffects()) {
- SetAllSideEffects();
- } else {
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetFlag(kUseGVN);
+ SetDependsOnFlag(kMaps);
+ SetChangesFlag(kNewSpacePromotion);
+ if (FLAG_trace_pretenuring) {
+ PrintF("HStringAdd with AllocationSite %p %s\n",
+ allocation_site.is_null()
+ ? static_cast<void*>(NULL)
+ : static_cast<void*>(*allocation_site),
+ pretenure_flag == TENURED ? "tenured" : "not tenured");
}
}
- bool MightHaveSideEffects() const {
- return flags_ != STRING_ADD_CHECK_NONE &&
- (left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved());
- }
-
// No side-effects except possible allocation:
- // NOTE: this instruction does not call ToString() on its inputs, when flags_
- // is set to STRING_ADD_CHECK_NONE.
- virtual bool IsDeletable() const V8_OVERRIDE {
- return !MightHaveSideEffects();
- }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
const StringAddFlags flags_;
+ const PretenureFlag pretenure_flag_;
};
@@ -6762,8 +7036,9 @@ class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(2, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kStringChars);
+ SetChangesFlag(kNewSpacePromotion);
}
// No side effects: runtime function assumes string + number inputs.
@@ -6797,7 +7072,7 @@ class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, char_code);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -6906,7 +7181,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
language_mode_(shared->language_mode()) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -6977,7 +7252,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
private:
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
// This instruction is not marked as kChangesMaps, but does
// change the map of the input operand. Use it only when creating
@@ -6993,73 +7268,109 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
};
-class HValueOf V8_FINAL : public HUnaryOperation {
+class HDateField V8_FINAL : public HUnaryOperation {
public:
- DECLARE_INSTRUCTION_FACTORY_P1(HValueOf, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
+
+ Smi* index() const { return index_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField)
private:
- explicit HValueOf(HValue* value) : HUnaryOperation(value) {
+ HDateField(HValue* date, Smi* index)
+ : HUnaryOperation(date), index_(index) {
set_representation(Representation::Tagged());
}
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ Smi* index_;
};
-class HDateField V8_FINAL : public HUnaryOperation {
+class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
-
- Smi* index() const { return index_; }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ String::Encoding encoding,
+ HValue* string,
+ HValue* index);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ return (index == 0) ? Representation::Tagged()
+ : Representation::Integer32();
}
- DECLARE_CONCRETE_INSTRUCTION(DateField)
+ String::Encoding encoding() const { return encoding_; }
+ HValue* string() const { return OperandAt(0); }
+ HValue* index() const { return OperandAt(1); }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ return encoding() == HSeqStringGetChar::cast(other)->encoding();
+ }
+
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE {
+ if (encoding() == String::ONE_BYTE_ENCODING) {
+ return new(zone) Range(0, String::kMaxOneByteCharCode);
+ } else {
+ ASSERT_EQ(String::TWO_BYTE_ENCODING, encoding());
+ return new(zone) Range(0, String::kMaxUtf16CodeUnit);
+ }
+ }
private:
- HDateField(HValue* date, Smi* index)
- : HUnaryOperation(date), index_(index) {
- set_representation(Representation::Tagged());
+ HSeqStringGetChar(String::Encoding encoding,
+ HValue* string,
+ HValue* index) : encoding_(encoding) {
+ SetOperandAt(0, string);
+ SetOperandAt(1, index);
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ SetDependsOnFlag(kStringChars);
}
- Smi* index_;
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ String::Encoding encoding_;
};
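
HSeqStringGetChar's InferRange override bounds the loaded character by the string encoding: one-byte strings yield 0..kMaxOneByteCharCode, two-byte strings 0..kMaxUtf16CodeUnit. The same idea, sketched without V8's Range and Zone machinery (the numeric limits are stated here as assumptions of the sketch):

#include <utility>

enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

// The value range a sequential-string character load can produce, by encoding.
static std::pair<int, int> CharLoadRange(Encoding encoding) {
  if (encoding == ONE_BYTE_ENCODING) return std::make_pair(0, 0xFF);
  return std::make_pair(0, 0xFFFF);
}
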
-class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<3> {
+class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
public:
- DECLARE_INSTRUCTION_FACTORY_P4(HSeqStringSetChar, String::Encoding,
- HValue*, HValue*, HValue*);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(
+ HSeqStringSetChar, String::Encoding,
+ HValue*, HValue*, HValue*);
String::Encoding encoding() { return encoding_; }
- HValue* string() { return OperandAt(0); }
- HValue* index() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
+ HValue* context() { return OperandAt(0); }
+ HValue* string() { return OperandAt(1); }
+ HValue* index() { return OperandAt(2); }
+ HValue* value() { return OperandAt(3); }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return (index == 0) ? Representation::Tagged()
+ return (index <= 1) ? Representation::Tagged()
: Representation::Integer32();
}
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
private:
- HSeqStringSetChar(String::Encoding encoding,
+ HSeqStringSetChar(HValue* context,
+ String::Encoding encoding,
HValue* string,
HValue* index,
HValue* value) : encoding_(encoding) {
- SetOperandAt(0, string);
- SetOperandAt(1, index);
- SetOperandAt(2, value);
+ SetOperandAt(0, context);
+ SetOperandAt(1, string);
+ SetOperandAt(2, index);
+ SetOperandAt(3, value);
set_representation(Representation::Tagged());
+ SetChangesFlag(kStringChars);
}
String::Encoding encoding_;
@@ -7086,6 +7397,8 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
protected:
+ virtual int RedefinedOperandIndex() { return 0; }
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return true;
}
@@ -7097,8 +7410,8 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, map);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
}
};
diff --git a/deps/v8/src/hydrogen-load-elimination.cc b/deps/v8/src/hydrogen-load-elimination.cc
index 3337188f9a..222811678b 100644
--- a/deps/v8/src/hydrogen-load-elimination.cc
+++ b/deps/v8/src/hydrogen-load-elimination.cc
@@ -43,7 +43,6 @@ static const int kMaxTrackedObjects = 5;
class HFieldApproximation : public ZoneObject {
public: // Just a data blob.
HValue* object_;
- HLoadNamedField* last_load_;
HValue* last_value_;
HFieldApproximation* next_;
@@ -52,7 +51,6 @@ class HFieldApproximation : public ZoneObject {
if (this == NULL) return NULL;
HFieldApproximation* copy = new(zone) HFieldApproximation();
copy->object_ = this->object_;
- copy->last_load_ = this->last_load_;
copy->last_value_ = this->last_value_;
copy->next_ = this->next_->Copy(zone);
return copy;
@@ -100,26 +98,33 @@ class HLoadEliminationTable : public ZoneObject {
}
break;
}
+ case HValue::kTransitionElementsKind: {
+ HTransitionElementsKind* t = HTransitionElementsKind::cast(instr);
+ HValue* object = t->object()->ActualValue();
+ KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL);
+ KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ break;
+ }
default: {
- if (instr->CheckGVNFlag(kChangesInobjectFields)) {
+ if (instr->CheckChangesFlag(kInobjectFields)) {
TRACE((" kill-all i%d\n", instr->id()));
Kill();
break;
}
- if (instr->CheckGVNFlag(kChangesMaps)) {
+ if (instr->CheckChangesFlag(kMaps)) {
TRACE((" kill-maps i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
}
- if (instr->CheckGVNFlag(kChangesElementsKind)) {
+ if (instr->CheckChangesFlag(kElementsKind)) {
TRACE((" kill-elements-kind i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckGVNFlag(kChangesElementsPointer)) {
+ if (instr->CheckChangesFlag(kElementsPointer)) {
TRACE((" kill-elements i%d\n", instr->id()));
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckGVNFlag(kChangesOsrEntries)) {
+ if (instr->CheckChangesFlag(kOsrEntries)) {
TRACE((" kill-osr i%d\n", instr->id()));
Kill();
}
@@ -134,8 +139,34 @@ class HLoadEliminationTable : public ZoneObject {
return this;
}
- // Support for global analysis with HFlowEngine: Copy state to sucessor block.
- HLoadEliminationTable* Copy(HBasicBlock* succ, Zone* zone) {
+ // Support for global analysis with HFlowEngine: Merge given state with
+ // the other incoming state.
+ static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state,
+ HBasicBlock* succ_block,
+ HLoadEliminationTable* pred_state,
+ HBasicBlock* pred_block,
+ Zone* zone) {
+ ASSERT(pred_state != NULL);
+ if (succ_state == NULL) {
+ return pred_state->Copy(succ_block, pred_block, zone);
+ } else {
+ return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+ }
+ }
+
+ // Support for global analysis with HFlowEngine: Given state merged with all
+ // the other incoming states, prepare it for use.
+ static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
+ HBasicBlock* block,
+ Zone* zone) {
+ ASSERT(state != NULL);
+ return state;
+ }
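
The static Merge entry point separates the first predecessor reaching a block (no successor state yet, so the predecessor's table is copied) from later predecessors (states are merged pairwise). The control flow of that convention, sketched with a placeholder state type rather than HLoadEliminationTable:

// Placeholder type; Copy and MergeWith stand in for the table's
// Copy()/Merge() members above.
struct State {
  State* Copy() const { return new State(*this); }
  State* MergeWith(const State* other) { (void)other; return this; }
};

// First predecessor seeds the successor's state; later ones merge into it.
static State* Merge(State* succ_state, State* pred_state) {
  return succ_state == nullptr ? pred_state->Copy()
                               : succ_state->MergeWith(pred_state);
}
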
+
+ private:
+ // Copy state to successor block.
+ HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block,
+ Zone* zone) {
HLoadEliminationTable* copy =
new(zone) HLoadEliminationTable(zone, aliasing_);
copy->EnsureFields(fields_.length());
@@ -149,10 +180,9 @@ class HLoadEliminationTable : public ZoneObject {
return copy;
}
- // Support for global analysis with HFlowEngine: Merge this state with
- // the other incoming state.
- HLoadEliminationTable* Merge(HBasicBlock* succ,
- HLoadEliminationTable* that, Zone* zone) {
+ // Merge this state with the other incoming state.
+ HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that,
+ HBasicBlock* that_block, Zone* zone) {
if (that->fields_.length() < fields_.length()) {
// Drop fields not in the other table.
fields_.Rewind(that->fields_.length());
@@ -178,6 +208,10 @@ class HLoadEliminationTable : public ZoneObject {
approx = approx->next_;
}
}
+ if (FLAG_trace_load_elimination) {
+ TRACE((" merge-to B%d\n", succ->block_id()));
+ Print();
+ }
return this;
}
@@ -189,6 +223,10 @@ class HLoadEliminationTable : public ZoneObject {
// load or store for this object and field exists, return the new value with
// which the load should be replaced. Otherwise, return {instr}.
HValue* load(HLoadNamedField* instr) {
+ // There must be no loads from non observable in-object properties.
+ ASSERT(!instr->access().IsInobject() ||
+ instr->access().existing_inobject_property());
+
int field = FieldOf(instr->access());
if (field < 0) return instr;
@@ -197,12 +235,14 @@ class HLoadEliminationTable : public ZoneObject {
if (approx->last_value_ == NULL) {
// Load is not redundant. Fill out a new entry.
- approx->last_load_ = instr;
approx->last_value_ = instr;
return instr;
- } else {
+ } else if (approx->last_value_->block()->EqualToOrDominates(
+ instr->block())) {
// Eliminate the load. Reuse previously stored value or load instruction.
return approx->last_value_;
+ } else {
+ return instr;
}
}
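
The load() change above only reuses a remembered value when the block defining that value dominates the block of the new load; with the flow-engine merge, a remembered value may originate in just one predecessor and must not be reused on paths it does not dominate. A schematic version of the guard, with a hypothetical dominance query standing in for HBasicBlock::EqualToOrDominates:

// Hypothetical types: a cached approximation per (object, field) and a
// dominance oracle supplied by the caller.
struct Approximation { int value_block; int value_id; bool has_value; };

typedef bool (*DominatesFn)(int definition_block, int use_block);

// Returns the id of the value to use for the load, or load_id itself when
// the cached value cannot safely replace it.
static int LoadOrReuse(Approximation* a, int load_block, int load_id,
                       DominatesFn dominates) {
  if (!a->has_value) {                 // not redundant: remember this load
    a->has_value = true;
    a->value_block = load_block;
    a->value_id = load_id;
    return load_id;
  }
  if (dominates(a->value_block, load_block)) return a->value_id;  // reuse
  return load_id;  // cached value does not dominate this load: keep it
}
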
@@ -211,18 +251,26 @@ class HLoadEliminationTable : public ZoneObject {
// the stored values are the same), return NULL indicating that this store
// instruction is redundant. Otherwise, return {instr}.
HValue* store(HStoreNamedField* instr) {
+ if (instr->access().IsInobject() &&
+ !instr->access().existing_inobject_property()) {
+ TRACE((" skipping non existing property initialization store\n"));
+ return instr;
+ }
+
int field = FieldOf(instr->access());
if (field < 0) return KillIfMisaligned(instr);
HValue* object = instr->object()->ActualValue();
HValue* value = instr->value();
- // Kill non-equivalent may-alias entries.
- KillFieldInternal(object, field, value);
if (instr->has_transition()) {
- // A transition store alters the map of the object.
- // TODO(titzer): remember the new map (a constant) for the object.
+ // A transition introduces a new field and alters the map of the object.
+ // Since the field in the object is new, it cannot alias existing entries.
+ // TODO(titzer): introduce a constant for the new map and remember it.
KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ } else {
+ // Kill non-equivalent may-alias entries.
+ KillFieldInternal(object, field, value);
}
HFieldApproximation* approx = FindOrCreate(object, field);
@@ -231,7 +279,6 @@ class HLoadEliminationTable : public ZoneObject {
return NULL;
} else {
// The store is not redundant. Update the entry.
- approx->last_load_ = NULL;
approx->last_value_ = value;
return instr;
}
@@ -272,9 +319,7 @@ class HLoadEliminationTable : public ZoneObject {
KillFieldInternal(object, field, NULL);
// Kill the next field in case of overlap.
- int size = kPointerSize;
- if (access.representation().IsByte()) size = 1;
- else if (access.representation().IsInteger32()) size = 4;
+ int size = access.representation().size();
int next_field = (offset + size - 1) / kPointerSize;
if (next_field != field) KillFieldInternal(object, next_field, NULL);
}
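
KillIfMisaligned now derives the store width from representation().size() instead of special-casing byte and int32 stores, and still kills the neighbouring field when the store straddles a pointer-size boundary. The index arithmetic in isolation, with kPointerSize fixed to 8 purely for this example:

#include <cassert>

static const int kPointerSize = 8;  // assumption for the example (64-bit)

// Which pointer-size fields a store of `size` bytes at `offset` begins and
// ends in; when they differ, the next field must be invalidated too.
static void FieldsTouched(int offset, int size, int* first, int* last) {
  *first = offset / kPointerSize;
  *last = (offset + size - 1) / kPointerSize;
}

int main() {
  int first, last;
  FieldsTouched(12, 8, &first, &last);  // an 8-byte store at offset 12
  assert(first == 1 && last == 2);      // straddles fields 1 and 2
  return 0;
}
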
@@ -316,7 +361,6 @@ class HLoadEliminationTable : public ZoneObject {
// Insert the entry at the head of the list.
approx->object_ = object;
- approx->last_load_ = NULL;
approx->last_value_ = NULL;
approx->next_ = fields_[field];
fields_[field] = approx;
@@ -351,7 +395,9 @@ class HLoadEliminationTable : public ZoneObject {
bool Equal(HValue* a, HValue* b) {
if (a == b) return true;
- if (a != NULL && b != NULL) return a->Equals(b);
+ if (a != NULL && b != NULL && a->CheckFlag(HValue::kUseGVN)) {
+ return a->Equals(b);
+ }
return false;
}
@@ -397,7 +443,6 @@ class HLoadEliminationTable : public ZoneObject {
PrintF(" field %d: ", i);
for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
PrintF("[o%d =", a->object_->id());
- if (a->last_load_ != NULL) PrintF(" L%d", a->last_load_->id());
if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
PrintF("] ");
}
@@ -415,11 +460,7 @@ class HLoadEliminationTable : public ZoneObject {
class HLoadEliminationEffects : public ZoneObject {
public:
explicit HLoadEliminationEffects(Zone* zone)
- : zone_(zone),
- maps_stored_(false),
- fields_stored_(false),
- elements_stored_(false),
- stores_(5, zone) { }
+ : zone_(zone), stores_(5, zone) { }
inline bool Disabled() {
return false; // Effects are _not_ disabled.
@@ -427,37 +468,25 @@ class HLoadEliminationEffects : public ZoneObject {
// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
- switch (instr->opcode()) {
- case HValue::kStoreNamedField: {
- stores_.Add(HStoreNamedField::cast(instr), zone_);
- break;
- }
- case HValue::kOsrEntry: {
- // Kill everything. Loads must not be hoisted past the OSR entry.
- maps_stored_ = true;
- fields_stored_ = true;
- elements_stored_ = true;
- }
- default: {
- fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
- maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
- maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
- elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
- elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
- }
+ if (instr->IsStoreNamedField()) {
+ stores_.Add(HStoreNamedField::cast(instr), zone_);
+ } else {
+ flags_.Add(instr->ChangesFlags());
}
}
// Apply these effects to the given load elimination table.
void Apply(HLoadEliminationTable* table) {
- if (fields_stored_) {
+ // Loads must not be hoisted past the OSR entry, therefore we kill
+ // everything if we see an OSR entry.
+ if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) {
table->Kill();
return;
}
- if (maps_stored_) {
+ if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
table->KillOffset(JSObject::kMapOffset);
}
- if (elements_stored_) {
+ if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) {
table->KillOffset(JSObject::kElementsOffset);
}
@@ -469,9 +498,7 @@ class HLoadEliminationEffects : public ZoneObject {
// Union these effects with the other effects.
void Union(HLoadEliminationEffects* that, Zone* zone) {
- maps_stored_ |= that->maps_stored_;
- fields_stored_ |= that->fields_stored_;
- elements_stored_ |= that->elements_stored_;
+ flags_.Add(that->flags_);
for (int i = 0; i < that->stores_.length(); i++) {
stores_.Add(that->stores_[i], zone);
}
@@ -479,9 +506,7 @@ class HLoadEliminationEffects : public ZoneObject {
private:
Zone* zone_;
- bool maps_stored_ : 1;
- bool fields_stored_ : 1;
- bool elements_stored_ : 1;
+ GVNFlagSet flags_;
ZoneList<HStoreNamedField*> stores_;
};
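
Aside (editorial note, not part of the patch): the load-elimination changes above replace the three per-effect booleans (maps_stored_, fields_stored_, elements_stored_) with a single flag set that is unioned across effects and queried when the effects are applied. A minimal stand-alone sketch of that pattern, using hypothetical flag names rather than V8's real GVNFlagSet, could look like this:

#include <bitset>

// Hypothetical "changes" flags; the real ones come from V8's GVN machinery.
enum ChangeFlag {
  kMaps, kElementsKind, kElementsPointer, kInobjectFields, kOsrEntries,
  kNumChangeFlags
};

class EffectFlagSet {
 public:
  void Add(ChangeFlag f) { bits_.set(f); }
  void Add(const EffectFlagSet& other) { bits_ |= other.bits_; }  // union
  bool Contains(ChangeFlag f) const { return bits_.test(f); }
 private:
  std::bitset<kNumChangeFlags> bits_;
};

// Applying the effects then reduces to a few Contains() queries, e.g. kill
// the cached map offset when either the maps or the elements kind changed.
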
diff --git a/deps/v8/src/hydrogen-mark-unreachable.h b/deps/v8/src/hydrogen-mark-unreachable.h
index e9459d5208..9ecc6e9f16 100644
--- a/deps/v8/src/hydrogen-mark-unreachable.h
+++ b/deps/v8/src/hydrogen-mark-unreachable.h
@@ -37,7 +37,7 @@ namespace internal {
class HMarkUnreachableBlocksPhase : public HPhase {
public:
explicit HMarkUnreachableBlocksPhase(HGraph* graph)
- : HPhase("H_Mark unrechable blocks", graph) { }
+ : HPhase("H_Mark unreachable blocks", graph) { }
void Run();
diff --git a/deps/v8/src/hydrogen-minus-zero.cc b/deps/v8/src/hydrogen-minus-zero.cc
index 28ae6eba40..316e0f5077 100644
--- a/deps/v8/src/hydrogen-minus-zero.cc
+++ b/deps/v8/src/hydrogen-minus-zero.cc
@@ -49,6 +49,14 @@ void HComputeMinusZeroChecksPhase::Run() {
PropagateMinusZeroChecks(change->value());
visited_.Clear();
}
+ } else if (current->IsCompareMinusZeroAndBranch()) {
+ HCompareMinusZeroAndBranch* check =
+ HCompareMinusZeroAndBranch::cast(current);
+ if (check->value()->representation().IsSmiOrInteger32()) {
+ ASSERT(visited_.IsEmpty());
+ PropagateMinusZeroChecks(check->value());
+ visited_.Clear();
+ }
}
}
}
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
index d0c9b58258..7d0720c604 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -61,10 +61,11 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(
value, to, is_truncating_to_smi, is_truncating_to_int);
- if (use_value->position() != RelocInfo::kNoPosition) {
- new_value->set_position(use_value->position());
+ if (!use_value->operand_position(use_index).IsUnknown()) {
+ new_value->set_position(use_value->operand_position(use_index));
} else {
- ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !graph()->info()->IsOptimizing());
}
}
diff --git a/deps/v8/src/hydrogen-sce.cc b/deps/v8/src/hydrogen-sce.cc
index a6995f647a..70b0a0c082 100644
--- a/deps/v8/src/hydrogen-sce.cc
+++ b/deps/v8/src/hydrogen-sce.cc
@@ -43,7 +43,7 @@ void HStackCheckEliminationPhase::Run() {
HBasicBlock* dominator = back_edge;
while (true) {
for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) {
- if (it.Current()->IsCall()) {
+ if (it.Current()->HasStackCheck()) {
block->loop_information()->stack_check()->Eliminate();
break;
}
diff --git a/deps/v8/src/hydrogen-uint32-analysis.cc b/deps/v8/src/hydrogen-uint32-analysis.cc
index 8de887d6f8..63b1de697e 100644
--- a/deps/v8/src/hydrogen-uint32-analysis.cc
+++ b/deps/v8/src/hydrogen-uint32-analysis.cc
@@ -54,9 +54,9 @@ bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
// operation.
if (store->value() == val) {
// Clamping or a conversion to double should have been inserted.
- ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_UINT8_CLAMPED_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_FLOAT32_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_FLOAT64_ELEMENTS);
return true;
}
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 9587dd3440..b471faa9b2 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -58,6 +58,7 @@
#include "hydrogen-uint32-analysis.h"
#include "lithium-allocator.h"
#include "parser.h"
+#include "runtime.h"
#include "scopeinfo.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -67,6 +68,8 @@
#include "ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-codegen-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -140,12 +143,13 @@ void HBasicBlock::RemovePhi(HPhi* phi) {
}
-void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
+void HBasicBlock::AddInstruction(HInstruction* instr,
+ HSourcePosition position) {
ASSERT(!IsStartBlock() || !IsFinished());
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
- if (position != RelocInfo::kNoPosition) {
+ if (!position.IsUnknown()) {
instr->set_position(position);
}
if (first_ == NULL) {
@@ -153,10 +157,10 @@ void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
ASSERT(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
- if (position != RelocInfo::kNoPosition) {
+ if (!position.IsUnknown()) {
entry->set_position(position);
} else {
- ASSERT(!FLAG_emit_opt_code_positions ||
+ ASSERT(!FLAG_hydrogen_track_positions ||
!graph()->info()->IsOptimizing());
}
first_ = last_ = entry;
@@ -209,7 +213,7 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
}
-void HBasicBlock::Finish(HControlInstruction* end, int position) {
+void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
ASSERT(!IsFinished());
AddInstruction(end, position);
end_ = end;
@@ -220,11 +224,11 @@ void HBasicBlock::Finish(HControlInstruction* end, int position) {
void HBasicBlock::Goto(HBasicBlock* block,
- int position,
+ HSourcePosition position,
FunctionState* state,
bool add_simulate) {
bool drop_extra = state != NULL &&
- state->inlining_kind() == DROP_EXTRA_ON_RETURN;
+ state->inlining_kind() == NORMAL_RETURN;
if (block->IsInlineReturnTarget()) {
HEnvironment* env = last_environment();
@@ -243,9 +247,9 @@ void HBasicBlock::Goto(HBasicBlock* block,
void HBasicBlock::AddLeaveInlined(HValue* return_value,
FunctionState* state,
- int position) {
+ HSourcePosition position) {
HBasicBlock* target = state->function_return();
- bool drop_extra = state->inlining_kind() == DROP_EXTRA_ON_RETURN;
+ bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
@@ -301,6 +305,12 @@ bool HBasicBlock::Dominates(HBasicBlock* other) const {
}
+bool HBasicBlock::EqualToOrDominates(HBasicBlock* other) const {
+ if (this == other) return true;
+ return Dominates(other);
+}
+
+
int HBasicBlock::LoopNestingDepth() const {
const HBasicBlock* current = this;
int result = (current->IsLoopHeader()) ? 1 : 0;
@@ -330,6 +340,15 @@ void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
}
+void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
+ ASSERT(IsFinished());
+ HBasicBlock* succ_block = end()->SuccessorAt(succ);
+
+ ASSERT(succ_block->predecessors()->length() == 1);
+ succ_block->MarkUnreachable();
+}
+
+
void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
if (HasPredecessor()) {
// Only loop header blocks can have a predecessor added after
@@ -605,7 +624,8 @@ void HGraph::Verify(bool do_full_verify) const {
block->predecessors()->first()->last_environment()->ast_id();
for (int k = 0; k < block->predecessors()->length(); k++) {
HBasicBlock* predecessor = block->predecessors()->at(k);
- ASSERT(predecessor->end()->IsGoto());
+ ASSERT(predecessor->end()->IsGoto() ||
+ predecessor->end()->IsDeoptimize());
ASSERT(predecessor->last_environment()->ast_id() == id);
}
}
@@ -745,19 +765,20 @@ bool HGraph::IsStandardConstant(HConstant* constant) {
HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder)
: builder_(builder),
finished_(false),
- deopt_then_(false),
- deopt_else_(false),
did_then_(false),
did_else_(false),
+ did_else_if_(false),
did_and_(false),
did_or_(false),
captured_(false),
needs_compare_(true),
+ pending_merge_block_(false),
split_edge_merge_block_(NULL),
- merge_block_(NULL) {
+ merge_at_join_blocks_(NULL),
+ normal_merge_at_join_block_count_(0),
+ deopt_merge_at_join_block_count_(0) {
HEnvironment* env = builder->environment();
first_true_block_ = builder->CreateBasicBlock(env->Copy());
- last_true_block_ = NULL;
first_false_block_ = builder->CreateBasicBlock(env->Copy());
}
@@ -767,19 +788,20 @@ HGraphBuilder::IfBuilder::IfBuilder(
HIfContinuation* continuation)
: builder_(builder),
finished_(false),
- deopt_then_(false),
- deopt_else_(false),
did_then_(false),
did_else_(false),
+ did_else_if_(false),
did_and_(false),
did_or_(false),
captured_(false),
needs_compare_(false),
+ pending_merge_block_(false),
first_true_block_(NULL),
- last_true_block_(NULL),
first_false_block_(NULL),
split_edge_merge_block_(NULL),
- merge_block_(NULL) {
+ merge_at_join_blocks_(NULL),
+ normal_merge_at_join_block_count_(0),
+ deopt_merge_at_join_block_count_(0) {
continuation->Continue(&first_true_block_,
&first_false_block_);
}
@@ -787,6 +809,20 @@ HGraphBuilder::IfBuilder::IfBuilder(
HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
HControlInstruction* compare) {
+ ASSERT(did_then_ == did_else_);
+ if (did_else_) {
+ // Handle if-then-elseif
+ did_else_if_ = true;
+ did_else_ = false;
+ did_then_ = false;
+ did_and_ = false;
+ did_or_ = false;
+ pending_merge_block_ = false;
+ split_edge_merge_block_ = NULL;
+ HEnvironment* env = builder_->environment();
+ first_true_block_ = builder_->CreateBasicBlock(env->Copy());
+ first_false_block_ = builder_->CreateBasicBlock(env->Copy());
+ }
if (split_edge_merge_block_ != NULL) {
HEnvironment* env = first_false_block_->last_environment();
HBasicBlock* split_edge =
@@ -842,29 +878,30 @@ void HGraphBuilder::IfBuilder::And() {
void HGraphBuilder::IfBuilder::CaptureContinuation(
HIfContinuation* continuation) {
+ ASSERT(!did_else_if_);
ASSERT(!finished_);
ASSERT(!captured_);
- HBasicBlock* true_block = last_true_block_ == NULL
- ? first_true_block_
- : last_true_block_;
- HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
- ? builder_->current_block()
- : first_false_block_;
+
+ HBasicBlock* true_block = NULL;
+ HBasicBlock* false_block = NULL;
+ Finish(&true_block, &false_block);
+ ASSERT(true_block != NULL);
+ ASSERT(false_block != NULL);
continuation->Capture(true_block, false_block);
captured_ = true;
+ builder_->set_current_block(NULL);
End();
}
void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
+ ASSERT(!did_else_if_);
ASSERT(!finished_);
ASSERT(!captured_);
- HBasicBlock* true_block = last_true_block_ == NULL
- ? first_true_block_
- : last_true_block_;
- HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
- ? builder_->current_block()
- : first_false_block_;
+ HBasicBlock* true_block = NULL;
+ HBasicBlock* false_block = NULL;
+ Finish(&true_block, &false_block);
+ merge_at_join_blocks_ = NULL;
if (true_block != NULL && !true_block->IsFinished()) {
ASSERT(continuation->IsTrueReachable());
builder_->GotoNoSimulate(true_block, continuation->true_branch());
@@ -895,6 +932,7 @@ void HGraphBuilder::IfBuilder::Then() {
builder_->FinishCurrentBlock(branch);
}
builder_->set_current_block(first_true_block_);
+ pending_merge_block_ = true;
}
@@ -902,20 +940,17 @@ void HGraphBuilder::IfBuilder::Else() {
ASSERT(did_then_);
ASSERT(!captured_);
ASSERT(!finished_);
- last_true_block_ = builder_->current_block();
+ AddMergeAtJoinBlock(false);
builder_->set_current_block(first_false_block_);
+ pending_merge_block_ = true;
did_else_ = true;
}
void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
ASSERT(did_then_);
- if (did_else_) {
- deopt_else_ = true;
- } else {
- deopt_then_ = true;
- }
builder_->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
+ AddMergeAtJoinBlock(true);
}
@@ -923,51 +958,99 @@ void HGraphBuilder::IfBuilder::Return(HValue* value) {
HValue* parameter_count = builder_->graph()->GetConstantMinus1();
builder_->FinishExitCurrentBlock(
builder_->New<HReturn>(value, parameter_count));
- if (did_else_) {
- first_false_block_ = NULL;
- } else {
- first_true_block_ = NULL;
+ AddMergeAtJoinBlock(false);
+}
+
+
+void HGraphBuilder::IfBuilder::AddMergeAtJoinBlock(bool deopt) {
+ if (!pending_merge_block_) return;
+ HBasicBlock* block = builder_->current_block();
+ ASSERT(block == NULL || !block->IsFinished());
+ MergeAtJoinBlock* record =
+ new(builder_->zone()) MergeAtJoinBlock(block, deopt,
+ merge_at_join_blocks_);
+ merge_at_join_blocks_ = record;
+ if (block != NULL) {
+ ASSERT(block->end() == NULL);
+ if (deopt) {
+ normal_merge_at_join_block_count_++;
+ } else {
+ deopt_merge_at_join_block_count_++;
+ }
+ }
+ builder_->set_current_block(NULL);
+ pending_merge_block_ = false;
+}
+
+
+void HGraphBuilder::IfBuilder::Finish() {
+ ASSERT(!finished_);
+ if (!did_then_) {
+ Then();
+ }
+ AddMergeAtJoinBlock(false);
+ if (!did_else_) {
+ Else();
+ AddMergeAtJoinBlock(false);
+ }
+ finished_ = true;
+}
+
+
+void HGraphBuilder::IfBuilder::Finish(HBasicBlock** then_continuation,
+ HBasicBlock** else_continuation) {
+ Finish();
+
+ MergeAtJoinBlock* else_record = merge_at_join_blocks_;
+ if (else_continuation != NULL) {
+ *else_continuation = else_record->block_;
+ }
+ MergeAtJoinBlock* then_record = else_record->next_;
+ if (then_continuation != NULL) {
+ *then_continuation = then_record->block_;
}
+ ASSERT(then_record->next_ == NULL);
}
void HGraphBuilder::IfBuilder::End() {
- if (!captured_) {
- ASSERT(did_then_);
- if (!did_else_) {
- last_true_block_ = builder_->current_block();
- }
- if (last_true_block_ == NULL || last_true_block_->IsFinished()) {
- ASSERT(did_else_);
- // Return on true. Nothing to do, just continue the false block.
- } else if (first_false_block_ == NULL ||
- (did_else_ && builder_->current_block()->IsFinished())) {
- // Deopt on false. Nothing to do except switching to the true block.
- builder_->set_current_block(last_true_block_);
- } else {
- merge_block_ = builder_->graph()->CreateBasicBlock();
- ASSERT(!finished_);
- if (!did_else_) Else();
- ASSERT(!last_true_block_->IsFinished());
- HBasicBlock* last_false_block = builder_->current_block();
- ASSERT(!last_false_block->IsFinished());
- if (deopt_then_) {
- builder_->GotoNoSimulate(last_false_block, merge_block_);
- builder_->PadEnvironmentForContinuation(last_true_block_,
- merge_block_);
- builder_->GotoNoSimulate(last_true_block_, merge_block_);
- } else {
- builder_->GotoNoSimulate(last_true_block_, merge_block_);
- if (deopt_else_) {
- builder_->PadEnvironmentForContinuation(last_false_block,
- merge_block_);
- }
- builder_->GotoNoSimulate(last_false_block, merge_block_);
+ if (captured_) return;
+ Finish();
+
+ int total_merged_blocks = normal_merge_at_join_block_count_ +
+ deopt_merge_at_join_block_count_;
+ ASSERT(total_merged_blocks >= 1);
+ HBasicBlock* merge_block = total_merged_blocks == 1
+ ? NULL : builder_->graph()->CreateBasicBlock();
+
+ // Merge non-deopt blocks first to ensure the environment has the right
+ // size for padding.
+ MergeAtJoinBlock* current = merge_at_join_blocks_;
+ while (current != NULL) {
+ if (!current->deopt_ && current->block_ != NULL) {
+ // If there is only one block that makes it through to the end of the
+ // if, then just set it as the current block and continue rather than
+ // creating an unnecessary merge block.
+ if (total_merged_blocks == 1) {
+ builder_->set_current_block(current->block_);
+ return;
}
- builder_->set_current_block(merge_block_);
+ builder_->GotoNoSimulate(current->block_, merge_block);
}
+ current = current->next_;
}
- finished_ = true;
+
+ // Merge deopt blocks, padding when necessary.
+ current = merge_at_join_blocks_;
+ while (current != NULL) {
+ if (current->deopt_ && current->block_ != NULL) {
+ current->block_->FinishExit(
+ HAbnormalExit::New(builder_->zone(), NULL),
+ HSourcePosition::Unknown());
+ }
+ current = current->next_;
+ }
+ builder_->set_current_block(merge_block);
}
@@ -1051,6 +1134,7 @@ void HGraphBuilder::LoopBuilder::Break() {
}
builder_->GotoNoSimulate(exit_trampoline_block_);
+ builder_->set_current_block(NULL);
}
@@ -1095,9 +1179,10 @@ HGraph* HGraphBuilder::CreateGraph() {
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
- ASSERT(!FLAG_emit_opt_code_positions ||
- position_ != RelocInfo::kNoPosition || !info_->IsOptimizing());
- current_block()->AddInstruction(instr, position_);
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !position_.IsUnknown() ||
+ !info_->IsOptimizing());
+ current_block()->AddInstruction(instr, source_position());
if (graph()->IsInsideNoSideEffectsScope()) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
@@ -1106,9 +1191,10 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
- ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
- position_ != RelocInfo::kNoPosition);
- current_block()->Finish(last, position_);
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !info_->IsOptimizing() ||
+ !position_.IsUnknown());
+ current_block()->Finish(last, source_position());
if (last->IsReturn() || last->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1116,9 +1202,9 @@ void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
- ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
- position_ != RelocInfo::kNoPosition);
- current_block()->FinishExit(instruction, position_);
+ ASSERT(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
+ !position_.IsUnknown());
+ current_block()->FinishExit(instruction, source_position());
if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1128,12 +1214,12 @@ void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
if (FLAG_native_code_counters && counter->Enabled()) {
HValue* reference = Add<HConstant>(ExternalReference(counter));
- HValue* old_value = Add<HLoadNamedField>(reference,
- HObjectAccess::ForCounter());
- HValue* new_value = Add<HAdd>(old_value, graph()->GetConstant1());
+ HValue* old_value = Add<HLoadNamedField>(
+ reference, static_cast<HValue*>(NULL), HObjectAccess::ForCounter());
+ HValue* new_value = AddUncasted<HAdd>(old_value, graph()->GetConstant1());
new_value->ClearFlag(HValue::kCanOverflow); // Ignore counter overflow
Add<HStoreNamedField>(reference, HObjectAccess::ForCounter(),
- new_value);
+ new_value, STORE_TO_INITIALIZED_ENTRY);
}
}
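
Aside (not part of the patch): the AddIncrementCounter() change above still emits the same load, add-one, store-back sequence, now via AddUncasted<> and an explicit initialized-entry store. A rough scalar sketch of what that sequence does, with overflow deliberately allowed to wrap:

#include <stdint.h>

void BumpCounter(int32_t* counter) {
  // Load, add one, store back; wraparound is intentionally ignored,
  // mirroring ClearFlag(HValue::kCanOverflow) on the HAdd above.
  uint32_t old_value = static_cast<uint32_t>(*counter);
  *counter = static_cast<int32_t>(old_value + 1u);
}
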
@@ -1142,7 +1228,7 @@ void HGraphBuilder::AddSimulate(BailoutId id,
RemovableSimulate removable) {
ASSERT(current_block() != NULL);
ASSERT(!graph()->IsInsideNoSideEffectsScope());
- current_block()->AddNewSimulate(id, removable);
+ current_block()->AddNewSimulate(id, source_position(), removable);
}
@@ -1168,58 +1254,49 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
}
-void HGraphBuilder::FinishExitWithHardDeoptimization(
- const char* reason, HBasicBlock* continuation) {
- PadEnvironmentForContinuation(current_block(), continuation);
+void HGraphBuilder::FinishExitWithHardDeoptimization(const char* reason) {
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
- if (graph()->IsInsideNoSideEffectsScope()) {
- GotoNoSimulate(continuation);
- } else {
- Goto(continuation);
- }
+ FinishExitCurrentBlock(New<HAbnormalExit>());
}
-void HGraphBuilder::PadEnvironmentForContinuation(
- HBasicBlock* from,
- HBasicBlock* continuation) {
- if (continuation->last_environment() != NULL) {
- // When merging from a deopt block to a continuation, resolve differences in
- // environment by pushing constant 0 and popping extra values so that the
- // environments match during the join. Push 0 since it has the most specific
- // representation, and will not influence representation inference of the
- // phi.
- int continuation_env_length = continuation->last_environment()->length();
- while (continuation_env_length != from->last_environment()->length()) {
- if (continuation_env_length > from->last_environment()->length()) {
- from->last_environment()->Push(graph()->GetConstant0());
- } else {
- from->last_environment()->Pop();
- }
- }
- } else {
- ASSERT(continuation->predecessors()->length() == 0);
- }
+HValue* HGraphBuilder::BuildCheckMap(HValue* obj, Handle<Map> map) {
+ return Add<HCheckMaps>(obj, map, top_info());
}
-HValue* HGraphBuilder::BuildCheckMap(HValue* obj, Handle<Map> map) {
- return Add<HCheckMaps>(obj, map, top_info());
+HValue* HGraphBuilder::BuildCheckString(HValue* string) {
+ if (!string->type().IsString()) {
+ ASSERT(!string->IsConstant() ||
+ !HConstant::cast(string)->HasStringValue());
+ BuildCheckHeapObject(string);
+ return Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
+ }
+ return string;
}
HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
if (object->type().IsJSObject()) return object;
+ if (function->IsConstant() &&
+ HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ HConstant::cast(function)->handle(isolate()));
+ SharedFunctionInfo* shared = f->shared();
+ if (!shared->is_classic_mode() || shared->native()) return object;
+ }
return Add<HWrapReceiver>(object, function);
}
-HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
- HValue* elements,
- ElementsKind kind,
- HValue* length,
- HValue* key,
- bool is_js_array) {
+HValue* HGraphBuilder::BuildCheckForCapacityGrow(
+ HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ HValue* length,
+ HValue* key,
+ bool is_js_array,
+ PropertyAccessType access_type) {
IfBuilder length_checker(this);
Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
@@ -1236,7 +1313,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
capacity_checker.Then();
HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap));
- HValue* max_capacity = Add<HAdd>(current_capacity, max_gap);
+ HValue* max_capacity = AddUncasted<HAdd>(current_capacity, max_gap);
IfBuilder key_checker(this);
key_checker.If<HCompareNumericAndBranch>(key, max_capacity, Token::LT);
key_checker.Then();
@@ -1262,6 +1339,13 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
new_length);
}
+ if (access_type == STORE && kind == FAST_SMI_ELEMENTS) {
+ HValue* checked_elements = environment()->Top();
+
+ // Write zero to ensure that the new element is initialized with some smi.
+ Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), kind);
+ }
+
length_checker.Else();
Add<HBoundsCheck>(key, length);
@@ -1280,7 +1364,8 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
IfBuilder cow_checker(this);
- cow_checker.If<HCompareMap>(elements, factory->fixed_cow_array_map());
+ cow_checker.If<HCompareMap>(
+ elements, factory->fixed_cow_array_map(), top_info());
cow_checker.Then();
HValue* capacity = AddLoadFixedArrayLength(elements);
@@ -1327,7 +1412,8 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
HInstruction* elements_length = AddLoadFixedArrayLength(elements);
HInstruction* array_length = is_jsarray
- ? Add<HLoadNamedField>(object, HObjectAccess::ForArrayLength(from_kind))
+ ? Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForArrayLength(from_kind))
: elements_length;
BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
@@ -1340,10 +1426,215 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
}
-HValue* HGraphBuilder::BuildNumberToString(HValue* object,
- Handle<Type> type) {
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
+ HValue* elements,
+ HValue* key,
+ HValue* hash,
+ HValue* mask,
+ int current_probe) {
+ if (current_probe == kNumberDictionaryProbes) {
+ return NULL;
+ }
+
+ int32_t offset = SeededNumberDictionary::GetProbeOffset(current_probe);
+ HValue* raw_index = (current_probe == 0)
+ ? hash
+ : AddUncasted<HAdd>(hash, Add<HConstant>(offset));
+ raw_index = AddUncasted<HBitwise>(Token::BIT_AND, raw_index, mask);
+ int32_t entry_size = SeededNumberDictionary::kEntrySize;
+ raw_index = AddUncasted<HMul>(raw_index, Add<HConstant>(entry_size));
+ raw_index->ClearFlag(HValue::kCanOverflow);
+
+ int32_t base_offset = SeededNumberDictionary::kElementsStartIndex;
+ HValue* key_index = AddUncasted<HAdd>(raw_index, Add<HConstant>(base_offset));
+ key_index->ClearFlag(HValue::kCanOverflow);
+
+ HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS);
+
+ IfBuilder key_compare(this);
+ key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key);
+ key_compare.Then();
+ {
+ // Key at the current probe doesn't match, try at the next probe.
+ HValue* result = BuildUncheckedDictionaryElementLoadHelper(
+ elements, key, hash, mask, current_probe + 1);
+ if (result == NULL) {
+ key_compare.Deopt("probes exhausted in keyed load dictionary lookup");
+ result = graph()->GetConstantUndefined();
+ } else {
+ Push(result);
+ }
+ }
+ key_compare.Else();
+ {
+ // Key at current probe matches. Details must be zero, otherwise the
+ // dictionary element requires special handling.
+ HValue* details_index = AddUncasted<HAdd>(
+ raw_index, Add<HConstant>(base_offset + 2));
+ details_index->ClearFlag(HValue::kCanOverflow);
+
+ HValue* details = Add<HLoadKeyed>(elements, details_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS);
+ IfBuilder details_compare(this);
+ details_compare.If<HCompareNumericAndBranch>(details,
+ graph()->GetConstant0(),
+ Token::NE);
+ details_compare.ThenDeopt("keyed load dictionary element not fast case");
+
+ details_compare.Else();
+ {
+ // Key matches and details are zero --> fast case. Load and return the
+ // value.
+ HValue* result_index = AddUncasted<HAdd>(
+ raw_index, Add<HConstant>(base_offset + 1));
+ result_index->ClearFlag(HValue::kCanOverflow);
+
+ Push(Add<HLoadKeyed>(elements, result_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS));
+ }
+ details_compare.End();
+ }
+ key_compare.End();
+
+ return Pop();
+}
+
+
+HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
+ int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
+ HValue* seed = Add<HConstant>(seed_value);
+ HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, index, seed);
+
+ // hash = ~hash + (hash << 15);
+ HValue* shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(15));
+ HValue* not_hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash,
+ graph()->GetConstantMinus1());
+ hash = AddUncasted<HAdd>(shifted_hash, not_hash);
+
+ // hash = hash ^ (hash >> 12);
+ shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(12));
+ hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+
+ // hash = hash + (hash << 2);
+ shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(2));
+ hash = AddUncasted<HAdd>(hash, shifted_hash);
+
+ // hash = hash ^ (hash >> 4);
+ shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(4));
+ hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+
+ // hash = hash * 2057;
+ hash = AddUncasted<HMul>(hash, Add<HConstant>(2057));
+ hash->ClearFlag(HValue::kCanOverflow);
+
+ // hash = hash ^ (hash >> 16);
+ shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(16));
+ return AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+}
+
+
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
+ HValue* key) {
+ HValue* elements = AddLoadElements(receiver);
+
+ HValue* hash = BuildElementIndexHash(key);
+
+ HValue* capacity = Add<HLoadKeyed>(
+ elements,
+ Add<HConstant>(NameDictionary::kCapacityIndex),
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS);
+
+ HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
+ mask->ChangeRepresentation(Representation::Integer32());
+ mask->ClearFlag(HValue::kCanOverflow);
+
+ return BuildUncheckedDictionaryElementLoadHelper(elements, key,
+ hash, mask, 0);
+}
+
+
+HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
+ HValue* index,
+ HValue* input) {
NoObservableSideEffectsScope scope(this);
+ // Compute the size of the RegExpResult followed by FixedArray with length.
+ HValue* size = length;
+ size = AddUncasted<HShl>(size, Add<HConstant>(kPointerSizeLog2));
+ size = AddUncasted<HAdd>(size, Add<HConstant>(static_cast<int32_t>(
+ JSRegExpResult::kSize + FixedArray::kHeaderSize)));
+
+ // Make sure the size does not exceed the max regular heap object size.
+ Add<HBoundsCheck>(size, Add<HConstant>(Page::kMaxRegularHeapObjectSize));
+
+ // Allocate the JSRegExpResult and the FixedArray in one step.
+ HValue* result = Add<HAllocate>(
+ size, HType::JSArray(), NOT_TENURED, JS_ARRAY_TYPE);
+
+ // Determine the elements FixedArray.
+ HValue* elements = Add<HInnerAllocatedObject>(
+ result, Add<HConstant>(JSRegExpResult::kSize));
+
+ // Initialize the JSRegExpResult header.
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ HValue* native_context = Add<HLoadNamedField>(
+ global_object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForGlobalObjectNativeContext());
+ AddStoreMapNoWriteBarrier(result, Add<HLoadNamedField>(
+ native_context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::REGEXP_RESULT_MAP_INDEX)));
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSArray::kPropertiesOffset),
+ Add<HConstant>(isolate()->factory()->empty_fixed_array()));
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
+ elements);
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSArray::kLengthOffset), length);
+
+ // Initialize the additional fields.
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kIndexOffset),
+ index);
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kInputOffset),
+ input);
+
+ // Initialize the elements header.
+ AddStoreMapConstantNoWriteBarrier(elements,
+ isolate()->factory()->fixed_array_map());
+ Add<HStoreNamedField>(elements, HObjectAccess::ForFixedArrayLength(), length);
+
+ // Initialize the elements contents with undefined.
+ LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
+ index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT);
+ {
+ Add<HStoreKeyed>(elements, index, graph()->GetConstantUndefined(),
+ FAST_ELEMENTS);
+ }
+ loop.EndBody();
+
+ return result;
+}
+
+
+HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
+ NoObservableSideEffectsScope scope(this);
+
+ // Convert constant numbers at compile time.
+ if (object->IsConstant() && HConstant::cast(object)->HasNumberValue()) {
+ Handle<Object> number = HConstant::cast(object)->handle(isolate());
+ Handle<String> result = isolate()->factory()->NumberToString(number);
+ return Add<HConstant>(result);
+ }
+
// Create a joinable continuation.
HIfContinuation found(graph()->CreateBasicBlock(),
graph()->CreateBasicBlock());
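
Aside (not part of the patch): the large hunk above inlines a seeded-number-dictionary lookup, and the new BuildElementIndexHash() emits the integer hash-mixing sequence spelled out in its comments. A stand-alone scalar version of that exact sequence, for reference:

#include <stdint.h>

// Same shift/xor/add/multiply steps as BuildElementIndexHash() above,
// operating on plain uint32_t values.
uint32_t ElementIndexHash(uint32_t index, uint32_t seed) {
  uint32_t hash = index ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}
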
@@ -1356,8 +1647,8 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
// contains two elements (number and string) for each cache entry.
HValue* mask = AddLoadFixedArrayLength(number_string_cache);
mask->set_type(HType::Smi());
- mask = Add<HSar>(mask, graph()->GetConstant1());
- mask = Add<HSub>(mask, graph()->GetConstant1());
+ mask = AddUncasted<HSar>(mask, graph()->GetConstant1());
+ mask = AddUncasted<HSub>(mask, graph()->GetConstant1());
// Check whether object is a smi.
IfBuilder if_objectissmi(this);
@@ -1365,10 +1656,10 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
if_objectissmi.Then();
{
// Compute hash for smi similar to smi_get_hash().
- HValue* hash = Add<HBitwise>(Token::BIT_AND, object, mask);
+ HValue* hash = AddUncasted<HBitwise>(Token::BIT_AND, object, mask);
// Load the key.
- HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
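
Aside (not part of the patch): the two hunks above compute the cache mask and the smi hash for the number-to-string cache, which is laid out as flat (number, string) pairs. The indexing math in stand-alone form, assuming a power-of-two cache length:

#include <stdint.h>

struct CacheSlot { int32_t key_index; int32_t value_index; };

CacheSlot SmiCacheSlot(int32_t smi, int32_t cache_length) {
  int32_t mask = (cache_length >> 1) - 1;  // number of entries minus one
  int32_t hash = smi & mask;               // smi_get_hash()-style hash
  int32_t key_index = hash << 1;           // keys live at even indices
  return { key_index, key_index + 1 };     // value sits right after its key
}
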
@@ -1386,24 +1677,26 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
if_objectissmi.Else();
{
if (type->Is(Type::Smi())) {
- if_objectissmi.Deopt("Excepted smi");
+ if_objectissmi.Deopt("Expected smi");
} else {
// Check if the object is a heap number.
IfBuilder if_objectisnumber(this);
- if_objectisnumber.If<HCompareMap>(
- object, isolate()->factory()->heap_number_map());
+ HValue* objectisnumber = if_objectisnumber.If<HCompareMap>(
+ object, isolate()->factory()->heap_number_map(), top_info());
if_objectisnumber.Then();
{
// Compute hash for heap number similar to double_get_hash().
HValue* low = Add<HLoadNamedField>(
- object, HObjectAccess::ForHeapNumberValueLowestBits());
+ object, objectisnumber,
+ HObjectAccess::ForHeapNumberValueLowestBits());
HValue* high = Add<HLoadNamedField>(
- object, HObjectAccess::ForHeapNumberValueHighestBits());
- HValue* hash = Add<HBitwise>(Token::BIT_XOR, low, high);
- hash = Add<HBitwise>(Token::BIT_AND, hash, mask);
+ object, objectisnumber,
+ HObjectAccess::ForHeapNumberValueHighestBits());
+ HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, low, high);
+ hash = AddUncasted<HBitwise>(Token::BIT_AND, hash, mask);
// Load the key.
- HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
@@ -1411,14 +1704,16 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
// Check if key is a heap number (the number string cache contains only
// SMIs and heap numbers, so it is sufficient to do a SMI check here).
IfBuilder if_keyisnotsmi(this);
- if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
+ HValue* keyisnotsmi = if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
if_keyisnotsmi.Then();
{
// Check if values of key and object match.
IfBuilder if_keyeqobject(this);
if_keyeqobject.If<HCompareNumericAndBranch>(
- Add<HLoadNamedField>(key, HObjectAccess::ForHeapNumberValue()),
- Add<HLoadNamedField>(object, HObjectAccess::ForHeapNumberValue()),
+ Add<HLoadNamedField>(key, keyisnotsmi,
+ HObjectAccess::ForHeapNumberValue()),
+ Add<HLoadNamedField>(object, objectisnumber,
+ HObjectAccess::ForHeapNumberValue()),
Token::EQ);
if_keyeqobject.Then();
{
@@ -1449,7 +1744,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
// Load the value in case of cache hit.
HValue* key_index = Pop();
- HValue* value_index = Add<HAdd>(key_index, graph()->GetConstant1());
+ HValue* value_index = AddUncasted<HAdd>(key_index, graph()->GetConstant1());
Push(Add<HLoadKeyed>(number_string_cache, value_index,
static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE));
@@ -1469,16 +1764,394 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object,
}
+HAllocate* HGraphBuilder::BuildAllocate(
+ HValue* object_size,
+ HType type,
+ InstanceType instance_type,
+ HAllocationMode allocation_mode) {
+ // Compute the effective allocation size.
+ HValue* size = object_size;
+ if (allocation_mode.CreateAllocationMementos()) {
+ size = AddUncasted<HAdd>(size, Add<HConstant>(AllocationMemento::kSize));
+ size->ClearFlag(HValue::kCanOverflow);
+ }
+
+ // Perform the actual allocation.
+ HAllocate* object = Add<HAllocate>(
+ size, type, allocation_mode.GetPretenureMode(),
+ instance_type, allocation_mode.feedback_site());
+
+ // Setup the allocation memento.
+ if (allocation_mode.CreateAllocationMementos()) {
+ BuildCreateAllocationMemento(
+ object, object_size, allocation_mode.current_site());
+ }
+
+ return object;
+}
+
+
+HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
+ HValue* right_length) {
+ // Compute the combined string length. If the result is larger than the max
+ // supported string length, we bail out to the runtime. This is done
+ // implicitly when converting the result back to a smi in case the max
+ // string length equals the max smi value. Otherwise, for platforms with
+ // 32-bit smis, we do an explicit check against String::kMaxLength below.
+ HValue* length = AddUncasted<HAdd>(left_length, right_length);
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (String::kMaxLength != Smi::kMaxValue) {
+ IfBuilder if_nooverflow(this);
+ if_nooverflow.If<HCompareNumericAndBranch>(
+ length, Add<HConstant>(String::kMaxLength), Token::LTE);
+ if_nooverflow.Then();
+ if_nooverflow.ElseDeopt("String length exceeds limit");
+ }
+ return length;
+}
+
+
+HValue* HGraphBuilder::BuildCreateConsString(
+ HValue* length,
+ HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode) {
+ // Determine the string instance types.
+ HInstruction* left_instance_type = AddLoadStringInstanceType(left);
+ HInstruction* right_instance_type = AddLoadStringInstanceType(right);
+
+ // Allocate the cons string object. HAllocate does not care whether we
+ // pass CONS_STRING_TYPE or CONS_ASCII_STRING_TYPE here, so we just use
+ // CONS_STRING_TYPE here. Below we decide whether the cons string is
+ // one-byte or two-byte and set the appropriate map.
+ ASSERT(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE,
+ CONS_ASCII_STRING_TYPE));
+ HAllocate* result = BuildAllocate(Add<HConstant>(ConsString::kSize),
+ HType::String(), CONS_STRING_TYPE,
+ allocation_mode);
+
+ // Compute intersection and difference of instance types.
+ HValue* anded_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_AND, left_instance_type, right_instance_type);
+ HValue* xored_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_XOR, left_instance_type, right_instance_type);
+
+ // We create a one-byte cons string if
+ // 1. both strings are one-byte, or
+ // 2. at least one of the strings is two-byte, but happens to contain only
+ // one-byte characters.
+ // To do this, we check
+ // 1. if both strings are one-byte, or if the one-byte data hint is set in
+ // both strings, or
+ // 2. if one of the strings has the one-byte data hint set and the other
+ // string is one-byte.
+ IfBuilder if_onebyte(this);
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ STATIC_ASSERT(kOneByteDataHintMask != 0);
+ if_onebyte.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, anded_instance_types,
+ Add<HConstant>(static_cast<int32_t>(
+ kStringEncodingMask | kOneByteDataHintMask))),
+ graph()->GetConstant0(), Token::NE);
+ if_onebyte.Or();
+ STATIC_ASSERT(kOneByteStringTag != 0 &&
+ kOneByteDataHintTag != 0 &&
+ kOneByteDataHintTag != kOneByteStringTag);
+ if_onebyte.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, xored_instance_types,
+ Add<HConstant>(static_cast<int32_t>(
+ kOneByteStringTag | kOneByteDataHintTag))),
+ Add<HConstant>(static_cast<int32_t>(
+ kOneByteStringTag | kOneByteDataHintTag)), Token::EQ);
+ if_onebyte.Then();
+ {
+ // We can safely skip the write barrier for storing the map here.
+ Handle<Map> map = isolate()->factory()->cons_ascii_string_map();
+ AddStoreMapConstantNoWriteBarrier(result, map);
+ }
+ if_onebyte.Else();
+ {
+ // We can safely skip the write barrier for storing the map here.
+ Handle<Map> map = isolate()->factory()->cons_string_map();
+ AddStoreMapConstantNoWriteBarrier(result, map);
+ }
+ if_onebyte.End();
+
+ // Initialize the cons string fields.
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
+ Add<HConstant>(String::kEmptyHashField));
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
+ Add<HStoreNamedField>(result, HObjectAccess::ForConsStringFirst(), left);
+ Add<HStoreNamedField>(result, HObjectAccess::ForConsStringSecond(), right);
+
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ return result;
+}
+
+
+void HGraphBuilder::BuildCopySeqStringChars(HValue* src,
+ HValue* src_offset,
+ String::Encoding src_encoding,
+ HValue* dst,
+ HValue* dst_offset,
+ String::Encoding dst_encoding,
+ HValue* length) {
+ ASSERT(dst_encoding != String::ONE_BYTE_ENCODING ||
+ src_encoding == String::ONE_BYTE_ENCODING);
+ LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
+ HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT);
+ {
+ HValue* src_index = AddUncasted<HAdd>(src_offset, index);
+ HValue* value =
+ AddUncasted<HSeqStringGetChar>(src_encoding, src, src_index);
+ HValue* dst_index = AddUncasted<HAdd>(dst_offset, index);
+ Add<HSeqStringSetChar>(dst_encoding, dst, dst_index, value);
+ }
+ loop.EndBody();
+}
+
+
+HValue* HGraphBuilder::BuildUncheckedStringAdd(
+ HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode) {
+ // Determine the string lengths.
+ HValue* left_length = AddLoadStringLength(left);
+ HValue* right_length = AddLoadStringLength(right);
+
+ // Compute the combined string length.
+ HValue* length = BuildAddStringLengths(left_length, right_length);
+
+ // Do some manual constant folding here.
+ if (left_length->IsConstant()) {
+ HConstant* c_left_length = HConstant::cast(left_length);
+ ASSERT_NE(0, c_left_length->Integer32Value());
+ if (c_left_length->Integer32Value() + 1 >= ConsString::kMinLength) {
+ // The right string contains at least one character.
+ return BuildCreateConsString(length, left, right, allocation_mode);
+ }
+ } else if (right_length->IsConstant()) {
+ HConstant* c_right_length = HConstant::cast(right_length);
+ ASSERT_NE(0, c_right_length->Integer32Value());
+ if (c_right_length->Integer32Value() + 1 >= ConsString::kMinLength) {
+ // The left string contains at least one character.
+ return BuildCreateConsString(length, left, right, allocation_mode);
+ }
+ }
+
+ // Check if we should create a cons string.
+ IfBuilder if_createcons(this);
+ if_createcons.If<HCompareNumericAndBranch>(
+ length, Add<HConstant>(ConsString::kMinLength), Token::GTE);
+ if_createcons.Then();
+ {
+ // Create a cons string.
+ Push(BuildCreateConsString(length, left, right, allocation_mode));
+ }
+ if_createcons.Else();
+ {
+ // Determine the string instance types.
+ HValue* left_instance_type = AddLoadStringInstanceType(left);
+ HValue* right_instance_type = AddLoadStringInstanceType(right);
+
+ // Compute union and difference of instance types.
+ HValue* ored_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_OR, left_instance_type, right_instance_type);
+ HValue* xored_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_XOR, left_instance_type, right_instance_type);
+
+ // Check if both strings have the same encoding and both are
+ // sequential.
+ IfBuilder if_sameencodingandsequential(this);
+ if_sameencodingandsequential.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, xored_instance_types,
+ Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
+ graph()->GetConstant0(), Token::EQ);
+ if_sameencodingandsequential.And();
+ STATIC_ASSERT(kSeqStringTag == 0);
+ if_sameencodingandsequential.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, ored_instance_types,
+ Add<HConstant>(static_cast<int32_t>(kStringRepresentationMask))),
+ graph()->GetConstant0(), Token::EQ);
+ if_sameencodingandsequential.Then();
+ {
+ HConstant* string_map =
+ Add<HConstant>(isolate()->factory()->string_map());
+ HConstant* ascii_string_map =
+ Add<HConstant>(isolate()->factory()->ascii_string_map());
+
+ // Determine map and size depending on whether result is one-byte string.
+ IfBuilder if_onebyte(this);
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ if_onebyte.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, ored_instance_types,
+ Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
+ graph()->GetConstant0(), Token::NE);
+ if_onebyte.Then();
+ {
+ // Allocate sequential one-byte string object.
+ Push(length);
+ Push(ascii_string_map);
+ }
+ if_onebyte.Else();
+ {
+ // Allocate sequential two-byte string object.
+ HValue* size = AddUncasted<HShl>(length, graph()->GetConstant1());
+ size->ClearFlag(HValue::kCanOverflow);
+ size->SetFlag(HValue::kUint32);
+ Push(size);
+ Push(string_map);
+ }
+ if_onebyte.End();
+ HValue* map = Pop();
+
+ // Calculate the number of bytes needed for the characters in the
+ // string while observing object alignment.
+ STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
+ HValue* size = Pop();
+ size = AddUncasted<HAdd>(size, Add<HConstant>(static_cast<int32_t>(
+ SeqString::kHeaderSize + kObjectAlignmentMask)));
+ size->ClearFlag(HValue::kCanOverflow);
+ size = AddUncasted<HBitwise>(
+ Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
+ ~kObjectAlignmentMask)));
+
+ // Allocate the string object. HAllocate does not care whether we pass
+ // STRING_TYPE or ASCII_STRING_TYPE here, so we just use STRING_TYPE here.
+ HAllocate* result = BuildAllocate(
+ size, HType::String(), STRING_TYPE, allocation_mode);
+
+ // We can safely skip the write barrier for storing map here.
+ AddStoreMapNoWriteBarrier(result, map);
+
+ // Initialize the string fields.
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
+ Add<HConstant>(String::kEmptyHashField));
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
+
+ // Copy characters to the result string.
+ IfBuilder if_twobyte(this);
+ if_twobyte.If<HCompareObjectEqAndBranch>(map, string_map);
+ if_twobyte.Then();
+ {
+ // Copy characters from the left string.
+ BuildCopySeqStringChars(
+ left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
+ result, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
+ left_length);
+
+ // Copy characters from the right string.
+ BuildCopySeqStringChars(
+ right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
+ result, left_length, String::TWO_BYTE_ENCODING,
+ right_length);
+ }
+ if_twobyte.Else();
+ {
+ // Copy characters from the left string.
+ BuildCopySeqStringChars(
+ left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
+ result, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
+ left_length);
+
+ // Copy characters from the right string.
+ BuildCopySeqStringChars(
+ right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
+ result, left_length, String::ONE_BYTE_ENCODING,
+ right_length);
+ }
+ if_twobyte.End();
+
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ // Return the sequential string.
+ Push(result);
+ }
+ if_sameencodingandsequential.Else();
+ {
+ // Fallback to the runtime to add the two strings.
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kStringAdd),
+ 2));
+ }
+ if_sameencodingandsequential.End();
+ }
+ if_createcons.End();
+
+ return Pop();
+}
+
+
+HValue* HGraphBuilder::BuildStringAdd(
+ HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode) {
+ NoObservableSideEffectsScope no_effects(this);
+
+ // Determine string lengths.
+ HValue* left_length = AddLoadStringLength(left);
+ HValue* right_length = AddLoadStringLength(right);
+
+ // Check if left string is empty.
+ IfBuilder if_leftempty(this);
+ if_leftempty.If<HCompareNumericAndBranch>(
+ left_length, graph()->GetConstant0(), Token::EQ);
+ if_leftempty.Then();
+ {
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ // Just return the right string.
+ Push(right);
+ }
+ if_leftempty.Else();
+ {
+ // Check if right string is empty.
+ IfBuilder if_rightempty(this);
+ if_rightempty.If<HCompareNumericAndBranch>(
+ right_length, graph()->GetConstant0(), Token::EQ);
+ if_rightempty.Then();
+ {
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ // Just return the left string.
+ Push(left);
+ }
+ if_rightempty.Else();
+ {
+ // Add the two non-empty strings.
+ Push(BuildUncheckedStringAdd(left, right, allocation_mode));
+ }
+ if_rightempty.End();
+ }
+ if_leftempty.End();
+
+ return Pop();
+}
+
+
HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
HValue* key,
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode) {
- ASSERT(!IsExternalArrayElementsKind(elements_kind) || !is_js_array);
+ ASSERT((!IsExternalArrayElementsKind(elements_kind) &&
+ !IsFixedTypedArrayElementsKind(elements_kind)) ||
+ !is_js_array);
// No GVNFlag is necessary for ElementsKind if there is an explicit dependency
// on a HElementsTransition instruction. The flag can also be removed if the
// map to check has FAST_HOLEY_ELEMENTS, since there can be no further
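
Aside (not part of the patch): among the string-concatenation helpers added in the hunk above, BuildCreateConsString() picks the one-byte or two-byte cons string map from the two instance types. A boolean sketch of that decision, with made-up bit values standing in for V8's instance-type constants:

#include <stdint.h>

// Illustrative bit layout only; the real values are kStringEncodingMask,
// kOneByteStringTag and kOneByteDataHintTag from V8's instance types.
const uint32_t kOneByteTag      = 1u << 2;  // string is one-byte encoded
const uint32_t kOneByteDataHint = 1u << 3;  // data happens to be one-byte
const uint32_t kEncodingMask    = kOneByteTag;

bool ConsStringIsOneByte(uint32_t left_type, uint32_t right_type) {
  uint32_t anded = left_type & right_type;
  uint32_t xored = left_type ^ right_type;
  // Both strings one-byte, or both carry the one-byte data hint...
  if ((anded & (kEncodingMask | kOneByteDataHint)) != 0) return true;
  // ...or one has the data hint while the other is genuinely one-byte.
  return (xored & (kOneByteTag | kOneByteDataHint)) ==
         (kOneByteTag | kOneByteDataHint);
}
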
@@ -1486,33 +2159,41 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
// generated store code.
if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
- (elements_kind == FAST_ELEMENTS && is_store)) {
- checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ (elements_kind == FAST_ELEMENTS && access_type == STORE)) {
+ checked_object->ClearDependsOnFlag(kElementsKind);
}
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
HValue* elements = AddLoadElements(checked_object);
- if (is_store && (fast_elements || fast_smi_only_elements) &&
+ if (access_type == STORE && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ check_cow_map->ClearDependsOnFlag(kElementsKind);
}
HInstruction* length = NULL;
if (is_js_array) {
length = Add<HLoadNamedField>(
- checked_object, HObjectAccess::ForArrayLength(elements_kind));
+ checked_object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForArrayLength(elements_kind));
} else {
length = AddLoadFixedArrayLength(elements);
}
length->set_type(HType::Smi());
HValue* checked_key = NULL;
- if (IsExternalArrayElementsKind(elements_kind)) {
+ if (IsExternalArrayElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind)) {
+ HValue* backing_store;
+ if (IsExternalArrayElementsKind(elements_kind)) {
+ backing_store = Add<HLoadNamedField>(
+ elements, static_cast<HValue*>(NULL),
+ HObjectAccess::ForExternalArrayExternalPointer());
+ } else {
+ backing_store = elements;
+ }
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
NoObservableSideEffectsScope no_effects(this);
- HLoadExternalArrayPointer* external_elements =
- Add<HLoadExternalArrayPointer>(elements);
IfBuilder length_checker(this);
length_checker.If<HCompareNumericAndBranch>(key, length, Token::LT);
length_checker.Then();
@@ -1521,18 +2202,17 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
HInstruction* result = AddElementAccess(
- external_elements, key, val, bounds_check, elements_kind, is_store);
+ backing_store, key, val, bounds_check, elements_kind, access_type);
negative_checker.ElseDeopt("Negative key encountered");
+ negative_checker.End();
length_checker.End();
return result;
} else {
ASSERT(store_mode == STANDARD_STORE);
checked_key = Add<HBoundsCheck>(key, length);
- HLoadExternalArrayPointer* external_elements =
- Add<HLoadExternalArrayPointer>(elements);
return AddElementAccess(
- external_elements, checked_key, val,
- checked_object, elements_kind, is_store);
+ backing_store, checked_key, val,
+ checked_object, elements_kind, access_type);
}
}
ASSERT(fast_smi_only_elements ||
@@ -1542,21 +2222,21 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// In case val is stored into a fast smi array, assure that the value is a smi
// before manipulating the backing store. Otherwise the actual store may
// deopt, leaving the backing store in an invalid state.
- if (is_store && IsFastSmiElementsKind(elements_kind) &&
+ if (access_type == STORE && IsFastSmiElementsKind(elements_kind) &&
!val->type().IsSmi()) {
- val = Add<HForceRepresentation>(val, Representation::Smi());
+ val = AddUncasted<HForceRepresentation>(val, Representation::Smi());
}
if (IsGrowStoreMode(store_mode)) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCheckForCapacityGrow(checked_object, elements,
elements_kind, length, key,
- is_js_array);
+ is_js_array, access_type);
checked_key = key;
} else {
checked_key = Add<HBoundsCheck>(key, length);
- if (is_store && (fast_elements || fast_smi_only_elements)) {
+ if (access_type == STORE && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCopyElementsOnWrite(checked_object, elements,
@@ -1564,14 +2244,57 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ check_cow_map->ClearDependsOnFlag(kElementsKind);
}
}
}
return AddElementAccess(elements, checked_key, val, checked_object,
- elements_kind, is_store, load_mode);
-}
+ elements_kind, access_type, load_mode);
+}
+
+
+
+HValue* HGraphBuilder::BuildAllocateArrayFromLength(
+ JSArrayBuilder* array_builder,
+ HValue* length_argument) {
+ if (length_argument->IsConstant() &&
+ HConstant::cast(length_argument)->HasSmiValue()) {
+ int array_length = HConstant::cast(length_argument)->Integer32Value();
+ HValue* new_object = array_length == 0
+ ? array_builder->AllocateEmptyArray()
+ : array_builder->AllocateArray(length_argument, length_argument);
+ return new_object;
+ }
+
+ HValue* constant_zero = graph()->GetConstant0();
+ HConstant* max_alloc_length =
+ Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ HInstruction* checked_length = Add<HBoundsCheck>(length_argument,
+ max_alloc_length);
+ IfBuilder if_builder(this);
+ if_builder.If<HCompareNumericAndBranch>(checked_length, constant_zero,
+ Token::EQ);
+ if_builder.Then();
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ HConstant* initial_capacity_node = Add<HConstant>(initial_capacity);
+ Push(initial_capacity_node); // capacity
+ Push(constant_zero); // length
+ if_builder.Else();
+ if (!(top_info()->IsStub()) &&
+ IsFastPackedElementsKind(array_builder->kind())) {
+ // We'll come back later with better (holey) feedback.
+ if_builder.Deopt("Holey array despite packed elements_kind feedback");
+ } else {
+ Push(checked_length); // capacity
+ Push(checked_length); // length
+ }
+ if_builder.End();
+ // Figure out total size
+ HValue* length = Pop();
+ HValue* capacity = Pop();
+ return array_builder->AllocateArray(capacity, length);
+}
HValue* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
HValue* capacity) {
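
Aside (not part of the patch): the new BuildAllocateArrayFromLength() above picks the backing-store capacity from the bounds-checked length argument. Reduced to a scalar decision, with the limits standing in for JSObject::kInitialMaxFastElementArray and JSArray::kPreallocatedArrayElements:

// Hypothetical scalar version of the capacity/length choice made above.
int ChooseCapacity(int checked_length, int preallocated) {
  // checked_length has already passed the bounds check against the fast limit.
  if (checked_length == 0) return preallocated;  // keep a small backing store
  return checked_length;                         // otherwise capacity == length
}
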
@@ -1587,15 +2310,18 @@ HValue* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
}
HConstant* elements_size_value = Add<HConstant>(elements_size);
- HValue* mul = Add<HMul>(capacity, elements_size_value);
+ HValue* mul = AddUncasted<HMul>(capacity, elements_size_value);
mul->ClearFlag(HValue::kCanOverflow);
HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
- HValue* total_size = Add<HAdd>(mul, header_size);
+ HValue* total_size = AddUncasted<HAdd>(mul, header_size);
total_size->ClearFlag(HValue::kCanOverflow);
- return Add<HAllocate>(total_size, HType::JSArray(),
- isolate()->heap()->GetPretenureMode(), instance_type);
+ PretenureFlag pretenure_flag = !FLAG_allocation_site_pretenuring ?
+ isolate()->heap()->GetPretenureMode() : NOT_TENURED;
+
+ return Add<HAllocate>(total_size, HType::JSArray(), pretenure_flag,
+ instance_type);
}
@@ -1618,7 +2344,7 @@ HValue* HGraphBuilder::BuildAllocateElementsAndInitializeElementsHeader(
HValue* capacity) {
// The HForceRepresentation is to prevent possible deopt on int-smi
// conversion after allocation but before the new object fields are set.
- capacity = Add<HForceRepresentation>(capacity, Representation::Smi());
+ capacity = AddUncasted<HForceRepresentation>(capacity, Representation::Smi());
HValue* new_elements = BuildAllocateElements(kind, capacity);
BuildInitializeElementsHeader(new_elements, kind, capacity);
return new_elements;
@@ -1643,9 +2369,8 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
length_field);
if (mode == TRACK_ALLOCATION_SITE) {
- BuildCreateAllocationMemento(array,
- JSArray::kSize,
- allocation_site_payload);
+ BuildCreateAllocationMemento(
+ array, Add<HConstant>(JSArray::kSize), allocation_site_payload);
}
int elements_location = JSArray::kSize;
@@ -1653,9 +2378,10 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
elements_location += AllocationMemento::kSize;
}
- HValue* elements = Add<HInnerAllocatedObject>(array, elements_location);
+ HInnerAllocatedObject* elements = Add<HInnerAllocatedObject>(
+ array, Add<HConstant>(elements_location));
Add<HStoreNamedField>(array, HObjectAccess::ForElementsPointer(), elements);
- return static_cast<HInnerAllocatedObject*>(elements);
+ return elements;
}
@@ -1665,22 +2391,27 @@ HInstruction* HGraphBuilder::AddElementAccess(
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode) {
- if (is_store) {
+ if (access_type == STORE) {
ASSERT(val != NULL);
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
+ if (elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS) {
val = Add<HClampToUint8>(val);
}
- return Add<HStoreKeyed>(elements, checked_key, val, elements_kind);
+ return Add<HStoreKeyed>(elements, checked_key, val, elements_kind,
+ elements_kind == FAST_SMI_ELEMENTS
+ ? STORE_TO_INITIALIZED_ENTRY
+ : INITIALIZING_STORE);
}
- ASSERT(!is_store);
+ ASSERT(access_type == LOAD);
ASSERT(val == NULL);
HLoadKeyed* load = Add<HLoadKeyed>(
elements, checked_key, dependency, elements_kind, load_mode);
if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS)) {
graph()->RecordUint32Instruction(load);
}
return load;
@@ -1688,13 +2419,14 @@ HInstruction* HGraphBuilder::AddElementAccess(
HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object) {
- return Add<HLoadNamedField>(object, HObjectAccess::ForElementsPointer());
+ return Add<HLoadNamedField>(
+ object, static_cast<HValue*>(NULL), HObjectAccess::ForElementsPointer());
}
HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(HValue* object) {
- return Add<HLoadNamedField>(object,
- HObjectAccess::ForFixedArrayLength());
+ return Add<HLoadNamedField>(
+ object, static_cast<HValue*>(NULL), HObjectAccess::ForFixedArrayLength());
}
@@ -1715,10 +2447,9 @@ HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) {
- Heap* heap = isolate()->heap();
int element_size = IsFastDoubleElementsKind(kind) ? kDoubleSize
: kPointerSize;
- int max_size = heap->MaxRegularSpaceAllocationSize() / element_size;
+ int max_size = Page::kMaxRegularHeapObjectSize / element_size;
max_size -= JSArray::kSize / element_size;
HConstant* max_size_constant = Add<HConstant>(max_size);
Add<HBoundsCheck>(length, max_size_constant);
@@ -1761,19 +2492,15 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
: Add<HConstant>(nan_double);
// Special loop unfolding case
- static const int kLoopUnfoldLimit = 4;
- bool unfold_loop = false;
- int initial_capacity = JSArray::kPreallocatedArrayElements;
- if (from->IsConstant() && to->IsConstant() &&
- initial_capacity <= kLoopUnfoldLimit) {
- HConstant* constant_from = HConstant::cast(from);
- HConstant* constant_to = HConstant::cast(to);
+ static const int kLoopUnfoldLimit = 8;
+ STATIC_ASSERT(JSArray::kPreallocatedArrayElements <= kLoopUnfoldLimit);
+ int initial_capacity = -1;
+ if (from->IsInteger32Constant() && to->IsInteger32Constant()) {
+ int constant_from = from->GetInteger32Constant();
+ int constant_to = to->GetInteger32Constant();
- if (constant_from->HasInteger32Value() &&
- constant_from->Integer32Value() == 0 &&
- constant_to->HasInteger32Value() &&
- constant_to->Integer32Value() == initial_capacity) {
- unfold_loop = true;
+ if (constant_from == 0 && constant_to <= kLoopUnfoldLimit) {
+ initial_capacity = constant_to;
}
}
@@ -1783,7 +2510,7 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
elements_kind = FAST_HOLEY_ELEMENTS;
}
- if (unfold_loop) {
+ if (initial_capacity >= 0) {
for (int i = 0; i < initial_capacity; i++) {
HInstruction* key = Add<HConstant>(i);
Add<HStoreKeyed>(elements, key, hole, elements_kind);
@@ -1882,14 +2609,16 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
HObjectAccess access = HObjectAccess::ForJSArrayOffset(i);
- Add<HStoreNamedField>(object, access,
- Add<HLoadNamedField>(boilerplate, access));
+ Add<HStoreNamedField>(
+ object, access, Add<HLoadNamedField>(
+ boilerplate, static_cast<HValue*>(NULL), access));
}
}
// Create an allocation site info if requested.
if (mode == TRACK_ALLOCATION_SITE) {
- BuildCreateAllocationMemento(object, JSArray::kSize, allocation_site);
+ BuildCreateAllocationMemento(
+ object, Add<HConstant>(JSArray::kSize), allocation_site);
}
if (length > 0) {
@@ -1910,8 +2639,9 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
// Copy the elements array header.
for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
- Add<HStoreNamedField>(object_elements, access,
- Add<HLoadNamedField>(boilerplate_elements, access));
+ Add<HStoreNamedField>(
+ object_elements, access, Add<HLoadNamedField>(
+ boilerplate_elements, static_cast<HValue*>(NULL), access));
}
// Copy the elements array contents.
@@ -1932,7 +2662,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
void HGraphBuilder::BuildCompareNil(
HValue* value,
- Handle<Type> type,
+ Type* type,
HIfContinuation* continuation) {
IfBuilder if_nil(this);
bool some_case_handled = false;
@@ -1982,27 +2712,62 @@ void HGraphBuilder::BuildCompareNil(
}
-HValue* HGraphBuilder::BuildCreateAllocationMemento(HValue* previous_object,
- int previous_object_size,
- HValue* alloc_site) {
- ASSERT(alloc_site != NULL);
- HInnerAllocatedObject* alloc_memento = Add<HInnerAllocatedObject>(
+void HGraphBuilder::BuildCreateAllocationMemento(
+ HValue* previous_object,
+ HValue* previous_object_size,
+ HValue* allocation_site) {
+ ASSERT(allocation_site != NULL);
+ HInnerAllocatedObject* allocation_memento = Add<HInnerAllocatedObject>(
previous_object, previous_object_size);
- Handle<Map> alloc_memento_map =
- isolate()->factory()->allocation_memento_map();
- AddStoreMapConstant(alloc_memento, alloc_memento_map);
- HObjectAccess access = HObjectAccess::ForAllocationMementoSite();
- Add<HStoreNamedField>(alloc_memento, access, alloc_site);
- return alloc_memento;
+ AddStoreMapConstant(
+ allocation_memento, isolate()->factory()->allocation_memento_map());
+ Add<HStoreNamedField>(
+ allocation_memento,
+ HObjectAccess::ForAllocationMementoSite(),
+ allocation_site);
+ if (FLAG_allocation_site_pretenuring) {
+ HValue* memento_create_count = Add<HLoadNamedField>(
+ allocation_site, static_cast<HValue*>(NULL),
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kPretenureCreateCountOffset));
+ memento_create_count = AddUncasted<HAdd>(
+ memento_create_count, graph()->GetConstant1());
+ // This smi value is reset to zero after every gc, overflow isn't a problem
+ // since the counter is bounded by the new space size.
+ memento_create_count->ClearFlag(HValue::kCanOverflow);
+ HStoreNamedField* store = Add<HStoreNamedField>(
+ allocation_site, HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kPretenureCreateCountOffset), memento_create_count);
+ // No write barrier needed to store a smi.
+ store->SkipWriteBarrier();
+ }
+}
+
+
+HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
+ // Get the global context, then the native context
+ HInstruction* context =
+ Add<HLoadNamedField>(closure, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFunctionContextPointer());
+ HInstruction* global_object = Add<HLoadNamedField>(
+ context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
+ GlobalObject::kNativeContextOffset);
+ return Add<HLoadNamedField>(
+ global_object, static_cast<HValue*>(NULL), access);
}
HInstruction* HGraphBuilder::BuildGetNativeContext() {
// Get the global context, then the native context
- HInstruction* global_object = Add<HGlobalObject>();
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(
- GlobalObject::kNativeContextOffset);
- return Add<HLoadNamedField>(global_object, access);
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ return Add<HLoadNamedField>(
+ global_object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForObservableJSObjectOffset(
+ GlobalObject::kNativeContextOffset));
}
@@ -2024,6 +2789,9 @@ HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
kind_(kind),
allocation_site_payload_(allocation_site_payload),
constructor_function_(constructor_function) {
+ ASSERT(!allocation_site_payload->IsConstant() ||
+ HConstant::cast(allocation_site_payload)->handle(
+ builder_->isolate())->IsAllocationSite());
mode_ = override_mode == DISABLE_ALLOCATION_SITES
? DONT_TRACK_ALLOCATION_SITE
: AllocationSite::GetMode(kind);
@@ -2042,14 +2810,27 @@ HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
- if (kind_ == GetInitialFastElementsKind()) {
+ if (!builder()->top_info()->IsStub()) {
+ // A constant map is fine.
+ Handle<Map> map(builder()->isolate()->get_initial_js_array_map(kind_),
+ builder()->isolate());
+ return builder()->Add<HConstant>(map);
+ }
+
+ if (constructor_function_ != NULL && kind_ == GetInitialFastElementsKind()) {
// No need for a context lookup if the kind_ matches the initial
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddLoadNamedField(constructor_function_, access);
+ return builder()->Add<HLoadNamedField>(
+ constructor_function_, static_cast<HValue*>(NULL), access);
}
- HInstruction* native_context = builder()->BuildGetNativeContext();
+ // TODO(mvstanton): we should always have a constructor function if we
+ // are creating a stub.
+ HInstruction* native_context = constructor_function_ != NULL
+ ? builder()->BuildGetNativeContext(constructor_function_)
+ : builder()->BuildGetNativeContext();
+
HInstruction* index = builder()->Add<HConstant>(
static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
@@ -2066,7 +2847,8 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddLoadNamedField(constructor_function_, access);
+ return builder()->Add<HLoadNamedField>(
+ constructor_function_, static_cast<HValue*>(NULL), access);
}
@@ -2084,12 +2866,14 @@ HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
HInstruction* elements_size_value =
builder()->Add<HConstant>(elements_size());
- HInstruction* mul = builder()->Add<HMul>(length_node, elements_size_value);
- mul->ClearFlag(HValue::kCanOverflow);
-
+ HInstruction* mul = HMul::NewImul(builder()->zone(), builder()->context(),
+ length_node, elements_size_value);
+ builder()->AddInstruction(mul);
HInstruction* base = builder()->Add<HConstant>(base_size);
- HInstruction* total_size = builder()->Add<HAdd>(base, mul);
+ HInstruction* total_size = HAdd::New(builder()->zone(), builder()->context(),
+ base, mul);
total_size->ClearFlag(HValue::kCanOverflow);
+ builder()->AddInstruction(total_size);
return total_size;
}
@@ -2113,30 +2897,31 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
return AllocateArray(size_in_bytes,
capacity,
- builder()->graph()->GetConstant0(),
- true);
+ builder()->graph()->GetConstant0());
}
HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* capacity,
HValue* length_field,
- bool fill_with_hole) {
+ FillMode fill_mode) {
HValue* size_in_bytes = EstablishAllocationSize(capacity);
- return AllocateArray(size_in_bytes, capacity, length_field, fill_with_hole);
+ return AllocateArray(size_in_bytes, capacity, length_field, fill_mode);
}
HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
HValue* capacity,
HValue* length_field,
- bool fill_with_hole) {
+ FillMode fill_mode) {
// These HForceRepresentations are because we store these as fields in the
// objects we construct, and an int32-to-smi HChange could deopt. Accept
// the deopt possibility now, before allocation occurs.
- capacity = builder()->Add<HForceRepresentation>(capacity,
- Representation::Smi());
- length_field = builder()->Add<HForceRepresentation>(length_field,
- Representation::Smi());
+ capacity =
+ builder()->AddUncasted<HForceRepresentation>(capacity,
+ Representation::Smi());
+ length_field =
+ builder()->AddUncasted<HForceRepresentation>(length_field,
+ Representation::Smi());
// Allocate (dealing with failure appropriately)
HAllocate* new_object = builder()->Add<HAllocate>(size_in_bytes,
HType::JSArray(), NOT_TENURED, JS_ARRAY_TYPE);
@@ -2163,7 +2948,7 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
// Initialize the elements
builder()->BuildInitializeElementsHeader(elements_location_, kind_, capacity);
- if (fill_with_hole) {
+ if (fill_mode == FILL_WITH_HOLE) {
builder()->BuildFillElementsWithHole(elements_location_, kind_,
graph()->GetConstant0(), capacity);
}
@@ -2180,20 +2965,24 @@ HStoreNamedField* HGraphBuilder::AddStoreMapConstant(HValue *object,
HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin) {
- HGlobalObject* global_object = Add<HGlobalObject>();
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
GlobalObject::kBuiltinsOffset);
- HValue* builtins = Add<HLoadNamedField>(global_object, access);
- HObjectAccess function_access = HObjectAccess::ForJSObjectOffset(
- JSBuiltinsObject::OffsetOfFunctionWithId(builtin));
- return Add<HLoadNamedField>(builtins, function_access);
+ HValue* builtins = Add<HLoadNamedField>(
+ global_object, static_cast<HValue*>(NULL), access);
+ HObjectAccess function_access = HObjectAccess::ForObservableJSObjectOffset(
+ JSBuiltinsObject::OffsetOfFunctionWithId(builtin));
+ return Add<HLoadNamedField>(
+ builtins, static_cast<HValue*>(NULL), function_access);
}
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info),
function_state_(NULL),
- initial_function_state_(this, info, NORMAL_RETURN),
+ initial_function_state_(this, info, NORMAL_RETURN, 0),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
@@ -2204,8 +2993,8 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_= &initial_function_state_;
- InitializeAstVisitor(info->isolate());
- if (FLAG_emit_opt_code_positions) {
+ InitializeAstVisitor(info->zone());
+ if (FLAG_hydrogen_track_positions) {
SetSourcePosition(info->shared_info()->start_position());
}
}
@@ -2274,7 +3063,8 @@ HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
}
-void HBasicBlock::FinishExit(HControlInstruction* instruction, int position) {
+void HBasicBlock::FinishExit(HControlInstruction* instruction,
+ HSourcePosition position) {
Finish(instruction, position);
ClearEnvironment();
}
@@ -2297,7 +3087,9 @@ HGraph::HGraph(CompilationInfo* info)
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
- disallow_adding_new_values_(false) {
+ disallow_adding_new_values_(false),
+ next_inline_id_(0),
+ inlined_functions_(5, info->zone()) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
CodeStubInterfaceDescriptor* descriptor =
@@ -2305,6 +3097,7 @@ HGraph::HGraph(CompilationInfo* info)
start_environment_ =
new(zone_) HEnvironment(zone_, descriptor->environment_length());
} else {
+ TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown());
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
@@ -2323,7 +3116,7 @@ HBasicBlock* HGraph::CreateBasicBlock() {
void HGraph::FinalizeUniqueness() {
DisallowHeapAllocation no_gc;
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
it.Current()->FinalizeUniqueness();
@@ -2332,6 +3125,81 @@ void HGraph::FinalizeUniqueness() {
}
+int HGraph::TraceInlinedFunction(
+ Handle<SharedFunctionInfo> shared,
+ HSourcePosition position) {
+ if (!FLAG_hydrogen_track_positions) {
+ return 0;
+ }
+
+ int id = 0;
+ for (; id < inlined_functions_.length(); id++) {
+ if (inlined_functions_[id].shared().is_identical_to(shared)) {
+ break;
+ }
+ }
+
+ if (id == inlined_functions_.length()) {
+ inlined_functions_.Add(InlinedFunctionInfo(shared), zone());
+
+ if (!shared->script()->IsUndefined()) {
+ Handle<Script> script(Script::cast(shared->script()));
+ if (!script->source()->IsUndefined()) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ PrintF(tracing_scope.file(),
+ "--- FUNCTION SOURCE (%s) id{%d,%d} ---\n",
+ shared->DebugName()->ToCString().get(),
+ info()->optimization_id(),
+ id);
+
+ {
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(String::cast(script->source()),
+ &op,
+ shared->start_position());
+ // fun->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len =
+ shared->end_position() - shared->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.HasMore()) {
+ PrintF(tracing_scope.file(), "%c", stream.GetNext());
+ }
+ }
+ }
+
+ PrintF(tracing_scope.file(), "\n--- END ---\n");
+ }
+ }
+ }
+
+ int inline_id = next_inline_id_++;
+
+ if (inline_id != 0) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ PrintF(tracing_scope.file(), "INLINE (%s) id{%d,%d} AS %d AT ",
+ shared->DebugName()->ToCString().get(),
+ info()->optimization_id(),
+ id,
+ inline_id);
+ position.PrintTo(tracing_scope.file());
+ PrintF(tracing_scope.file(), "\n");
+ }
+
+ return inline_id;
+}
+
+
+int HGraph::SourcePositionToScriptPosition(HSourcePosition pos) {
+ if (!FLAG_hydrogen_track_positions || pos.IsUnknown()) {
+ return pos.raw();
+ }
+
+ return inlined_functions_[pos.inlining_id()].start_position() +
+ pos.position();
+}
+
+
// Block ordering was implemented with two mutually recursive methods,
// HGraph::Postorder and HGraph::PostorderLoopBlocks.
// The recursion could lead to stack overflow so the algorithm has been
@@ -2710,7 +3578,8 @@ void HGraph::CollectPhis() {
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind)
+ InliningKind inlining_kind,
+ int inlining_id)
: owner_(owner),
compilation_info_(info),
call_context_(NULL),
@@ -2720,6 +3589,8 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
entry_(NULL),
arguments_object_(NULL),
arguments_elements_(NULL),
+ inlining_id_(inlining_id),
+ outer_source_position_(HSourcePosition::Unknown()),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
@@ -2743,12 +3614,27 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
// Push on the state stack.
owner->set_function_state(this);
+
+ if (FLAG_hydrogen_track_positions) {
+ outer_source_position_ = owner->source_position();
+ owner->EnterInlinedSource(
+ info->shared_info()->start_position(),
+ inlining_id);
+ owner->SetSourcePosition(info->shared_info()->start_position());
+ }
}
FunctionState::~FunctionState() {
delete test_context_;
owner_->set_function_state(outer_);
+
+ if (FLAG_hydrogen_track_positions) {
+ owner_->set_source_position(outer_source_position_);
+ owner_->EnterInlinedSource(
+ outer_->compilation_info()->shared_info()->start_position(),
+ outer_->inlining_id());
+ }
}
@@ -2954,15 +3840,8 @@ void TestContext::BuildBranch(HValue* value) {
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout(kArgumentsObjectValueInATestContext);
}
- HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
- HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
ToBooleanStub::Types expected(condition()->to_boolean_types());
- builder->FinishCurrentBlock(builder->New<HBranch>(
- value, expected, empty_true, empty_false));
-
- owner()->Goto(empty_true, if_true(), builder->function_state());
- owner()->Goto(empty_false , if_false(), builder->function_state());
- builder->set_current_block(NULL);
+ ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
}
@@ -3154,7 +4033,6 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
// where unreachable code could unnecessarily defeat LICM.
Run<HMarkUnreachableBlocksPhase>();
- if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
@@ -3185,6 +4063,8 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
+ if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
+
if (FLAG_use_range) Run<HRangeAnalysisPhase>();
Run<HComputeChangeUndefinedToNaN>();
@@ -3223,7 +4103,13 @@ void HGraph::RestoreActualValues() {
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
- if (instruction->ActualValue() != instruction) {
+ if (instruction->ActualValue() == instruction) continue;
+ if (instruction->CheckFlag(HValue::kIsDead)) {
+ // The instruction was marked as deleted but left in the graph
+ // as a control flow dependency point for subsequent
+ // instructions.
+ instruction->DeleteAndReplaceWith(instruction->ActualValue());
+ } else {
ASSERT(instruction->IsInformativeDefinition());
if (instruction->IsPurelyInformativeDefinition()) {
instruction->DeleteAndReplaceWith(instruction->RedefinedOperand());
@@ -3236,9 +4122,7 @@ void HGraph::RestoreActualValues() {
}
-template <class Instruction>
-HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
- int count = call->argument_count();
+void HOptimizedGraphBuilder::PushArgumentsFromEnvironment(int count) {
ZoneList<HValue*> arguments(count, zone());
for (int i = 0; i < count; ++i) {
arguments.Add(Pop(), zone());
@@ -3247,6 +4131,12 @@ HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
while (!arguments.is_empty()) {
Add<HPushArgument>(arguments.RemoveLast());
}
+}
+
+
+template <class Instruction>
+HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
+ PushArgumentsFromEnvironment(call->argument_count());
return call;
}
@@ -3525,45 +4415,27 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- // We only optimize switch statements with smi-literal smi comparisons,
- // with a bounded number of clauses.
+ // We only optimize switch statements with a bounded number of clauses.
const int kCaseClauseLimit = 128;
ZoneList<CaseClause*>* clauses = stmt->cases();
int clause_count = clauses->length();
+ ZoneList<HBasicBlock*> body_blocks(clause_count, zone());
if (clause_count > kCaseClauseLimit) {
return Bailout(kSwitchStatementTooManyClauses);
}
- ASSERT(stmt->switch_type() != SwitchStatement::UNKNOWN_SWITCH);
- if (stmt->switch_type() == SwitchStatement::GENERIC_SWITCH) {
- return Bailout(kSwitchStatementMixedOrNonLiteralSwitchLabels);
- }
-
CHECK_ALIVE(VisitForValue(stmt->tag()));
Add<HSimulate>(stmt->EntryId());
- HValue* tag_value = Pop();
- HBasicBlock* first_test_block = current_block();
-
- HUnaryControlInstruction* string_check = NULL;
- HBasicBlock* not_string_block = NULL;
-
- // Test switch's tag value if all clauses are string literals
- if (stmt->switch_type() == SwitchStatement::STRING_SWITCH) {
- first_test_block = graph()->CreateBasicBlock();
- not_string_block = graph()->CreateBasicBlock();
- string_check = New<HIsStringAndBranch>(
- tag_value, first_test_block, not_string_block);
- FinishCurrentBlock(string_check);
-
- set_current_block(first_test_block);
- }
+ HValue* tag_value = Top();
+ Type* tag_type = stmt->tag()->bounds().lower;
// 1. Build all the tests, with dangling true branches
BailoutId default_id = BailoutId::None();
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) {
- default_id = clause->EntryId();
+ body_blocks.Add(NULL, zone());
+ if (default_id.IsNone()) default_id = clause->EntryId();
continue;
}
@@ -3571,48 +4443,35 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
CHECK_ALIVE(VisitForValue(clause->label()));
HValue* label_value = Pop();
+ Type* label_type = clause->label()->bounds().lower;
+ Type* combined_type = clause->compare_type();
+ HControlInstruction* compare = BuildCompareInstruction(
+ Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
+ combined_type,
+ ScriptPositionToSourcePosition(stmt->tag()->position()),
+ ScriptPositionToSourcePosition(clause->label()->position()),
+ PUSH_BEFORE_SIMULATE, clause->id());
+
HBasicBlock* next_test_block = graph()->CreateBasicBlock();
HBasicBlock* body_block = graph()->CreateBasicBlock();
-
- HControlInstruction* compare;
-
- if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
- if (!clause->compare_type()->Is(Type::Smi())) {
- Add<HDeoptimize>("Non-smi switch type", Deoptimizer::SOFT);
- }
-
- HCompareNumericAndBranch* compare_ =
- New<HCompareNumericAndBranch>(tag_value,
- label_value,
- Token::EQ_STRICT);
- compare_->set_observed_input_representation(
- Representation::Smi(), Representation::Smi());
- compare = compare_;
- } else {
- compare = New<HStringCompareAndBranch>(tag_value,
- label_value,
- Token::EQ_STRICT);
- }
-
+ body_blocks.Add(body_block, zone());
compare->SetSuccessorAt(0, body_block);
compare->SetSuccessorAt(1, next_test_block);
FinishCurrentBlock(compare);
+ set_current_block(body_block);
+ Drop(1); // tag_value
+
set_current_block(next_test_block);
}
// Save the current block to use for the default or to join with the
// exit.
HBasicBlock* last_block = current_block();
-
- if (not_string_block != NULL) {
- BailoutId join_id = !default_id.IsNone() ? default_id : stmt->ExitId();
- last_block = CreateJoin(last_block, not_string_block, join_id);
- }
+ Drop(1); // tag_value
// 2. Loop over the clauses and the linked list of tests in lockstep,
// translating the clause bodies.
- HBasicBlock* curr_test_block = first_test_block;
HBasicBlock* fall_through_block = NULL;
BreakAndContinueInfo break_info(stmt);
@@ -3624,40 +4483,16 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// goes to.
HBasicBlock* normal_block = NULL;
if (clause->is_default()) {
- if (last_block != NULL) {
- normal_block = last_block;
- last_block = NULL; // Cleared to indicate we've handled it.
- }
+ if (last_block == NULL) continue;
+ normal_block = last_block;
+ last_block = NULL; // Cleared to indicate we've handled it.
} else {
- // If the current test block is deoptimizing due to an unhandled clause
- // of the switch, the test instruction is in the next block since the
- // deopt must end the current block.
- if (curr_test_block->IsDeoptimizing()) {
- ASSERT(curr_test_block->end()->SecondSuccessor() == NULL);
- curr_test_block = curr_test_block->end()->FirstSuccessor();
- }
- normal_block = curr_test_block->end()->FirstSuccessor();
- curr_test_block = curr_test_block->end()->SecondSuccessor();
+ normal_block = body_blocks[i];
}
- // Identify a block to emit the body into.
- if (normal_block == NULL) {
- if (fall_through_block == NULL) {
- // (a) Unreachable.
- if (clause->is_default()) {
- continue; // Might still be reachable clause bodies.
- } else {
- break;
- }
- } else {
- // (b) Reachable only as fall through.
- set_current_block(fall_through_block);
- }
- } else if (fall_through_block == NULL) {
- // (c) Reachable only normally.
+ if (fall_through_block == NULL) {
set_current_block(normal_block);
} else {
- // (d) Reachable both ways.
HBasicBlock* join = CreateJoin(fall_through_block,
normal_block,
clause->EntryId());
@@ -3712,12 +4547,17 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
HBasicBlock* loop_successor = NULL;
if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
set_current_block(body_exit);
- // The block for a true condition, the actual predecessor block of the
- // back edge.
- body_exit = graph()->CreateBasicBlock();
loop_successor = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
- if (body_exit->HasPredecessor()) {
+ if (stmt->cond()->ToBooleanIsFalse()) {
+ Goto(loop_successor);
+ body_exit = NULL;
+ } else {
+ // The block for a true condition, the actual predecessor block of the
+ // back edge.
+ body_exit = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
+ }
+ if (body_exit != NULL && body_exit->HasPredecessor()) {
body_exit->SetJoinId(stmt->BackEdgeId());
} else {
body_exit = NULL;
@@ -3913,7 +4753,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
set_current_block(body_exit);
HValue* current_index = Pop();
- Push(Add<HAdd>(current_index, graph()->GetConstant1()));
+ Push(AddUncasted<HAdd>(current_index, graph()->GetConstant1()));
body_exit = current_block();
}
@@ -3965,31 +4805,11 @@ void HOptimizedGraphBuilder::VisitCaseClause(CaseClause* clause) {
}
-static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
- Code* unoptimized_code, FunctionLiteral* expr) {
- int start_position = expr->start_position();
- for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
- Object* obj = rinfo->target_object();
- if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (shared->start_position() == start_position) {
- return Handle<SharedFunctionInfo>(shared);
- }
- }
- }
-
- return Handle<SharedFunctionInfo>();
-}
-
-
void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- Handle<SharedFunctionInfo> shared_info =
- SearchSharedFunctionInfo(current_info()->shared_info()->code(), expr);
+ Handle<SharedFunctionInfo> shared_info = expr->shared_info();
if (shared_info.is_null()) {
shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script());
}
@@ -4050,14 +4870,14 @@ void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
HOptimizedGraphBuilder::GlobalPropertyAccess
HOptimizedGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
+ Variable* var, LookupResult* lookup, PropertyAccessType access_type) {
if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
Handle<GlobalObject> global(current_info()->global_object());
global->Lookup(*var->name(), lookup);
if (!lookup->IsNormal() ||
- (is_store && lookup->IsReadOnly()) ||
+ (access_type == STORE && lookup->IsReadOnly()) ||
lookup->holder() != *global) {
return kUseGeneric;
}
@@ -4071,13 +4891,19 @@ HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
HValue* context = environment()->context();
int length = current_info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
- context = Add<HOuterContext>(context);
+ context = Add<HLoadNamedField>(
+ context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
}
return context;
}
void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+ if (expr->is_this()) {
+ current_info()->set_this_has_uses(true);
+ }
+
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4098,8 +4924,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
LookupResult lookup(isolate());
- GlobalPropertyAccess type =
- LookupGlobalProperty(variable, &lookup, false);
+ GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, LOAD);
if (type == kUseCell &&
current_info()->global_object()->IsAccessCheckNeeded()) {
@@ -4124,7 +4949,9 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
- HGlobalObject* global_object = Add<HGlobalObject>();
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HLoadGlobalGeneric* instr =
New<HLoadGlobalGeneric>(global_object,
variable->name(),
@@ -4179,81 +5006,13 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-static bool CanInlinePropertyAccess(Map* type) {
- return type->IsJSObjectMap() &&
- !type->is_dictionary_map() &&
- !type->has_named_interceptor();
-}
-
-
-static void LookupInPrototypes(Handle<Map> map,
- Handle<String> name,
- LookupResult* lookup) {
- while (map->prototype()->IsJSObject()) {
- Handle<JSObject> holder(JSObject::cast(map->prototype()));
- map = Handle<Map>(holder->map());
- if (!CanInlinePropertyAccess(*map)) break;
- map->LookupDescriptor(*holder, *name, lookup);
- if (lookup->IsFound()) return;
- }
- lookup->NotFound();
-}
-
-
-// Tries to find a JavaScript accessor of the given name in the prototype chain
-// starting at the given map. Return true iff there is one, including the
-// corresponding AccessorPair plus its holder (which could be null when the
-// accessor is found directly in the given map).
-static bool LookupAccessorPair(Handle<Map> map,
- Handle<String> name,
- Handle<AccessorPair>* accessors,
- Handle<JSObject>* holder) {
- Isolate* isolate = map->GetIsolate();
- LookupResult lookup(isolate);
-
- // Check for a JavaScript accessor directly in the map.
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValueFromMap(*map), isolate);
- if (!callback->IsAccessorPair()) return false;
- *accessors = Handle<AccessorPair>::cast(callback);
- *holder = Handle<JSObject>();
- return true;
- }
-
- // Everything else, e.g. a field, can't be an accessor call.
- if (lookup.IsFound()) return false;
-
- // Check for a JavaScript accessor somewhere in the proto chain.
- LookupInPrototypes(map, name, &lookup);
- if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValue(), isolate);
- if (!callback->IsAccessorPair()) return false;
- *accessors = Handle<AccessorPair>::cast(callback);
- *holder = Handle<JSObject>(lookup.holder());
- return true;
- }
-
- // We haven't found a JavaScript accessor anywhere.
- return false;
-}
-
-
-static bool LookupSetter(Handle<Map> map,
- Handle<String> name,
- Handle<JSFunction>* setter,
- Handle<JSObject>* holder) {
- Handle<AccessorPair> accessors;
- if (LookupAccessorPair(map, name, &accessors, holder) &&
- accessors->setter()->IsJSFunction()) {
- Handle<JSFunction> func(JSFunction::cast(accessors->setter()));
- CallOptimization call_optimization(func);
- // TODO(dcarney): temporary hack unless crankshaft can handle api calls.
- if (call_optimization.is_simple_api_call()) return false;
- *setter = func;
- return true;
- }
- return false;
+static bool CanInlinePropertyAccess(Type* type) {
+ if (type->Is(Type::NumberOrString())) return true;
+ if (!type->IsClass()) return false;
+ Handle<Map> map = type->AsClass();
+ return map->IsJSObjectMap() &&
+ !map->is_dictionary_map() &&
+ !map->has_named_interceptor();
}
@@ -4326,6 +5085,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ expr->BuildConstantProperties(isolate());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
HInstruction* literal;
@@ -4399,17 +5159,20 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
HInstruction* store;
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(literal, name, value));
+ CHECK_ALIVE(store = BuildNamedGeneric(
+ STORE, literal, name, value));
} else {
-#if DEBUG
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- ASSERT(!LookupSetter(map, name, &setter, &holder));
-#endif
- CHECK_ALIVE(store = BuildStoreNamedMonomorphic(literal,
- name,
- value,
- map));
+ PropertyAccessInfo info(this, STORE, ToType(map), name);
+ if (info.CanAccessMonomorphic()) {
+ HValue* checked_literal = BuildCheckMap(literal, map);
+ ASSERT(!info.lookup()->IsPropertyCallbacks());
+ store = BuildMonomorphicAccess(
+ &info, literal, checked_literal, value,
+ BailoutId::None(), BailoutId::None());
+ } else {
+ CHECK_ALIVE(store = BuildNamedGeneric(
+ STORE, literal, name, value));
+ }
}
AddInstruction(store);
if (store->HasObservableSideEffects()) {
@@ -4447,6 +5210,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ expr->BuildConstantElements(isolate());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
HInstruction* literal;
@@ -4506,19 +5270,23 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// pass an empty fixed array to the runtime function instead.
Handle<FixedArray> constants = isolate()->factory()->empty_fixed_array();
int literal_index = expr->literal_index();
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+ flags |= ArrayLiteral::kDisableMementos;
Add<HPushArgument>(Add<HConstant>(literals));
Add<HPushArgument>(Add<HConstant>(literal_index));
Add<HPushArgument>(Add<HConstant>(constants));
+ Add<HPushArgument>(Add<HConstant>(flags));
// TODO(mvstanton): Consider a flag to turn off creation of any
// AllocationMementos for this call: we are in crankshaft and should have
// learned enough about transition behavior to stop emitting mementos.
- Runtime::FunctionId function_id = (expr->depth() > 1)
- ? Runtime::kCreateArrayLiteral : Runtime::kCreateArrayLiteralShallow;
+ Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
- 3);
+ 4);
// De-opt if elements kind changed from boilerplate_elements_kind.
Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate());
@@ -4579,55 +5347,48 @@ HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
}
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
- HValue* checked_object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup) {
- ASSERT(lookup->IsFound());
- // If the property does not exist yet, we have to check that it wasn't made
- // readonly or turned into a setter by some meanwhile modifications on the
- // prototype chain.
- if (!lookup->IsProperty() && map->prototype()->IsJSReceiver()) {
- Object* proto = map->prototype();
- // First check that the prototype chain isn't affected already.
- LookupResult proto_result(isolate());
- proto->Lookup(*name, &proto_result);
- if (proto_result.IsProperty()) {
- // If the inherited property could induce readonly-ness, bail out.
- if (proto_result.IsReadOnly() || !proto_result.IsCacheable()) {
- Bailout(kImproperObjectOnPrototypeChainForStore);
- return NULL;
- }
- // We only need to check up to the preexisting property.
- proto = proto_result.holder();
- } else {
- // Otherwise, find the top prototype.
- while (proto->GetPrototype(isolate())->IsJSObject()) {
- proto = proto->GetPrototype(isolate());
- }
- ASSERT(proto->GetPrototype(isolate())->IsNull());
- }
- ASSERT(proto->IsJSObject());
- BuildCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(map->prototype())),
- Handle<JSObject>(JSObject::cast(proto)));
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
+ PropertyAccessInfo* info,
+ HValue* checked_object) {
+ HObjectAccess access = info->access();
+ if (access.representation().IsDouble()) {
+ // Load the heap number.
+ checked_object = Add<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL),
+ access.WithRepresentation(Representation::Tagged()));
+ checked_object->set_type(HType::HeapNumber());
+ // Load the double value from it.
+ access = HObjectAccess::ForHeapNumberValue();
}
+ return New<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL), access);
+}
+
- HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
- bool transition_to_field = lookup->IsTransitionToField(*map);
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
+ PropertyAccessInfo* info,
+ HValue* checked_object,
+ HValue* value) {
+ bool transition_to_field = info->lookup()->IsTransition();
+ // TODO(verwaest): Move this logic into PropertyAccessInfo.
+ HObjectAccess field_access = HObjectAccess::ForField(
+ info->map(), info->lookup(), info->name());
HStoreNamedField *instr;
- if (FLAG_track_double_fields && field_access.representation().IsDouble()) {
+ if (field_access.representation().IsDouble()) {
HObjectAccess heap_number_access =
field_access.WithRepresentation(Representation::Tagged());
if (transition_to_field) {
// The store requires a mutable HeapNumber to be allocated.
NoObservableSideEffectsScope no_side_effects(this);
HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
+
+ PretenureFlag pretenure_flag = !FLAG_allocation_site_pretenuring ?
+ isolate()->heap()->GetPretenureMode() : NOT_TENURED;
+
HInstruction* heap_number = Add<HAllocate>(heap_number_size,
- HType::HeapNumber(), isolate()->heap()->GetPretenureMode(),
+ HType::HeapNumber(),
+ pretenure_flag,
HEAP_NUMBER_TYPE);
AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
@@ -4637,92 +5398,51 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
heap_number);
} else {
// Already holds a HeapNumber; load the box and write its value field.
- HInstruction* heap_number = Add<HLoadNamedField>(checked_object,
- heap_number_access);
+ HInstruction* heap_number = Add<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL), heap_number_access);
heap_number->set_type(HType::HeapNumber());
instr = New<HStoreNamedField>(heap_number,
HObjectAccess::ForHeapNumberValue(),
- value);
+ value, STORE_TO_INITIALIZED_ENTRY);
}
} else {
// This is a normal store.
- instr = New<HStoreNamedField>(checked_object->ActualValue(),
- field_access,
- value);
+ instr = New<HStoreNamedField>(
+ checked_object->ActualValue(), field_access, value,
+ transition_to_field ? INITIALIZING_STORE : STORE_TO_INITIALIZED_ENTRY);
}
if (transition_to_field) {
- Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
- HConstant* transition_constant = Add<HConstant>(transition);
+ HConstant* transition_constant = Add<HConstant>(info->transition());
instr->SetTransition(transition_constant, top_info());
- // TODO(fschneider): Record the new map type of the object in the IR to
- // enable elimination of redundant checks after the transition store.
- instr->SetGVNFlag(kChangesMaps);
+ instr->SetChangesFlag(kMaps);
}
return instr;
}
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
- HValue* object,
- Handle<String> name,
- HValue* value) {
- return New<HStoreNamedGeneric>(
- object,
- name,
- value,
- function_strict_mode_flag());
-}
-
-
-// Sets the lookup result and returns true if the load/store can be inlined.
-static bool ComputeStoreField(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup,
- bool lookup_transition = true) {
- ASSERT(!type->is_observed());
- if (!CanInlinePropertyAccess(*type)) {
- lookup->NotFound();
- return false;
- }
- // If we directly find a field, the access can be inlined.
- type->LookupDescriptor(NULL, *name, lookup);
- if (lookup->IsField()) return true;
-
- if (!lookup_transition) return false;
-
- type->LookupTransition(NULL, *name, lookup);
- return lookup->IsTransitionToField(*type) &&
- (type->unused_property_fields() > 0);
-}
+bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
+ PropertyAccessInfo* info) {
+ if (!CanInlinePropertyAccess(type_)) return false;
+ // Currently only handle Type::Number as a polymorphic case.
+ // TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
+ // instruction.
+ if (type_->Is(Type::Number())) return false;
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
- HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map) {
- // Handle a store to a known field.
- LookupResult lookup(isolate());
- if (ComputeStoreField(map, name, &lookup)) {
- HCheckMaps* checked_object = AddCheckMap(object, map);
- return BuildStoreNamedField(checked_object, name, value, map, &lookup);
+ // Values are only compatible for monomorphic load if they all behave the same
+ // regarding value wrappers.
+ if (type_->Is(Type::NumberOrString())) {
+ if (!info->type_->Is(Type::NumberOrString())) return false;
+ } else {
+ if (info->type_->Is(Type::NumberOrString())) return false;
}
- // No luck, do a generic store.
- return BuildStoreNamedGeneric(object, name, value);
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
- PropertyAccessInfo* info) {
- if (!CanInlinePropertyAccess(*map_)) return false;
-
if (!LookupDescriptor()) return false;
if (!lookup_.IsFound()) {
return (!info->lookup_.IsFound() || info->has_holder()) &&
- map_->prototype() == info->map_->prototype();
+ map()->prototype() == info->map()->prototype();
}
// Mismatch if the other access info found the property in the prototype
@@ -4730,7 +5450,8 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
if (info->has_holder()) return false;
if (lookup_.IsPropertyCallbacks()) {
- return accessor_.is_identical_to(info->accessor_);
+ return accessor_.is_identical_to(info->accessor_) &&
+ api_holder_.is_identical_to(info->api_holder_);
}
if (lookup_.IsConstant()) {
@@ -4741,7 +5462,11 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
if (!info->lookup_.IsField()) return false;
Representation r = access_.representation();
- if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
+ if (IsLoad()) {
+ if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
+ } else {
+ if (!info->access_.representation().IsCompatibleForStore(r)) return false;
+ }
if (info->access_.offset() != access_.offset()) return false;
if (info->access_.IsInobject() != access_.IsInobject()) return false;
info->GeneralizeRepresentation(r);
@@ -4750,23 +5475,42 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
- map_->LookupDescriptor(NULL, *name_, &lookup_);
- return LoadResult(map_);
+ if (!type_->IsClass()) return true;
+ map()->LookupDescriptor(NULL, *name_, &lookup_);
+ return LoadResult(map());
}
bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
+ if (!IsLoad() && lookup_.IsProperty() &&
+ (lookup_.IsReadOnly() || !lookup_.IsCacheable())) {
+ return false;
+ }
+
if (lookup_.IsField()) {
access_ = HObjectAccess::ForField(map, &lookup_, name_);
} else if (lookup_.IsPropertyCallbacks()) {
Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate());
if (!callback->IsAccessorPair()) return false;
- Object* getter = Handle<AccessorPair>::cast(callback)->getter();
- if (!getter->IsJSFunction()) return false;
- Handle<JSFunction> accessor = handle(JSFunction::cast(getter));
- CallOptimization call_optimization(accessor);
- // TODO(dcarney): temporary hack unless crankshaft can handle api calls.
- if (call_optimization.is_simple_api_call()) return false;
+ Object* raw_accessor = IsLoad()
+ ? Handle<AccessorPair>::cast(callback)->getter()
+ : Handle<AccessorPair>::cast(callback)->setter();
+ if (!raw_accessor->IsJSFunction()) return false;
+ Handle<JSFunction> accessor = handle(JSFunction::cast(raw_accessor));
+ if (accessor->shared()->IsApiFunction()) {
+ CallOptimization call_optimization(accessor);
+ if (!call_optimization.is_simple_api_call()) return false;
+ CallOptimization::HolderLookup holder_lookup;
+ api_holder_ = call_optimization.LookupHolderOfExpectedType(
+ map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderNotFound:
+ return false;
+ case CallOptimization::kHolderIsReceiver:
+ case CallOptimization::kHolderFound:
+ break;
+ }
+ }
accessor_ = accessor;
} else if (lookup_.IsConstant()) {
constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
@@ -4777,14 +5521,15 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
- Handle<Map> map = map_;
+ Handle<Map> map = this->map();
+
while (map->prototype()->IsJSObject()) {
holder_ = handle(JSObject::cast(map->prototype()));
if (holder_->map()->is_deprecated()) {
JSObject::TryMigrateInstance(holder_);
}
map = Handle<Map>(holder_->map());
- if (!CanInlinePropertyAccess(*map)) {
+ if (!CanInlinePropertyAccess(ToType(map))) {
lookup_.NotFound();
return false;
}
@@ -4796,68 +5541,85 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
}
-bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadMonomorphic() {
- if (!CanInlinePropertyAccess(*map_)) return IsStringLength();
- if (IsJSObjectFieldAccessor()) return true;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
+ if (!CanInlinePropertyAccess(type_)) return false;
+ if (IsJSObjectFieldAccessor()) return IsLoad();
if (!LookupDescriptor()) return false;
- if (lookup_.IsFound()) return true;
- return LookupInPrototypes();
+ if (lookup_.IsFound()) {
+ if (IsLoad()) return true;
+ return !lookup_.IsReadOnly() && lookup_.IsCacheable();
+ }
+ if (!LookupInPrototypes()) return false;
+ if (IsLoad()) return true;
+
+ if (lookup_.IsPropertyCallbacks()) return true;
+ Handle<Map> map = this->map();
+ map->LookupTransition(NULL, *name_, &lookup_);
+ if (lookup_.IsTransitionToField() && map->unused_property_fields() > 0) {
+ return true;
+ }
+ return false;
}
-bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadAsMonomorphic(
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
SmallMapList* types) {
- ASSERT(map_.is_identical_to(types->first()));
- if (!CanLoadMonomorphic()) return false;
+ ASSERT(type_->Is(ToType(types->first())));
+ if (!CanAccessMonomorphic()) return false;
+ STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
if (types->length() > kMaxLoadPolymorphism) return false;
- if (IsStringLength()) {
+ HObjectAccess access = HObjectAccess::ForMap(); // bogus default
+ if (GetJSObjectFieldAccess(&access)) {
for (int i = 1; i < types->length(); ++i) {
- if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ PropertyAccessInfo test_info(
+ builder_, access_type_, ToType(types->at(i)), name_);
+ HObjectAccess test_access = HObjectAccess::ForMap(); // bogus default
+ if (!test_info.GetJSObjectFieldAccess(&test_access)) return false;
+ if (!access.Equals(test_access)) return false;
}
return true;
}
- if (IsArrayLength()) {
- bool is_fast = IsFastElementsKind(map_->elements_kind());
- for (int i = 1; i < types->length(); ++i) {
- Handle<Map> test_map = types->at(i);
- if (test_map->instance_type() != JS_ARRAY_TYPE) return false;
- if (IsFastElementsKind(test_map->elements_kind()) != is_fast) {
- return false;
- }
- }
- return true;
- }
+ // Currently only handle Type::Number as a polymorphic case.
+ // TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
+ // instruction.
+ if (type_->Is(Type::Number())) return false;
- if (IsJSObjectFieldAccessor()) {
- InstanceType instance_type = map_->instance_type();
- for (int i = 1; i < types->length(); ++i) {
- if (types->at(i)->instance_type() != instance_type) return false;
- }
- return true;
- }
+ // Multiple maps cannot transition to the same target map.
+ ASSERT(!IsLoad() || !lookup_.IsTransition());
+ if (lookup_.IsTransition() && types->length() > 1) return false;
for (int i = 1; i < types->length(); ++i) {
- PropertyAccessInfo test_info(isolate(), types->at(i), name_);
- if (!test_info.IsCompatibleForLoad(this)) return false;
+ PropertyAccessInfo test_info(
+ builder_, access_type_, ToType(types->at(i)), name_);
+ if (!test_info.IsCompatible(this)) return false;
}
return true;
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadMonomorphic(
+static bool NeedsWrappingFor(Type* type, Handle<JSFunction> target) {
+ return type->Is(Type::NumberOrString()) &&
+ target->shared()->is_classic_mode() &&
+ !target->shared()->native();
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
PropertyAccessInfo* info,
HValue* object,
- HInstruction* checked_object,
+ HValue* checked_object,
+ HValue* value,
BailoutId ast_id,
BailoutId return_id,
bool can_inline_accessor) {
HObjectAccess access = HObjectAccess::ForMap(); // bogus default
if (info->GetJSObjectFieldAccess(&access)) {
- return New<HLoadNamedField>(checked_object, access);
+ ASSERT(info->IsLoad());
+ return New<HLoadNamedField>(object, checked_object, access);
}
HValue* checked_holder = checked_object;
@@ -4866,242 +5628,202 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadMonomorphic(
checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
}
- if (!info->lookup()->IsFound()) return graph()->GetConstantUndefined();
+ if (!info->lookup()->IsFound()) {
+ ASSERT(info->IsLoad());
+ return graph()->GetConstantUndefined();
+ }
if (info->lookup()->IsField()) {
- return BuildLoadNamedField(checked_holder, info->access());
+ if (info->IsLoad()) {
+ return BuildLoadNamedField(info, checked_holder);
+ } else {
+ return BuildStoreNamedField(info, checked_object, value);
+ }
+ }
+
+ if (info->lookup()->IsTransition()) {
+ ASSERT(!info->IsLoad());
+ return BuildStoreNamedField(info, checked_object, value);
}
if (info->lookup()->IsPropertyCallbacks()) {
Push(checked_object);
- if (FLAG_inline_accessors &&
- can_inline_accessor &&
- TryInlineGetter(info->accessor(), ast_id, return_id)) {
- return NULL;
+ int argument_count = 1;
+ if (!info->IsLoad()) {
+ argument_count = 2;
+ Push(value);
+ }
+
+ if (NeedsWrappingFor(info->type(), info->accessor())) {
+ HValue* function = Add<HConstant>(info->accessor());
+ PushArgumentsFromEnvironment(argument_count);
+ return New<HCallFunction>(function, argument_count, WRAP_AND_CALL);
+ } else if (FLAG_inline_accessors && can_inline_accessor) {
+ bool success = info->IsLoad()
+ ? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
+ : TryInlineSetter(
+ info->accessor(), info->map(), ast_id, return_id, value);
+ if (success) return NULL;
}
- Add<HPushArgument>(Pop());
- return New<HCallConstantFunction>(info->accessor(), 1);
+
+ PushArgumentsFromEnvironment(argument_count);
+ return BuildCallConstantFunction(info->accessor(), argument_count);
}
ASSERT(info->lookup()->IsConstant());
- return New<HConstant>(info->constant());
+ if (info->IsLoad()) {
+ return New<HConstant>(info->constant());
+ } else {
+ return New<HCheckValue>(value, Handle<JSFunction>::cast(info->constant()));
+ }
}
-void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
+void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
+ PropertyAccessType access_type,
BailoutId ast_id,
BailoutId return_id,
HValue* object,
+ HValue* value,
SmallMapList* types,
Handle<String> name) {
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
+ HBasicBlock* number_block = NULL;
+ bool handled_string = false;
+
+ bool handle_smi = false;
+ STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(isolate(), types->at(i), name);
- if (info.CanLoadMonomorphic()) {
- if (count == 0) {
- BuildCheckHeapObject(object);
- join = graph()->CreateBasicBlock();
- }
- ++count;
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare = New<HCompareMap>(
- object, info.map(), if_true, if_false);
- FinishCurrentBlock(compare);
-
- set_current_block(if_true);
-
- HInstruction* load = BuildLoadMonomorphic(
- &info, object, compare, ast_id, return_id, FLAG_polymorphic_inlining);
- if (load == NULL) {
- if (HasStackOverflow()) return;
- } else {
- if (!load->IsLinked()) {
- AddInstruction(load);
- }
- if (!ast_context()->IsEffect()) Push(load);
+ PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
+ if (info.type()->Is(Type::String())) {
+ if (handled_string) continue;
+ handled_string = true;
+ }
+ if (info.CanAccessMonomorphic()) {
+ count++;
+ if (info.type()->Is(Type::Number())) {
+ handle_smi = true;
+ break;
}
-
- if (current_block() != NULL) Goto(join);
- set_current_block(if_false);
}
}
- // Finish up. Unconditionally deoptimize if we've handled all the maps we
- // know about and do not want to handle ones we've never seen. Otherwise
- // use a generic IC.
- if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- // Because the deopt may be the only path in the polymorphic load, make sure
- // that the environment stack matches the depth on deopt that it otherwise
- // would have had after a successful load.
- if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
- FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join);
- } else {
- HInstruction* load = Add<HLoadNamedGeneric>(object, name);
- if (!ast_context()->IsEffect()) Push(load);
+ count = 0;
+ HControlInstruction* smi_check = NULL;
+ handled_string = false;
- if (join != NULL) {
- Goto(join);
- } else {
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
- return;
+ for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
+ PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
+ if (info.type()->Is(Type::String())) {
+ if (handled_string) continue;
+ handled_string = true;
}
- }
-
- ASSERT(join != NULL);
- join->SetJoinId(ast_id);
- set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
-}
-
-
-bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
- BailoutId assignment_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name) {
- // Use monomorphic store if property lookup results in the same field index
- // for all maps. Requires special map check on the set of all handled maps.
- if (types->length() > kMaxStorePolymorphism) return false;
-
- LookupResult lookup(isolate());
- int count;
- Representation representation = Representation::None();
- HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
- for (count = 0; count < types->length(); ++count) {
- Handle<Map> map = types->at(count);
- // Pass false to ignore transitions.
- if (!ComputeStoreField(map, name, &lookup, false)) break;
- ASSERT(!map->is_observed());
-
- HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
- Representation new_representation = new_access.representation();
+ if (!info.CanAccessMonomorphic()) continue;
if (count == 0) {
- // First time through the loop; set access and representation.
- access = new_access;
- representation = new_representation;
- } else if (!representation.IsCompatibleForStore(new_representation)) {
- // Representations did not match.
- break;
- } else if (access.offset() != new_access.offset()) {
- // Offsets did not match.
- break;
- } else if (access.IsInobject() != new_access.IsInobject()) {
- // In-objectness did not match.
- break;
+ join = graph()->CreateBasicBlock();
+ if (handle_smi) {
+ HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
+ HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
+ number_block = graph()->CreateBasicBlock();
+ smi_check = New<HIsSmiAndBranch>(
+ object, empty_smi_block, not_smi_block);
+ FinishCurrentBlock(smi_check);
+ GotoNoSimulate(empty_smi_block, number_block);
+ set_current_block(not_smi_block);
+ } else {
+ BuildCheckHeapObject(object);
+ }
}
- }
+ ++count;
+ HBasicBlock* if_true = graph()->CreateBasicBlock();
+ HBasicBlock* if_false = graph()->CreateBasicBlock();
+ HUnaryControlInstruction* compare;
- if (count != types->length()) return false;
+ HValue* dependency;
+ if (info.type()->Is(Type::Number())) {
+ Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
+ compare = New<HCompareMap>(object, heap_number_map, top_info(),
+ if_true, if_false);
+ dependency = smi_check;
+ } else if (info.type()->Is(Type::String())) {
+ compare = New<HIsStringAndBranch>(object, if_true, if_false);
+ dependency = compare;
+ } else {
+ compare = New<HCompareMap>(object, info.map(), top_info(),
+ if_true, if_false);
+ dependency = compare;
+ }
+ FinishCurrentBlock(compare);
- // Everything matched; can use monomorphic store.
- BuildCheckHeapObject(object);
- HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
- HInstruction* store;
- CHECK_ALIVE_OR_RETURN(
- store = BuildStoreNamedField(
- checked_object, name, value, types->at(count - 1), &lookup),
- true);
- if (!ast_context()->IsEffect()) Push(value);
- AddInstruction(store);
- Add<HSimulate>(assignment_id);
- if (!ast_context()->IsEffect()) Drop(1);
- ast_context()->ReturnValue(value);
- return true;
-}
+ if (info.type()->Is(Type::Number())) {
+ GotoNoSimulate(if_true, number_block);
+ if_true = number_block;
+ }
+ set_current_block(if_true);
-void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
- BailoutId assignment_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name) {
- if (TryStorePolymorphicAsMonomorphic(
- assignment_id, object, value, types, name)) {
- return;
- }
+ HInstruction* access = BuildMonomorphicAccess(
+ &info, object, dependency, value, ast_id,
+ return_id, FLAG_polymorphic_inlining);
- // TODO(ager): We should recognize when the prototype chains for different
- // maps are identical. In that case we can avoid repeatedly generating the
- // same prototype map checks.
- int count = 0;
- HBasicBlock* join = NULL;
- for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup(isolate());
- if (ComputeStoreField(map, name, &lookup)) {
- if (count == 0) {
- BuildCheckHeapObject(object);
- join = graph()->CreateBasicBlock();
- }
- ++count;
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare = New<HCompareMap>(object, map, if_true, if_false);
- FinishCurrentBlock(compare);
-
- set_current_block(if_true);
- HInstruction* instr;
- CHECK_ALIVE(instr = BuildStoreNamedField(
- compare, name, value, map, &lookup));
- // Goto will add the HSimulate for the store.
- AddInstruction(instr);
- if (!ast_context()->IsEffect()) Push(value);
- Goto(join);
+ HValue* result = NULL;
+ switch (access_type) {
+ case LOAD:
+ result = access;
+ break;
+ case STORE:
+ result = value;
+ break;
+ }
- set_current_block(if_false);
+ if (access == NULL) {
+ if (HasStackOverflow()) return;
+ } else {
+ if (!access->IsLinked()) AddInstruction(access);
+ if (!ast_context()->IsEffect()) Push(result);
}
+
+ if (current_block() != NULL) Goto(join);
+ set_current_block(if_false);
}
// Finish up. Unconditionally deoptimize if we've handled all the maps we
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization("Unknown map in polymorphic store", join);
+ FinishExitWithHardDeoptimization("Uknown map in polymorphic access");
} else {
- HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
+ HInstruction* instr = BuildNamedGeneric(access_type, object, name, value);
AddInstruction(instr);
+ if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
if (join != NULL) {
- if (!ast_context()->IsEffect()) {
- Push(value);
- }
Goto(join);
} else {
- // The HSimulate for the store should not see the stored value in
- // effect contexts (it is not materialized at expr->id() in the
- // unoptimized code).
- if (instr->HasObservableSideEffects()) {
- if (ast_context()->IsEffect()) {
- Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
- } else {
- Push(value);
- Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
- Drop(1);
- }
- }
- return ast_context()->ReturnValue(value);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ return;
}
}
ASSERT(join != NULL);
- join->SetJoinId(assignment_id);
- set_current_block(join);
- if (!ast_context()->IsEffect()) {
- ast_context()->ReturnValue(Pop());
+ if (join->HasPredecessor()) {
+ join->SetJoinId(ast_id);
+ set_current_block(join);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ } else {
+ set_current_block(NULL);
}
}
static bool ComputeReceiverTypes(Expression* expr,
HValue* receiver,
- SmallMapList** t) {
+ SmallMapList** t,
+ Zone* zone) {
SmallMapList* types = expr->GetReceiverTypes();
*t = types;
bool monomorphic = expr->IsMonomorphic();
@@ -5110,7 +5832,16 @@ static bool ComputeReceiverTypes(Expression* expr,
types->FilterForPossibleTransitions(root_map);
monomorphic = types->length() == 1;
}
- return monomorphic && CanInlinePropertyAccess(*types->first());
+ return monomorphic && CanInlinePropertyAccess(
+ IC::MapToType<Type>(types->first(), zone));
+}
+
+
+static bool AreStringTypes(SmallMapList* types) {
+ for (int i = 0; i < types->length(); i++) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
}
@@ -5119,16 +5850,14 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
BailoutId ast_id,
BailoutId return_id,
bool is_uninitialized) {
- HValue* value = environment()->ExpressionStackAt(0);
-
if (!prop->key()->IsPropertyName()) {
// Keyed store.
+ HValue* value = environment()->ExpressionStackAt(0);
HValue* key = environment()->ExpressionStackAt(1);
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
HandleKeyedElementAccess(object, key, value, expr,
- true, // is_store
- &has_side_effects);
+ STORE, &has_side_effects);
Drop(3);
Push(value);
Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
@@ -5136,50 +5865,16 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
}
// Named store.
- HValue* object = environment()->ExpressionStackAt(1);
-
- if (is_uninitialized) {
- Add<HDeoptimize>("Insufficient type feedback for property assignment",
- Deoptimizer::SOFT);
- }
+ HValue* value = Pop();
+ HValue* object = Pop();
Literal* key = prop->key()->AsLiteral();
Handle<String> name = Handle<String>::cast(key->value());
ASSERT(!name.is_null());
- HInstruction* instr = NULL;
-
- SmallMapList* types;
- bool monomorphic = ComputeReceiverTypes(expr, object, &types);
-
- if (monomorphic) {
- Handle<Map> map = types->first();
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- AddCheckConstantFunction(holder, object, map);
- if (FLAG_inline_accessors &&
- TryInlineSetter(setter, ast_id, return_id, value)) {
- return;
- }
- Drop(2);
- Add<HPushArgument>(object);
- Add<HPushArgument>(value);
- instr = New<HCallConstantFunction>(setter, 2);
- } else {
- Drop(2);
- CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
- name,
- value,
- map));
- }
- } else if (types != NULL && types->length() > 1) {
- Drop(2);
- return HandlePolymorphicStoreNamedField(ast_id, object, value, types, name);
- } else {
- Drop(2);
- instr = BuildStoreNamedGeneric(object, name, value);
- }
+ HInstruction* instr = BuildNamedAccess(STORE, ast_id, return_id, expr,
+ object, name, value, is_uninitialized);
+ if (instr == NULL) return;
if (!ast_context()->IsEffect()) Push(value);
AddInstruction(instr);
@@ -5212,7 +5907,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HValue* value,
BailoutId ast_id) {
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, STORE);
if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
@@ -5236,9 +5931,11 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
} else {
- HGlobalObject* global_object = Add<HGlobalObject>();
- HStoreGlobalGeneric* instr =
- Add<HStoreGlobalGeneric>(global_object, var->name(),
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ HStoreNamedGeneric* instr =
+ Add<HStoreNamedGeneric>(global_object, var->name(),
value, function_strict_mode_flag());
USE(instr);
ASSERT(instr->HasObservableSideEffects());
@@ -5342,12 +6039,8 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- AddInstruction(instr);
- Push(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
- }
+ Push(BuildBinaryOperation(operation, left, right, PUSH_BEFORE_SIMULATE));
+
BuildStore(expr, prop, expr->id(),
expr->AssignmentId(), expr->IsUninitialized());
} else {
@@ -5498,8 +6191,10 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
- Add<HThrow>(value);
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ Add<HPushArgument>(value);
+ Add<HCallRuntime>(isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kThrow), 1);
Add<HSimulate>(expr->id());
// If the throw definitely exits the function, we can finish with a dummy
@@ -5511,55 +6206,63 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
}
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- HObjectAccess access) {
- if (FLAG_track_double_fields && access.representation().IsDouble()) {
- // load the heap number
- HLoadNamedField* heap_number = Add<HLoadNamedField>(
- object, access.WithRepresentation(Representation::Tagged()));
- heap_number->set_type(HType::HeapNumber());
- // load the double value from it
- return New<HLoadNamedField>(
- heap_number, HObjectAccess::ForHeapNumberValue());
+HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
+ if (string->IsConstant()) {
+ HConstant* c_string = HConstant::cast(string);
+ if (c_string->HasStringValue()) {
+ return Add<HConstant>(c_string->StringValue()->map()->instance_type());
+ }
}
- return New<HLoadNamedField>(object, access);
+ return Add<HLoadNamedField>(
+ Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap()),
+ static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
}
-HInstruction* HGraphBuilder::AddLoadNamedField(HValue* object,
- HObjectAccess access) {
- return AddInstruction(BuildLoadNamedField(object, access));
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* object,
- HValue* checked_string) {
- if (FLAG_fold_constants && object->IsConstant()) {
- HConstant* constant = HConstant::cast(object);
- if (constant->HasStringValue()) {
- return New<HConstant>(constant->StringValue()->length());
+HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
+ if (string->IsConstant()) {
+ HConstant* c_string = HConstant::cast(string);
+ if (c_string->HasStringValue()) {
+ return Add<HConstant>(c_string->StringValue()->length());
}
}
- return BuildLoadNamedField(checked_string, HObjectAccess::ForStringLength());
+ return Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
+ HObjectAccess::ForStringLength());
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
+ PropertyAccessType access_type,
HValue* object,
Handle<String> name,
- Property* expr) {
- if (expr->IsUninitialized()) {
- Add<HDeoptimize>("Insufficient type feedback for generic named load",
+ HValue* value,
+ bool is_uninitialized) {
+ if (is_uninitialized) {
+ Add<HDeoptimize>("Insufficient type feedback for generic named access",
Deoptimizer::SOFT);
}
- return New<HLoadNamedGeneric>(object, name);
+ if (access_type == LOAD) {
+ return New<HLoadNamedGeneric>(object, name);
+ } else {
+ return New<HStoreNamedGeneric>(
+ object, name, value, function_strict_mode_flag());
+ }
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
- return New<HLoadKeyedGeneric>(object, key);
+HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
+ PropertyAccessType access_type,
+ HValue* object,
+ HValue* key,
+ HValue* value) {
+ if (access_type == LOAD) {
+ return New<HLoadKeyedGeneric>(object, key);
+ } else {
+ return New<HStoreKeyedGeneric>(
+ object, key, value, function_strict_mode_flag());
+ }
}
@@ -5585,19 +6288,34 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
HValue* val,
HValue* dependency,
Handle<Map> map,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode) {
HCheckMaps* checked_object = Add<HCheckMaps>(object, map, top_info(),
dependency);
if (dependency) {
- checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ checked_object->ClearDependsOnFlag(kElementsKind);
+ }
+
+ if (access_type == STORE && map->prototype()->IsJSObject()) {
+    // Monomorphic stores need a prototype chain check because shape
+ // changes could allow callbacks on elements in the chain that
+ // aren't compatible with monomorphic keyed stores.
+ Handle<JSObject> prototype(JSObject::cast(map->prototype()));
+ Object* holder = map->prototype();
+ while (holder->GetPrototype(isolate())->IsJSObject()) {
+ holder = holder->GetPrototype(isolate());
+ }
+ ASSERT(holder->GetPrototype(isolate())->IsNull());
+
+ BuildCheckPrototypeMaps(prototype,
+ Handle<JSObject>(JSObject::cast(holder)));
}
LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
return BuildUncheckedMonomorphicElementAccess(
checked_object, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), is_store,
+ map->elements_kind(), access_type,
load_mode, store_mode);
}
@@ -5653,7 +6371,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
}
if (!has_double_maps && !has_smi_or_object_maps) return NULL;
- HCheckMaps* checked_object = Add<HCheckMaps>(object, maps);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, maps, top_info());
// FAST_ELEMENTS is considered more general than FAST_HOLEY_SMI_ELEMENTS.
// If we've seen both, the consolidated load must use FAST_HOLEY_ELEMENTS.
ElementsKind consolidated_elements_kind = has_seen_holey_elements
@@ -5663,7 +6381,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
checked_object, key, val,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
consolidated_elements_kind,
- false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
return instr;
}
@@ -5673,13 +6391,13 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* key,
HValue* val,
SmallMapList* maps,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
*has_side_effects = false;
BuildCheckHeapObject(object);
- if (!is_store) {
+ if (access_type == LOAD) {
HInstruction* consolidated_load =
TryBuildConsolidatedElementLoad(object, key, val, maps);
if (consolidated_load != NULL) {
@@ -5732,15 +6450,14 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HInstruction* instr = NULL;
if (untransitionable_map->has_slow_elements_kind() ||
!untransitionable_map->IsJSObjectMap()) {
- instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
- : BuildLoadKeyedGeneric(object, key));
+ instr = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
instr = BuildMonomorphicElementAccess(
- object, key, val, transition, untransitionable_map, is_store,
+ object, key, val, transition, untransitionable_map, access_type,
store_mode);
}
*has_side_effects |= instr->HasObservableSideEffects();
- return is_store ? NULL : instr;
+ return access_type == STORE ? NULL : instr;
}
HBasicBlock* join = graph()->CreateBasicBlock();
@@ -5752,15 +6469,13 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HBasicBlock* this_map = graph()->CreateBasicBlock();
HBasicBlock* other_map = graph()->CreateBasicBlock();
HCompareMap* mapcompare =
- New<HCompareMap>(object, map, this_map, other_map);
+ New<HCompareMap>(object, map, top_info(), this_map, other_map);
FinishCurrentBlock(mapcompare);
set_current_block(this_map);
HInstruction* access = NULL;
if (IsDictionaryElementsKind(elements_kind)) {
- access = is_store
- ? AddInstruction(BuildStoreKeyedGeneric(object, key, val))
- : AddInstruction(BuildLoadKeyedGeneric(object, key));
+ access = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
ASSERT(IsFastElementsKind(elements_kind) ||
IsExternalArrayElementsKind(elements_kind));
@@ -5769,14 +6484,14 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
access = BuildUncheckedMonomorphicElementAccess(
mapcompare, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- elements_kind, is_store,
+ elements_kind, access_type,
load_mode,
store_mode);
}
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (!is_store) {
+ if (access_type == LOAD) {
Push(access);
}
NoObservableSideEffectsScope scope(this);
@@ -5784,12 +6499,16 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(other_map);
}
+ // Ensure that we visited at least one map above that goes to join. This is
+ // necessary because FinishExitWithHardDeoptimization does an AbnormalExit
+ // rather than joining the join block. If this becomes an issue, insert a
+ // generic access in the case length() == 0.
+ ASSERT(join->predecessors()->length() > 0);
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization("Unknown map in polymorphic element access",
- join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic element access");
set_current_block(join);
- return is_store ? NULL : Pop();
+ return access_type == STORE ? NULL : Pop();
}
@@ -5798,63 +6517,64 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* key,
HValue* val,
Expression* expr,
- bool is_store,
+ PropertyAccessType access_type,
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
SmallMapList* types;
- bool monomorphic = ComputeReceiverTypes(expr, obj, &types);
+ bool monomorphic = ComputeReceiverTypes(expr, obj, &types, zone());
+
+ bool force_generic = false;
+ if (access_type == STORE &&
+ (monomorphic || (types != NULL && !types->is_empty()))) {
+ // Stores can't be mono/polymorphic if their prototype chain has dictionary
+    // elements. However, a receiver map that has dictionary elements itself
+ // should be left to normal mono/poly behavior (the other maps may benefit
+ // from highly optimized stores).
+ for (int i = 0; i < types->length(); i++) {
+ Handle<Map> current_map = types->at(i);
+ if (current_map->DictionaryElementsInPrototypeChainOnly()) {
+ force_generic = true;
+ monomorphic = false;
+ break;
+ }
+ }
+ }
if (monomorphic) {
Handle<Map> map = types->first();
- if (map->has_slow_elements_kind()) {
- instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
- : BuildLoadKeyedGeneric(obj, key);
- AddInstruction(instr);
+ if (map->has_slow_elements_kind() || !map->IsJSObjectMap()) {
+ instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
} else {
BuildCheckHeapObject(obj);
instr = BuildMonomorphicElementAccess(
- obj, key, val, NULL, map, is_store, expr->GetStoreMode());
+ obj, key, val, NULL, map, access_type, expr->GetStoreMode());
}
- } else if (types != NULL && !types->is_empty()) {
+ } else if (!force_generic && (types != NULL && !types->is_empty())) {
return HandlePolymorphicElementAccess(
- obj, key, val, types, is_store,
+ obj, key, val, types, access_type,
expr->GetStoreMode(), has_side_effects);
} else {
- if (is_store) {
+ if (access_type == STORE) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
- instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
- instr = BuildLoadKeyedGeneric(obj, key);
}
- AddInstruction(instr);
+ instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
}
*has_side_effects = instr->HasObservableSideEffects();
return instr;
}
-HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
- HValue* object,
- HValue* key,
- HValue* value) {
- return New<HStoreKeyedGeneric>(
- object,
- key,
- value,
- function_strict_mode_flag());
-}
-
-
void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
@@ -5932,6 +6652,45 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
}
+HInstruction* HOptimizedGraphBuilder::BuildNamedAccess(
+ PropertyAccessType access,
+ BailoutId ast_id,
+ BailoutId return_id,
+ Expression* expr,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized) {
+ SmallMapList* types;
+ ComputeReceiverTypes(expr, object, &types, zone());
+ ASSERT(types != NULL);
+
+ if (types->length() > 0) {
+ PropertyAccessInfo info(this, access, ToType(types->first()), name);
+ if (!info.CanAccessAsMonomorphic(types)) {
+ HandlePolymorphicNamedFieldAccess(
+ access, ast_id, return_id, object, value, types, name);
+ return NULL;
+ }
+
+ HValue* checked_object;
+ // Type::Number() is only supported by polymorphic load/call handling.
+ ASSERT(!info.type()->Is(Type::Number()));
+ BuildCheckHeapObject(object);
+ if (AreStringTypes(types)) {
+ checked_object =
+ Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
+ } else {
+ checked_object = Add<HCheckMaps>(object, types, top_info());
+ }
+ return BuildMonomorphicAccess(
+ &info, object, checked_object, value, ast_id, return_id);
+ }
+
+ return BuildNamedGeneric(access, object, name, value, is_uninitialized);
+}
+
+
void HOptimizedGraphBuilder::PushLoad(Property* expr,
HValue* object,
HValue* key) {
@@ -5942,14 +6701,6 @@ void HOptimizedGraphBuilder::PushLoad(Property* expr,
}
-static bool AreStringTypes(SmallMapList* types) {
- for (int i = 0; i < types->length(); i++) {
- if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
- }
- return true;
-}
-
-
void HOptimizedGraphBuilder::BuildLoad(Property* expr,
BailoutId ast_id) {
HInstruction* instr = NULL;
@@ -5969,32 +6720,10 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
HValue* object = Pop();
- SmallMapList* types;
- ComputeReceiverTypes(expr, object, &types);
- ASSERT(types != NULL);
-
- if (types->length() > 0) {
- PropertyAccessInfo info(isolate(), types->first(), name);
- if (!info.CanLoadAsMonomorphic(types)) {
- return HandlePolymorphicLoadNamedField(
- ast_id, expr->LoadId(), object, types, name);
- }
-
- BuildCheckHeapObject(object);
- HInstruction* checked_object;
- if (AreStringTypes(types)) {
- checked_object =
- Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
- } else {
- checked_object = Add<HCheckMaps>(object, types);
- }
- instr = BuildLoadMonomorphic(
- &info, object, checked_object, ast_id, expr->LoadId());
- if (instr == NULL) return;
- if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
- } else {
- instr = BuildLoadNamedGeneric(object, name, expr);
- }
+ instr = BuildNamedAccess(LOAD, ast_id, expr->LoadId(), expr,
+ object, name, NULL, expr->IsUninitialized());
+ if (instr == NULL) return;
+ if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
} else {
HValue* key = Pop();
@@ -6002,9 +6731,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr,
- false, // is_store
- &has_side_effects);
+ obj, key, NULL, expr, LOAD, &has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6050,7 +6777,7 @@ HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
AddInstruction(constant_value);
HCheckMaps* check =
Add<HCheckMaps>(constant_value, handle(constant->map()), info);
- check->ClearGVNFlag(kDependsOnElementsKind);
+ check->ClearDependsOnFlag(kElementsKind);
return check;
}
@@ -6077,83 +6804,60 @@ void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
}
-void HOptimizedGraphBuilder::AddCheckConstantFunction(
- Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map) {
- // Constant functions have the nice property that the map will change if they
- // are overwritten. Therefore it is enough to check the map of the holder and
- // its prototypes.
- AddCheckMap(receiver, receiver_map);
- AddCheckPrototypeMaps(holder, receiver_map);
+HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(
+ HValue* fun, int argument_count, bool pass_argument_count) {
+ return New<HCallJSFunction>(
+ fun, argument_count, pass_argument_count);
}
-class FunctionSorter {
- public:
- FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
- FunctionSorter(int index, int ticks, int ast_length, int src_length)
- : index_(index),
- ticks_(ticks),
- ast_length_(ast_length),
- src_length_(src_length) { }
-
- int index() const { return index_; }
- int ticks() const { return ticks_; }
- int ast_length() const { return ast_length_; }
- int src_length() const { return src_length_; }
-
- private:
- int index_;
- int ticks_;
- int ast_length_;
- int src_length_;
-};
-
+HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
+ HValue* fun, HValue* context,
+ int argument_count, HValue* expected_param_count) {
+ CallInterfaceDescriptor* descriptor =
+ isolate()->call_descriptor(Isolate::ArgumentAdaptorCall);
-inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
- int diff = lhs.ticks() - rhs.ticks();
- if (diff != 0) return diff > 0;
- diff = lhs.ast_length() - rhs.ast_length();
- if (diff != 0) return diff < 0;
- return lhs.src_length() < rhs.src_length();
-}
+ HValue* arity = Add<HConstant>(argument_count - 1);
+ HValue* op_vals[] = { fun, context, arity, expected_param_count };
-bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic(
- Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name) {
- if (types->length() > kMaxCallPolymorphism) return false;
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ HConstant* adaptor_value = Add<HConstant>(adaptor);
- PropertyAccessInfo info(isolate(), types->at(0), name);
- if (!info.CanLoadAsMonomorphic(types)) return false;
- if (!expr->ComputeTarget(info.map(), name)) return false;
+ return New<HCallWithDescriptor>(
+ adaptor_value, argument_count, descriptor,
+ Vector<HValue*>(op_vals, descriptor->environment_length()));
+}
- BuildCheckHeapObject(receiver);
- Add<HCheckMaps>(receiver, types);
- AddCheckPrototypeMaps(expr->holder(), info.map());
- if (FLAG_trace_inlining) {
- Handle<JSFunction> caller = current_info()->closure();
- SmartArrayPointer<char> caller_name =
- caller->shared()->DebugName()->ToCString();
- PrintF("Trying to inline the polymorphic call to %s from %s\n",
- *name->ToCString(), *caller_name);
- }
- if (!TryInlineCall(expr)) {
- int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- HCallConstantFunction* call =
- New<HCallConstantFunction>(expr->target(), argument_count);
- PreProcessCall(call);
- AddInstruction(call);
- if (!ast_context()->IsEffect()) Push(call);
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
+ Handle<JSFunction> jsfun, int argument_count) {
+ HValue* target = Add<HConstant>(jsfun);
+ // For constant functions, we try to avoid calling the
+  // argument adaptor and instead call the function directly.
+ int formal_parameter_count = jsfun->shared()->formal_parameter_count();
+ bool dont_adapt_arguments =
+ (formal_parameter_count ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ int arity = argument_count - 1;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+ if (can_invoke_directly) {
+ if (jsfun.is_identical_to(current_info()->closure())) {
+ graph()->MarkRecursive();
+ }
+ return NewPlainFunctionCall(target, argument_count, dont_adapt_arguments);
+ } else {
+ HValue* param_count_value = Add<HConstant>(formal_parameter_count);
+ HValue* context = Add<HLoadNamedField>(
+ target, static_cast<HValue*>(NULL),
+ HObjectAccess::ForFunctionContextPointer());
+ return NewArgumentAdaptorCall(target, context,
+ argument_count, param_count_value);
}
-
- return true;
+ UNREACHABLE();
+ return NULL;
}
@@ -6162,47 +6866,51 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
HValue* receiver,
SmallMapList* types,
Handle<String> name) {
- if (TryCallPolymorphicAsMonomorphic(expr, receiver, types, name)) return;
-
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- HBasicBlock* join = NULL;
- FunctionSorter order[kMaxCallPolymorphism];
- int ordered_functions = 0;
-
- Handle<Map> initial_string_map(
- isolate()->native_context()->string_function()->initial_map());
- Handle<Map> string_marker_map(
- JSObject::cast(initial_string_map->prototype())->map());
- Handle<Map> initial_number_map(
- isolate()->native_context()->number_function()->initial_map());
- Handle<Map> number_marker_map(
- JSObject::cast(initial_number_map->prototype())->map());
- Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
+ int order[kMaxCallPolymorphism];
bool handle_smi = false;
+ bool handled_string = false;
+ int ordered_functions = 0;
for (int i = 0;
i < types->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
- Handle<Map> map = types->at(i);
- if (expr->ComputeTarget(map, name)) {
- if (map.is_identical_to(number_marker_map)) handle_smi = true;
- order[ordered_functions++] =
- FunctionSorter(i,
- expr->target()->shared()->profiler_ticks(),
- InliningAstSize(expr->target()),
- expr->target()->shared()->SourceSize());
+ PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
+ if (info.CanAccessMonomorphic() &&
+ info.lookup()->IsConstant() &&
+ info.constant()->IsJSFunction()) {
+ if (info.type()->Is(Type::String())) {
+ if (handled_string) continue;
+ handled_string = true;
+ }
+ Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
+ if (info.type()->Is(Type::Number())) {
+ handle_smi = true;
+ }
+ expr->set_target(target);
+ order[ordered_functions++] = i;
}
}
- std::sort(order, order + ordered_functions);
-
HBasicBlock* number_block = NULL;
+ HBasicBlock* join = NULL;
+ handled_string = false;
+ int count = 0;
for (int fn = 0; fn < ordered_functions; ++fn) {
- int i = order[fn].index();
- Handle<Map> map = types->at(i);
- if (fn == 0) {
+ int i = order[fn];
+ PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
+ if (info.type()->Is(Type::String())) {
+ if (handled_string) continue;
+ handled_string = true;
+ }
+ // Reloads the target.
+ info.CanAccessMonomorphic();
+ Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
+
+ expr->set_target(target);
+ if (count == 0) {
// Only needed once.
join = graph()->CreateBasicBlock();
if (handle_smi) {
@@ -6211,59 +6919,69 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
number_block = graph()->CreateBasicBlock();
FinishCurrentBlock(New<HIsSmiAndBranch>(
receiver, empty_smi_block, not_smi_block));
- Goto(empty_smi_block, number_block);
+ GotoNoSimulate(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(receiver);
}
}
+ ++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
HUnaryControlInstruction* compare;
- if (handle_smi && map.is_identical_to(number_marker_map)) {
- compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
- map = initial_number_map;
- expr->set_number_check(
- Handle<JSObject>(JSObject::cast(map->prototype())));
- } else if (map.is_identical_to(string_marker_map)) {
+ Handle<Map> map = info.map();
+ if (info.type()->Is(Type::Number())) {
+ Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
+ compare = New<HCompareMap>(receiver, heap_number_map, top_info(),
+ if_true, if_false);
+ } else if (info.type()->Is(Type::String())) {
compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
- map = initial_string_map;
- expr->set_string_check(
- Handle<JSObject>(JSObject::cast(map->prototype())));
} else {
- compare = New<HCompareMap>(receiver, map, if_true, if_false);
- expr->set_map_check();
+ compare = New<HCompareMap>(receiver, map, top_info(),
+ if_true, if_false);
}
-
FinishCurrentBlock(compare);
- if (expr->check_type() == NUMBER_CHECK) {
- Goto(if_true, number_block);
+ if (info.type()->Is(Type::Number())) {
+ GotoNoSimulate(if_true, number_block);
if_true = number_block;
- number_block->SetJoinId(expr->id());
}
+
set_current_block(if_true);
- expr->ComputeTarget(map, name);
- AddCheckPrototypeMaps(expr->holder(), map);
- if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
+ AddCheckPrototypeMaps(info.holder(), map);
+
+ HValue* function = Add<HConstant>(expr->target());
+ environment()->SetExpressionStackAt(0, function);
+ Push(receiver);
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ bool needs_wrapping = NeedsWrappingFor(info.type(), target);
+ bool try_inline = FLAG_polymorphic_inlining && !needs_wrapping;
+ if (FLAG_trace_inlining && try_inline) {
Handle<JSFunction> caller = current_info()->closure();
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
- *name->ToCString(),
- *caller_name);
+ name->ToCString().get(),
+ caller_name.get());
}
- if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
+ if (try_inline && TryInlineCall(expr)) {
// Trying to inline will signal that we should bailout from the
// entire compilation by setting stack overflow on the visitor.
if (HasStackOverflow()) return;
} else {
- HCallConstantFunction* call =
- New<HCallConstantFunction>(expr->target(), argument_count);
- PreProcessCall(call);
+ // Since HWrapReceiver currently cannot actually wrap numbers and strings,
+ // use the regular CallFunctionStub for method calls to wrap the receiver.
+ // TODO(verwaest): Support creation of value wrappers directly in
+ // HWrapReceiver.
+ HInstruction* call = needs_wrapping
+ ? NewUncasted<HCallFunction>(
+ function, argument_count, WRAP_AND_CALL)
+ : BuildCallConstantFunction(target, argument_count);
+ PushArgumentsFromEnvironment(argument_count);
AddInstruction(call);
+ Drop(1); // Drop the function.
if (!ast_context()->IsEffect()) Push(call);
}
@@ -6275,15 +6993,27 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- // Because the deopt may be the only path in the polymorphic call, make sure
- // that the environment stack matches the depth on deopt that it otherwise
- // would have had after a successful call.
- Drop(argument_count);
- if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
- FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic call");
} else {
- HCallNamed* call = New<HCallNamed>(name, argument_count);
- PreProcessCall(call);
+ Property* prop = expr->expression()->AsProperty();
+ HInstruction* function = BuildNamedGeneric(
+ LOAD, receiver, name, NULL, prop->IsUninitialized());
+ AddInstruction(function);
+ Push(function);
+ AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+
+ environment()->SetExpressionStackAt(1, function);
+ environment()->SetExpressionStackAt(0, receiver);
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+ CallFunctionFlags flags = receiver->type().IsJSObject()
+ ? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
+ HInstruction* call = New<HCallFunction>(
+ function, argument_count, flags);
+
+ PushArgumentsFromEnvironment(argument_count);
+
+ Drop(1); // Function.
if (join != NULL) {
AddInstruction(call);
@@ -6317,10 +7047,11 @@ void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
if (reason == NULL) {
- PrintF("Inlined %s called from %s.\n", *target_name, *caller_name);
+ PrintF("Inlined %s called from %s.\n", target_name.get(),
+ caller_name.get());
} else {
PrintF("Did not inline %s called from %s (%s).\n",
- *target_name, *caller_name, reason);
+ target_name.get(), caller_name.get(), reason);
}
}
}
@@ -6337,6 +7068,11 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
Handle<JSFunction> caller = current_info()->closure();
Handle<SharedFunctionInfo> target_shared(target->shared());
+ // Always inline builtins marked for inlining.
+ if (target->IsBuiltin()) {
+ return target_shared->inline_builtin() ? 0 : kNotInlinable;
+ }
+
// Do a quick check on source code length to avoid parsing large
// inlining candidates.
if (target_shared->SourceSize() >
@@ -6346,7 +7082,7 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
}
// Target must be inlineable.
- if (!target->IsInlineable()) {
+ if (!target_shared->IsInlineable()) {
TraceInline(target, caller, "target not inlineable");
return kNotInlinable;
}
@@ -6360,13 +7096,13 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
}
-bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
- Handle<JSFunction> target,
+bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
int arguments_count,
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind) {
+ InliningKind inlining_kind,
+ HSourcePosition position) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -6377,18 +7113,6 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
return false;
}
-#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
- // Target must be able to use caller's context.
- CompilationInfo* outer_info = current_info();
- if (target->context() != outer_info->closure()->context() ||
- outer_info->scope()->contains_with() ||
- outer_info->scope()->num_heap_slots() > 0) {
- TraceInline(target, caller, "target requires context change");
- return false;
- }
-#endif
-
-
// Don't inline deeper than the maximum number of inlining levels.
HEnvironment* env = environment();
int current_level = 1;
@@ -6510,31 +7234,25 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
ASSERT(target_shared->has_deoptimization_support());
AstTyper::Run(&target_info);
+ int function_id = graph()->TraceInlinedFunction(target_shared, position);
+
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
- this, &target_info, inlining_kind);
+ this, &target_info, inlining_kind, function_id);
HConstant* undefined = graph()->GetConstantUndefined();
- bool undefined_receiver = HEnvironment::UseUndefinedReceiver(
- target, function, call_kind, inlining_kind);
+
HEnvironment* inner_env =
environment()->CopyForInlining(target,
arguments_count,
function,
undefined,
- function_state()->inlining_kind(),
- undefined_receiver);
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
- // IA32, ARM and MIPS only, overwrite the caller's context in the
- // deoptimization environment with the correct one.
- //
- // TODO(kmillikin): implement the same inlining on other platforms so we
- // can remove the unsightly ifdefs in this function.
+ function_state()->inlining_kind());
+
HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
inner_env->BindContext(context);
-#endif
Add<HSimulate>(return_id);
current_block()->UpdateEnvironment(inner_env);
@@ -6557,7 +7275,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
Add<HEnterInlined>(target, arguments_count, function,
function_state()->inlining_kind(),
function->scope()->arguments(),
- arguments_object, undefined_receiver);
+ arguments_object);
function_state()->set_entry(enter_inlined);
VisitDeclarations(target_info.scope()->declarations());
@@ -6663,76 +7381,73 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
}
-bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
- // The function call we are inlining is a method call if the call
- // is a property call.
- CallKind call_kind = (expr->expression()->AsProperty() == NULL)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
-
- return TryInline(call_kind,
- expr->target(),
+bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
+ return TryInline(expr->target(),
expr->arguments()->length(),
NULL,
expr->id(),
expr->ReturnId(),
- drop_extra ? DROP_EXTRA_ON_RETURN : NORMAL_RETURN);
+ NORMAL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
HValue* implicit_return_value) {
- return TryInline(CALL_AS_FUNCTION,
- expr->target(),
+ return TryInline(expr->target(),
expr->arguments()->length(),
implicit_return_value,
expr->id(),
expr->ReturnId(),
- CONSTRUCT_CALL_RETURN);
+ CONSTRUCT_CALL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+ Handle<Map> receiver_map,
BailoutId ast_id,
BailoutId return_id) {
- return TryInline(CALL_AS_METHOD,
- getter,
+ if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
+ return TryInline(getter,
0,
NULL,
ast_id,
return_id,
- GETTER_CALL_RETURN);
+ GETTER_CALL_RETURN,
+ source_position());
}
bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+ Handle<Map> receiver_map,
BailoutId id,
BailoutId assignment_id,
HValue* implicit_return_value) {
- return TryInline(CALL_AS_METHOD,
- setter,
+ if (TryInlineApiSetter(setter, receiver_map, id)) return true;
+ return TryInline(setter,
1,
implicit_return_value,
id, assignment_id,
- SETTER_CALL_RETURN);
+ SETTER_CALL_RETURN,
+ source_position());
}
bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function,
Call* expr,
int arguments_count) {
- return TryInline(CALL_AS_METHOD,
- function,
+ return TryInline(function,
arguments_count,
NULL,
expr->id(),
expr->ReturnId(),
- NORMAL_RETURN);
+ NORMAL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
-bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
- bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
switch (id) {
@@ -6744,14 +7459,10 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
case kMathAbs:
case kMathSqrt:
case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
if (expr->arguments()->length() == 1) {
HValue* argument = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
- if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
@@ -6760,9 +7471,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
if (expr->arguments()->length() == 2) {
HValue* right = Pop();
HValue* left = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* op = HMul::NewImul(zone(), context(), left, right);
- if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
@@ -6778,9 +7488,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
Call* expr,
HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type) {
- ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
+ Handle<Map> receiver_map) {
// Try to inline calls like Math.* as operations in the calling function.
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
@@ -6788,13 +7496,10 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
switch (id) {
case kStringCharCodeAt:
case kStringCharAt:
- if (argument_count == 2 && check_type == STRING_CHECK) {
+ if (argument_count == 2) {
HValue* index = Pop();
HValue* string = Pop();
- ASSERT(!expr->holder().is_null());
- BuildCheckPrototypeMaps(Call::GetPrototypeForPrimitiveCheck(
- STRING_CHECK, expr->holder()->GetIsolate()),
- expr->holder());
+ Drop(1); // Function.
HInstruction* char_code =
BuildStringCharCodeAt(string, index);
if (id == kStringCharCodeAt) {
@@ -6808,10 +7513,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
break;
case kStringFromCharCode:
- if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ if (argument_count == 2) {
HValue* argument = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
ast_context()->ReturnInstruction(result, expr->id());
return true;
@@ -6825,24 +7529,19 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
case kMathAbs:
case kMathSqrt:
case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
- if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ if (argument_count == 2) {
HValue* argument = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
break;
case kMathPow:
- if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ if (argument_count == 3) {
HValue* right = Pop();
HValue* left = Pop();
- Pop(); // Pop receiver.
+ Drop(2); // Receiver and function.
HInstruction* result = NULL;
// Use sqrt() if exponent is 0.5 or -0.5.
if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
@@ -6869,23 +7568,12 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
return true;
}
break;
- case kMathRandom:
- if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
- Drop(1); // Receiver.
- HGlobalObject* global_object = Add<HGlobalObject>();
- HRandom* result = New<HRandom>(global_object);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
- }
- break;
case kMathMax:
case kMathMin:
- if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ if (argument_count == 3) {
HValue* right = Pop();
HValue* left = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
: HMathMinMax::kMathMax;
HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
@@ -6894,16 +7582,110 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
break;
case kMathImul:
- if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ if (argument_count == 3) {
HValue* right = Pop();
HValue* left = Pop();
- Drop(1); // Receiver.
+ Drop(2); // Receiver and function.
HInstruction* result = HMul::NewImul(zone(), context(), left, right);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
+ case kArrayPop: {
+ if (receiver_map.is_null()) return false;
+ if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (!IsFastElementsKind(elements_kind)) return false;
+
+ Drop(expr->arguments()->length());
+ HValue* result;
+ HValue* reduced_length;
+ HValue* receiver = Pop();
+
+ HValue* checked_object = AddCheckMap(receiver, receiver_map);
+ HValue* length = Add<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL),
+ HObjectAccess::ForArrayLength(elements_kind));
+
+ Drop(1); // Function.
+
+ { NoObservableSideEffectsScope scope(this);
+ IfBuilder length_checker(this);
+
+ HValue* bounds_check = length_checker.If<HCompareNumericAndBranch>(
+ length, graph()->GetConstant0(), Token::EQ);
+ length_checker.Then();
+
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
+
+ length_checker.Else();
+ HValue* elements = AddLoadElements(checked_object);
+ // Ensure that we aren't popping from a copy-on-write array.
+ if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+ elements = BuildCopyElementsOnWrite(checked_object, elements,
+ elements_kind, length);
+ }
+ reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
+ result = AddElementAccess(elements, reduced_length, NULL,
+ bounds_check, elements_kind, LOAD);
+ Factory* factory = isolate()->factory();
+ double nan_double = FixedDoubleArray::hole_nan_as_double();
+ HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
+ ? Add<HConstant>(factory->the_hole_value())
+ : Add<HConstant>(nan_double);
+ if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+ elements_kind = FAST_HOLEY_ELEMENTS;
+ }
+ AddElementAccess(
+ elements, reduced_length, hole, bounds_check, elements_kind, STORE);
+ Add<HStoreNamedField>(
+ checked_object, HObjectAccess::ForArrayLength(elements_kind),
+ reduced_length, STORE_TO_INITIALIZED_ENTRY);
+
+ if (!ast_context()->IsEffect()) Push(result);
+
+ length_checker.End();
+ }
+ result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) Drop(1);
+
+ ast_context()->ReturnValue(result);
+ return true;
+ }
+ case kArrayPush: {
+ if (receiver_map.is_null()) return false;
+ if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (!IsFastElementsKind(elements_kind)) return false;
+
+ HValue* op_vals[] = {
+ context(),
+ // Receiver.
+ environment()->ExpressionStackAt(expr->arguments()->length())
+ };
+
+ const int argc = expr->arguments()->length();
+ // Includes receiver.
+ PushArgumentsFromEnvironment(argc + 1);
+
+ CallInterfaceDescriptor* descriptor =
+ isolate()->call_descriptor(Isolate::CallHandler);
+
+ ArrayPushStub stub(receiver_map->elements_kind(), argc);
+ Handle<Code> code = stub.GetCode(isolate());
+ HConstant* code_value = Add<HConstant>(code);
+
+ ASSERT((sizeof(op_vals) / kPointerSize) ==
+ descriptor->environment_length());
+
+ HInstruction* call = New<HCallWithDescriptor>(
+ code_value, argc + 1, descriptor,
+ Vector<HValue*>(op_vals, descriptor->environment_length()));
+ Drop(1); // Drop function.
+ ast_context()->ReturnInstruction(call, expr->id());
+ return true;
+ }
default:
// Not yet supported for inlining.
break;
@@ -6912,12 +7694,188 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
+bool HOptimizedGraphBuilder::TryInlineApiFunctionCall(Call* expr,
+ HValue* receiver) {
+ Handle<JSFunction> function = expr->target();
+ int argc = expr->arguments()->length();
+ SmallMapList receiver_maps;
+ return TryInlineApiCall(function,
+ receiver,
+ &receiver_maps,
+ argc,
+ expr->id(),
+ kCallApiFunction);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
+ Call* expr,
+ HValue* receiver,
+ SmallMapList* receiver_maps) {
+ Handle<JSFunction> function = expr->target();
+ int argc = expr->arguments()->length();
+ return TryInlineApiCall(function,
+ receiver,
+ receiver_maps,
+ argc,
+ expr->id(),
+ kCallApiMethod);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<JSFunction> function,
+ Handle<Map> receiver_map,
+ BailoutId ast_id) {
+ SmallMapList receiver_maps(1, zone());
+ receiver_maps.Add(receiver_map, zone());
+ return TryInlineApiCall(function,
+ NULL, // Receiver is on expression stack.
+ &receiver_maps,
+ 0,
+ ast_id,
+ kCallApiGetter);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<JSFunction> function,
+ Handle<Map> receiver_map,
+ BailoutId ast_id) {
+ SmallMapList receiver_maps(1, zone());
+ receiver_maps.Add(receiver_map, zone());
+ return TryInlineApiCall(function,
+ NULL, // Receiver is on expression stack.
+ &receiver_maps,
+ 1,
+ ast_id,
+ kCallApiSetter);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
+ HValue* receiver,
+ SmallMapList* receiver_maps,
+ int argc,
+ BailoutId ast_id,
+ ApiCallType call_type) {
+ CallOptimization optimization(function);
+ if (!optimization.is_simple_api_call()) return false;
+ Handle<Map> holder_map;
+ if (call_type == kCallApiFunction) {
+ // Cannot embed a direct reference to the global proxy map
+ // as it may be dropped on deserialization.
+ CHECK(!Serializer::enabled());
+ ASSERT_EQ(0, receiver_maps->length());
+ receiver_maps->Add(handle(
+ function->context()->global_object()->global_receiver()->map()),
+ zone());
+ }
+ CallOptimization::HolderLookup holder_lookup =
+ CallOptimization::kHolderNotFound;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_maps->first(), &holder_lookup);
+ if (holder_lookup == CallOptimization::kHolderNotFound) return false;
+
+ if (FLAG_trace_inlining) {
+ PrintF("Inlining api function ");
+ function->ShortPrint();
+ PrintF("\n");
+ }
+
+ bool drop_extra = false;
+ bool is_store = false;
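+ // Each call type sets up its arguments differently: function and method
+ // calls check the receiver maps and push from the environment, while
+ // getters and setters pop the receiver (and value) off the expression
+ // stack explicitly.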
+ switch (call_type) {
+ case kCallApiFunction:
+ case kCallApiMethod:
+ // Need to check that none of the receiver maps could have changed.
+ Add<HCheckMaps>(receiver, receiver_maps, top_info());
+ // Need to ensure the chain between receiver and api_holder is intact.
+ if (holder_lookup == CallOptimization::kHolderFound) {
+ AddCheckPrototypeMaps(api_holder, receiver_maps->first());
+ } else {
+ ASSERT_EQ(holder_lookup, CallOptimization::kHolderIsReceiver);
+ }
+ // Includes receiver.
+ PushArgumentsFromEnvironment(argc + 1);
+ // Drop function after call.
+ drop_extra = true;
+ break;
+ case kCallApiGetter:
+ // Receiver and prototype chain cannot have changed.
+ ASSERT_EQ(0, argc);
+ ASSERT_EQ(NULL, receiver);
+ // Receiver is on expression stack.
+ receiver = Pop();
+ Add<HPushArgument>(receiver);
+ break;
+ case kCallApiSetter:
+ {
+ is_store = true;
+ // Receiver and prototype chain cannot have changed.
+ ASSERT_EQ(1, argc);
+ ASSERT_EQ(NULL, receiver);
+ // Receiver and value are on expression stack.
+ HValue* value = Pop();
+ receiver = Pop();
+ Add<HPushArgument>(receiver);
+ Add<HPushArgument>(value);
+ break;
+ }
+ }
+
+ HValue* holder = NULL;
+ switch (holder_lookup) {
+ case CallOptimization::kHolderFound:
+ holder = Add<HConstant>(api_holder);
+ break;
+ case CallOptimization::kHolderIsReceiver:
+ holder = receiver;
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate());
+ bool call_data_is_undefined = call_data_obj->IsUndefined();
+ HValue* call_data = Add<HConstant>(call_data_obj);
+ ApiFunction fun(v8::ToCData<Address>(api_call_info->callback()));
+ ExternalReference ref = ExternalReference(&fun,
+ ExternalReference::DIRECT_API_CALL,
+ isolate());
+ HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
+
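+ // These operands are handed to the CallApiFunctionStub through the
+ // ApiFunctionCall descriptor; the ASSERT below checks that their number
+ // matches the descriptor's environment length.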
+ HValue* op_vals[] = {
+ Add<HConstant>(function),
+ call_data,
+ holder,
+ api_function_address,
+ context()
+ };
+
+ CallInterfaceDescriptor* descriptor =
+ isolate()->call_descriptor(Isolate::ApiFunctionCall);
+
+ CallApiFunctionStub stub(is_store, call_data_is_undefined, argc);
+ Handle<Code> code = stub.GetCode(isolate());
+ HConstant* code_value = Add<HConstant>(code);
+
+ ASSERT((sizeof(op_vals) / kPointerSize) ==
+ descriptor->environment_length());
+
+ HInstruction* call = New<HCallWithDescriptor>(
+ code_value, argc + 1, descriptor,
+ Vector<HValue*>(op_vals, descriptor->environment_length()));
+
+ if (drop_extra) Drop(1); // Drop function.
+ ast_context()->ReturnInstruction(call, ast_id);
+ return true;
+}
+
+
bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
- Expression* callee = expr->expression();
- Property* prop = callee->AsProperty();
- ASSERT(prop != NULL);
+ ASSERT(expr->expression()->IsProperty());
- if (!expr->IsMonomorphic() || expr->check_type() != RECEIVER_MAP_CHECK) {
+ if (!expr->IsMonomorphic()) {
return false;
}
Handle<Map> function_map = expr->GetReceiverTypes()->first();
@@ -6938,13 +7896,10 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
// Found pattern f.apply(receiver, arguments).
- CHECK_ALIVE_OR_RETURN(VisitForValue(prop->obj()), true);
- HValue* function = Top();
- AddCheckConstantFunction(expr->holder(), function, function_map);
- Drop(1);
-
CHECK_ALIVE_OR_RETURN(VisitForValue(args->at(0)), true);
- HValue* receiver = Pop();
+ HValue* receiver = Pop(); // receiver
+ HValue* function = Pop(); // f
+ Drop(1); // apply
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
@@ -6964,36 +7919,46 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HArgumentsObject* args = function_state()->entry()->arguments_object();
const ZoneList<HValue*>* arguments_values = args->arguments_values();
int arguments_count = arguments_values->length();
+ Push(function);
Push(BuildWrapReceiver(receiver, function));
for (int i = 1; i < arguments_count; i++) {
Push(arguments_values->at(i));
}
Handle<JSFunction> known_function;
- if (function->IsConstant()) {
- HConstant* constant_function = HConstant::cast(function);
+ if (function->IsConstant() &&
+ HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
known_function = Handle<JSFunction>::cast(
- constant_function->handle(isolate()));
+ HConstant::cast(function)->handle(isolate()));
int args_count = arguments_count - 1; // Excluding receiver.
if (TryInlineApply(known_function, expr, args_count)) return true;
}
- Drop(arguments_count - 1);
- Push(Add<HPushArgument>(Pop()));
- for (int i = 1; i < arguments_count; i++) {
- Push(Add<HPushArgument>(arguments_values->at(i)));
- }
-
- HInvokeFunction* call = New<HInvokeFunction>(function,
- known_function,
- arguments_count);
- Drop(arguments_count);
+ PushArgumentsFromEnvironment(arguments_count);
+ HInvokeFunction* call = New<HInvokeFunction>(
+ function, known_function, arguments_count);
+ Drop(1); // Function.
ast_context()->ReturnInstruction(call, expr->id());
return true;
}
}
+HValue* HOptimizedGraphBuilder::ImplicitReceiverFor(HValue* function,
+ Handle<JSFunction> target) {
+ SharedFunctionInfo* shared = target->shared();
+ if (shared->is_classic_mode() && !shared->native()) {
+ // Cannot embed a direct reference to the global proxy
+ // as it is dropped on deserialization.
+ CHECK(!Serializer::enabled());
+ Handle<JSObject> global_receiver(
+ target->context()->global_object()->global_receiver());
+ return Add<HConstant>(global_receiver);
+ }
+ return graph()->GetConstantUndefined();
+}
+
+
void HOptimizedGraphBuilder::VisitCall(Call* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -7004,73 +7969,80 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
Property* prop = callee->AsProperty();
if (prop != NULL) {
- if (!prop->key()->IsPropertyName()) {
- // Keyed function call.
- CHECK_ALIVE(VisitArgument(prop->obj()));
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ HValue* receiver = Top();
- CHECK_ALIVE(VisitForValue(prop->key()));
- // Push receiver and key like the non-optimized code generator expects it.
- HValue* key = Pop();
- HValue* receiver = Pop();
- Push(key);
- Push(receiver);
+ SmallMapList* types;
+ ComputeReceiverTypes(expr, receiver, &types, zone());
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ if (prop->key()->IsPropertyName() && types->length() > 0) {
+ Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+ PropertyAccessInfo info(this, LOAD, ToType(types->first()), name);
+ if (!info.CanAccessAsMonomorphic(types)) {
+ HandlePolymorphicCallNamed(expr, receiver, types, name);
+ return;
+ }
+ }
- call = New<HCallKeyed>(key, argument_count);
- Drop(argument_count + 1); // 1 is the key.
- return ast_context()->ReturnInstruction(call, expr->id());
+ HValue* key = NULL;
+ if (!prop->key()->IsPropertyName()) {
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ key = Pop();
}
- // Named function call.
- if (TryCallApply(expr)) return;
+ CHECK_ALIVE(PushLoad(prop, receiver, key));
+ HValue* function = Pop();
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- HValue* receiver =
- environment()->ExpressionStackAt(expr->arguments()->length());
+ // Push the function under the receiver.
+ environment()->SetExpressionStackAt(0, function);
- SmallMapList* types;
- bool was_monomorphic = expr->IsMonomorphic();
- bool monomorphic = ComputeReceiverTypes(expr, receiver, &types);
- if (!was_monomorphic && monomorphic) {
- monomorphic = expr->ComputeTarget(types->first(), name);
- }
+ Push(receiver);
- if (monomorphic) {
- Handle<Map> map = types->first();
- if (TryInlineBuiltinMethodCall(expr, receiver, map, expr->check_type())) {
+ if (function->IsConstant() &&
+ HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+ Handle<JSFunction> known_function = Handle<JSFunction>::cast(
+ HConstant::cast(function)->handle(isolate()));
+ expr->set_target(known_function);
+
+ if (TryCallApply(expr)) return;
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+ Handle<Map> map = types->length() == 1 ? types->first() : Handle<Map>();
+ if (TryInlineBuiltinMethodCall(expr, receiver, map)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
- expr->target()->ShortPrint();
+ known_function->ShortPrint();
PrintF("\n");
}
return;
}
-
- if (CallStubCompiler::HasCustomCallGenerator(expr->target()) ||
- expr->check_type() != RECEIVER_MAP_CHECK) {
- // When the target has a custom call IC generator, use the IC,
- // because it is likely to generate better code. Also use the IC
- // when a primitive receiver check is required.
- call = PreProcessCall(New<HCallNamed>(name, argument_count));
+ if (TryInlineApiMethodCall(expr, receiver, types)) return;
+
+ // Wrap the receiver if necessary.
+ if (NeedsWrappingFor(ToType(types->first()), known_function)) {
+ // Since HWrapReceiver currently cannot actually wrap numbers and
+ // strings, use the regular CallFunctionStub for method calls to wrap
+ // the receiver.
+ // TODO(verwaest): Support creation of value wrappers directly in
+ // HWrapReceiver.
+ call = New<HCallFunction>(
+ function, argument_count, WRAP_AND_CALL);
+ } else if (TryInlineCall(expr)) {
+ return;
} else {
- AddCheckConstantFunction(expr->holder(), receiver, map);
-
- if (TryInlineCall(expr)) return;
- call = PreProcessCall(
- New<HCallConstantFunction>(expr->target(), argument_count));
+ call = BuildCallConstantFunction(known_function, argument_count);
}
- } else if (types != NULL && types->length() > 1) {
- ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
- HandlePolymorphicCallNamed(expr, receiver, types, name);
- return;
} else {
- call = PreProcessCall(New<HCallNamed>(name, argument_count));
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ CallFunctionFlags flags = receiver->type().IsJSObject()
+ ? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
+ call = New<HCallFunction>(function, argument_count, flags);
}
+ PushArgumentsFromEnvironment(argument_count);
+
} else {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
@@ -7085,32 +8057,27 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// access check is not enabled we assume that the function will not change
// and generate optimized code for calling the function.
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, LOAD);
if (type == kUseCell &&
!current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<GlobalObject> global(current_info()->global_object());
known_global_function = expr->ComputeGlobalTarget(global, &lookup);
}
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* function = Top();
if (known_global_function) {
- // Push the global object instead of the global receiver because
- // code generated by the full code generator expects it.
- HGlobalObject* global_object = Add<HGlobalObject>();
- Push(global_object);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Pop();
Add<HCheckValue>(function, expr->target());
- // Replace the global object with the global receiver.
- HGlobalReceiver* global_receiver = Add<HGlobalReceiver>(global_object);
- // Index of the receiver from the top of the expression stack.
+ // Placeholder for the receiver.
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+ // Patch the receiver placeholder on the stack with the expected receiver.
+ HValue* receiver = ImplicitReceiverFor(function, expr->target());
const int receiver_index = argument_count - 1;
- ASSERT(environment()->ExpressionStackAt(receiver_index)->
- IsGlobalObject());
- environment()->SetExpressionStackAt(receiver_index, global_receiver);
+ environment()->SetExpressionStackAt(receiver_index, receiver);
- if (TryInlineBuiltinFunctionCall(expr, false)) { // Nothing to drop.
+ if (TryInlineBuiltinFunctionCall(expr)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
expr->target()->ShortPrint();
@@ -7118,26 +8085,15 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
return;
}
+ if (TryInlineApiFunctionCall(expr, receiver)) return;
if (TryInlineCall(expr)) return;
- if (expr->target().is_identical_to(current_info()->closure())) {
- graph()->MarkRecursive();
- }
-
- if (CallStubCompiler::HasCustomCallGenerator(expr->target())) {
- // When the target has a custom call IC generator, use the IC,
- // because it is likely to generate better code.
- call = PreProcessCall(New<HCallNamed>(var->name(), argument_count));
- } else {
- call = PreProcessCall(New<HCallKnownGlobal>(
- expr->target(), argument_count));
- }
+ PushArgumentsFromEnvironment(argument_count);
+ call = BuildCallConstantFunction(expr->target(), argument_count);
} else {
- HGlobalObject* receiver = Add<HGlobalObject>();
- Push(Add<HPushArgument>(receiver));
+ Push(Add<HPushArgument>(graph()->GetConstantUndefined()));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
- call = New<HCallGlobal>(var->name(), argument_count);
+ call = New<HCallFunction>(function, argument_count);
Drop(argument_count);
}
@@ -7146,13 +8102,17 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// evaluation of the arguments.
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
- HGlobalObject* global = Add<HGlobalObject>();
- HGlobalReceiver* receiver = Add<HGlobalReceiver>(global);
- Push(receiver);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
Add<HCheckValue>(function, expr->target());
- if (TryInlineBuiltinFunctionCall(expr, true)) { // Drop the function.
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+ HValue* receiver = ImplicitReceiverFor(function, expr->target());
+ const int receiver_index = argument_count - 1;
+ environment()->SetExpressionStackAt(receiver_index, receiver);
+
+ if (TryInlineBuiltinFunctionCall(expr)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
expr->target()->ShortPrint();
@@ -7160,32 +8120,95 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
return;
}
+ if (TryInlineApiFunctionCall(expr, receiver)) return;
- if (TryInlineCall(expr, true)) { // Drop function from environment.
- return;
- } else {
- call = PreProcessCall(New<HInvokeFunction>(function, expr->target(),
- argument_count));
- Drop(1); // The function.
- }
+ if (TryInlineCall(expr)) return;
+
+ call = PreProcessCall(New<HInvokeFunction>(
+ function, expr->target(), argument_count));
} else {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
- HGlobalObject* global_object = Add<HGlobalObject>();
- HGlobalReceiver* receiver = Add<HGlobalReceiver>(global_object);
+ HValue* receiver = graph()->GetConstantUndefined();
Push(Add<HPushArgument>(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
call = New<HCallFunction>(function, argument_count);
- Drop(argument_count + 1);
+ Drop(argument_count);
}
}
+ Drop(1); // Drop the function.
return ast_context()->ReturnInstruction(call, expr->id());
}
+void HOptimizedGraphBuilder::BuildInlinedCallNewArray(CallNew* expr) {
+ NoObservableSideEffectsScope no_effects(this);
+
+ int argument_count = expr->arguments()->length();
+ // We should at least have the constructor on the expression stack.
+ HValue* constructor = environment()->ExpressionStackAt(argument_count);
+
+ ElementsKind kind = expr->elements_kind();
+ Handle<AllocationSite> site = expr->allocation_site();
+ ASSERT(!site.is_null());
+
+ // Register on the site for deoptimization if the transition feedback changes.
+ AllocationSite::AddDependentCompilationInfo(
+ site, AllocationSite::TRANSITIONS, top_info());
+ HInstruction* site_instruction = Add<HConstant>(site);
+
+ // In the single constant argument case, we may have to adjust elements kind
+ // to avoid creating a packed non-empty array.
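+ // (e.g. "new Array(3)" yields three holes, which a packed kind cannot
+ // represent).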
+ if (argument_count == 1 && !IsHoleyElementsKind(kind)) {
+ HValue* argument = environment()->Top();
+ if (argument->IsConstant()) {
+ HConstant* constant_argument = HConstant::cast(argument);
+ ASSERT(constant_argument->HasSmiValue());
+ int constant_array_size = constant_argument->Integer32Value();
+ if (constant_array_size != 0) {
+ kind = GetHoleyElementsKind(kind);
+ }
+ }
+ }
+
+ // Build the array.
+ JSArrayBuilder array_builder(this,
+ kind,
+ site_instruction,
+ constructor,
+ DISABLE_ALLOCATION_SITES);
+ HValue* new_object;
+ if (argument_count == 0) {
+ new_object = array_builder.AllocateEmptyArray();
+ } else if (argument_count == 1) {
+ HValue* argument = environment()->Top();
+ new_object = BuildAllocateArrayFromLength(&array_builder, argument);
+ } else {
+ HValue* length = Add<HConstant>(argument_count);
+ // Smi arrays need to initialize array elements with the hole because
+ // bailout could occur if the arguments don't fit in a smi.
+ //
+ // TODO(mvstanton): If all the arguments are constants in smi range, then
+ // we could set fill_with_hole to false and save a few instructions.
+ JSArrayBuilder::FillMode fill_mode = IsFastSmiElementsKind(kind)
+ ? JSArrayBuilder::FILL_WITH_HOLE
+ : JSArrayBuilder::DONT_FILL_WITH_HOLE;
+ new_object = array_builder.AllocateArray(length, length, fill_mode);
+ HValue* elements = array_builder.GetElementsLocation();
+ for (int i = 0; i < argument_count; i++) {
+ HValue* value = environment()->ExpressionStackAt(argument_count - i - 1);
+ HValue* constant_i = Add<HConstant>(i);
+ Add<HStoreKeyed>(elements, constant_i, value, kind);
+ }
+ }
+
+ Drop(argument_count + 1); // Drop constructor and args.
+ ast_context()->ReturnValue(new_object);
+}
+
+
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
@@ -7195,22 +8218,68 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
}
+bool HOptimizedGraphBuilder::IsCallNewArrayInlineable(CallNew* expr) {
+ bool inline_ok = false;
+ Handle<JSFunction> caller = current_info()->closure();
+ Handle<JSFunction> target(isolate()->global_context()->array_function(),
+ isolate());
+ int argument_count = expr->arguments()->length();
+ // We should have the function plus array arguments on the environment stack.
+ ASSERT(environment()->length() >= (argument_count + 1));
+ Handle<AllocationSite> site = expr->allocation_site();
+ ASSERT(!site.is_null());
+
+ if (site->CanInlineCall()) {
+ // We also want to avoid inlining in certain one-argument scenarios.
+ if (argument_count == 1) {
+ HValue* argument = Top();
+ if (argument->IsConstant()) {
+ // Do not inline if the constant length argument is not a smi or
+ // outside the valid range for a fast array.
+ HConstant* constant_argument = HConstant::cast(argument);
+ if (constant_argument->HasSmiValue()) {
+ int value = constant_argument->Integer32Value();
+ inline_ok = value >= 0 &&
+ value < JSObject::kInitialMaxFastElementArray;
+ if (!inline_ok) {
+ TraceInline(target, caller,
+ "Length outside of valid array range");
+ }
+ }
+ } else {
+ inline_ok = true;
+ }
+ } else {
+ inline_ok = true;
+ }
+ } else {
+ TraceInline(target, caller, "AllocationSite requested no inlining.");
+ }
+
+ if (inline_ok) {
+ TraceInline(target, caller, NULL);
+ }
+ return inline_ok;
+}
+
+
void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
Factory* factory = isolate()->factory();
+ // The constructor function is on the stack in the unoptimized code
+ // during evaluation of the arguments.
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* function = Top();
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
if (FLAG_inline_construct &&
expr->IsMonomorphic() &&
IsAllocationInlineable(expr->target())) {
- // The constructor function is on the stack in the unoptimized code
- // during evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<JSFunction> constructor = expr->target();
HValue* check = Add<HCheckValue>(function, constructor);
@@ -7229,9 +8298,8 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// Allocate an instance of the implicit receiver object.
HValue* size_in_bytes = Add<HConstant>(instance_size);
PretenureFlag pretenure_flag =
- (FLAG_pretenuring_call_new &&
- isolate()->heap()->GetPretenureMode() == TENURED)
- ? TENURED : NOT_TENURED;
+ (FLAG_pretenuring_call_new && !FLAG_allocation_site_pretenuring) ?
+ isolate()->heap()->GetPretenureMode() : NOT_TENURED;
HAllocate* receiver =
Add<HAllocate>(size_in_bytes, HType::JSObject(), pretenure_flag,
JS_OBJECT_TYPE);
@@ -7240,28 +8308,32 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// Load the initial map from the constructor.
HValue* constructor_value = Add<HConstant>(constructor);
HValue* initial_map_value =
- Add<HLoadNamedField>(constructor_value, HObjectAccess::ForJSObjectOffset(
- JSFunction::kPrototypeOrInitialMapOffset));
+ Add<HLoadNamedField>(constructor_value, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMapAndOffset(
+ handle(constructor->map()),
+ JSFunction::kPrototypeOrInitialMapOffset));
// Initialize map and fields of the newly allocated object.
{ NoObservableSideEffectsScope no_effects(this);
ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
Add<HStoreNamedField>(receiver,
- HObjectAccess::ForJSObjectOffset(JSObject::kMapOffset),
+ HObjectAccess::ForMapAndOffset(initial_map, JSObject::kMapOffset),
initial_map_value);
HValue* empty_fixed_array = Add<HConstant>(factory->empty_fixed_array());
Add<HStoreNamedField>(receiver,
- HObjectAccess::ForJSObjectOffset(JSObject::kPropertiesOffset),
+ HObjectAccess::ForMapAndOffset(initial_map,
+ JSObject::kPropertiesOffset),
empty_fixed_array);
Add<HStoreNamedField>(receiver,
- HObjectAccess::ForJSObjectOffset(JSObject::kElementsOffset),
+ HObjectAccess::ForMapAndOffset(initial_map,
+ JSObject::kElementsOffset),
empty_fixed_array);
if (initial_map->inobject_properties() != 0) {
HConstant* undefined = graph()->GetConstantUndefined();
for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ int property_offset = initial_map->GetInObjectPropertyOffset(i);
Add<HStoreNamedField>(receiver,
- HObjectAccess::ForJSObjectOffset(property_offset),
+ HObjectAccess::ForMapAndOffset(initial_map, property_offset),
undefined);
}
}
@@ -7297,19 +8369,23 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// argument to the construct call.
Handle<JSFunction> array_function(
isolate()->global_context()->array_function(), isolate());
- CHECK_ALIVE(VisitArgument(expr->expression()));
- HValue* constructor = HPushArgument::cast(Top())->argument();
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ bool use_call_new_array = expr->target().is_identical_to(array_function);
+ if (use_call_new_array && IsCallNewArrayInlineable(expr)) {
+ // Verify we are still calling the array function for our native context.
+ Add<HCheckValue>(function, array_function);
+ BuildInlinedCallNewArray(expr);
+ return;
+ }
+
HBinaryCall* call;
- if (expr->target().is_identical_to(array_function)) {
- Handle<Cell> cell = expr->allocation_info_cell();
- Add<HCheckValue>(constructor, array_function);
- call = New<HCallNewArray>(constructor, argument_count,
- cell, expr->elements_kind());
+ if (use_call_new_array) {
+ Add<HCheckValue>(function, array_function);
+ call = New<HCallNewArray>(function, argument_count,
+ expr->elements_kind());
} else {
- call = New<HCallNew>(constructor, argument_count);
+ call = New<HCallNew>(function, argument_count);
}
- Drop(argument_count);
+ PreProcessCall(call);
return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -7331,6 +8407,186 @@ const HOptimizedGraphBuilder::InlineFunctionGenerator
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
+template <class ViewClass>
+void HGraphBuilder::BuildArrayBufferViewInitialization(
+ HValue* obj,
+ HValue* buffer,
+ HValue* byte_offset,
+ HValue* byte_length) {
+
+ for (int offset = ViewClass::kSize;
+ offset < ViewClass::kSizeWithInternalFields;
+ offset += kPointerSize) {
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForObservableJSObjectOffset(offset),
+ graph()->GetConstant0());
+ }
+
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewByteOffset(),
+ byte_offset);
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewByteLength(),
+ byte_length);
+
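+ // Link the new view into the buffer's weak list of views: the view's
+ // weak_next field takes over the buffer's current first view, and the
+ // buffer's weak_first_view is updated to point at the new view.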
+ HObjectAccess weak_first_view_access =
+ HObjectAccess::ForJSArrayBufferWeakFirstView();
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSArrayBufferViewWeakNext(),
+ Add<HLoadNamedField>(buffer, static_cast<HValue*>(NULL),
+ weak_first_view_access));
+ Add<HStoreNamedField>(
+ buffer, weak_first_view_access, obj);
+}
+
+
+void HOptimizedGraphBuilder::VisitDataViewInitialize(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* arguments = expr->arguments();
+
+ NoObservableSideEffectsScope scope(this);
+ ASSERT(arguments->length() == 4);
+ CHECK_ALIVE(VisitForValue(arguments->at(0)));
+ HValue* obj = Pop();
+
+ CHECK_ALIVE(VisitForValue(arguments->at(1)));
+ HValue* buffer = Pop();
+
+ CHECK_ALIVE(VisitForValue(arguments->at(2)));
+ HValue* byte_offset = Pop();
+
+ CHECK_ALIVE(VisitForValue(arguments->at(3)));
+ HValue* byte_length = Pop();
+
+ BuildArrayBufferViewInitialization<JSDataView>(
+ obj, buffer, byte_offset, byte_length);
+}
+
+
+void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* arguments = expr->arguments();
+
+ NoObservableSideEffectsScope scope(this);
+ static const int kObjectArg = 0;
+ static const int kArrayIdArg = 1;
+ static const int kBufferArg = 2;
+ static const int kByteOffsetArg = 3;
+ static const int kByteLengthArg = 4;
+ static const int kArgsLength = 5;
+ ASSERT(arguments->length() == kArgsLength);
+
+ CHECK_ALIVE(VisitForValue(arguments->at(kObjectArg)));
+ HValue* obj = Pop();
+
+ ASSERT(arguments->at(kArrayIdArg)->node_type() == AstNode::kLiteral);
+ Handle<Object> value =
+ static_cast<Literal*>(arguments->at(kArrayIdArg))->value();
+ ASSERT(value->IsSmi());
+ int array_id = Smi::cast(*value)->value();
+
+ CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
+ HValue* buffer = Pop();
+
+ HValue* byte_offset;
+ bool is_zero_byte_offset;
+
+ if (arguments->at(kByteOffsetArg)->node_type() == AstNode::kLiteral
+ && Smi::FromInt(0) ==
+ *static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) {
+ byte_offset = Add<HConstant>(static_cast<int32_t>(0));
+ is_zero_byte_offset = true;
+ } else {
+ CHECK_ALIVE(VisitForValue(arguments->at(kByteOffsetArg)));
+ byte_offset = Pop();
+ is_zero_byte_offset = false;
+ }
+
+ CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg)));
+ HValue* byte_length = Pop();
+
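+ // Only a smi byte_offset is handled inline; otherwise control falls back
+ // to the TypedArrayInitialize runtime call in the else branch below.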
+ IfBuilder byte_offset_smi(this);
+
+ if (!is_zero_byte_offset) {
+ byte_offset_smi.If<HIsSmiAndBranch>(byte_offset);
+ byte_offset_smi.Then();
+ }
+
+ { // byte_offset is Smi.
+ BuildArrayBufferViewInitialization<JSTypedArray>(
+ obj, buffer, byte_offset, byte_length);
+
+ ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
+ size_t element_size = 1; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(array_id, &array_type, &element_size);
+
+ HInstruction* length = AddUncasted<HDiv>(byte_length,
+ Add<HConstant>(static_cast<int32_t>(element_size)));
+
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSTypedArrayLength(),
+ length);
+
+ Handle<Map> external_array_map(
+ isolate()->heap()->MapForExternalArrayType(array_type));
+
+ HValue* elements =
+ Add<HAllocate>(
+ Add<HConstant>(ExternalArray::kAlignedSize),
+ HType::JSArray(),
+ NOT_TENURED,
+ external_array_map->instance_type());
+
+ AddStoreMapConstant(elements, external_array_map);
+
+ HValue* backing_store = Add<HLoadNamedField>(
+ buffer, static_cast<HValue*>(NULL),
+ HObjectAccess::ForJSArrayBufferBackingStore());
+
+ HValue* typed_array_start;
+ if (is_zero_byte_offset) {
+ typed_array_start = backing_store;
+ } else {
+ HInstruction* external_pointer =
+ AddUncasted<HAdd>(backing_store, byte_offset);
+ // Arguments are checked prior to the call to TypedArrayInitialize,
+ // including byte_offset.
+ external_pointer->ClearFlag(HValue::kCanOverflow);
+ typed_array_start = external_pointer;
+ }
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForExternalArrayExternalPointer(),
+ typed_array_start);
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForFixedArrayLength(),
+ length);
+ Add<HStoreNamedField>(
+ obj, HObjectAccess::ForElementsPointer(), elements);
+ }
+
+ if (!is_zero_byte_offset) {
+ byte_offset_smi.Else();
+ { // byte_offset is not Smi.
+ Push(Add<HPushArgument>(obj));
+ VisitArgument(arguments->at(kArrayIdArg));
+ Push(Add<HPushArgument>(buffer));
+ Push(Add<HPushArgument>(byte_offset));
+ Push(Add<HPushArgument>(byte_length));
+ Add<HCallRuntime>(expr->name(), expr->function(), kArgsLength);
+ Drop(kArgsLength);
+ }
+ }
+ byte_offset_smi.End();
+}
+
+
void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -7341,6 +8597,21 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
+
+ if (function->function_id == Runtime::kDataViewInitialize) {
+ return VisitDataViewInitialize(expr);
+ }
+
+ if (function->function_id == Runtime::kTypedArrayInitialize) {
+ return VisitTypedArrayInitialize(expr);
+ }
+
+ if (function->function_id == Runtime::kMaxSmi) {
+ ASSERT(expr->arguments()->length() == 0);
+ HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
+ return ast_context()->ReturnInstruction(max_smi, expr->id());
+ }
+
if (function->intrinsic_type == Runtime::INLINE) {
ASSERT(expr->name()->length() > 0);
ASSERT(expr->name()->Get(0) == '_');
@@ -7484,8 +8755,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
bool returns_original_input,
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
- Handle<Type> info = expr->type();
- Representation rep = Representation::FromType(info);
+ Representation rep = Representation::FromType(expr->type());
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
}
@@ -7495,7 +8765,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
// actual HChange instruction we need is (sometimes) added in a later
// phase, so it is not available now to be used as an input to HAdd and
// as the return value.
- HInstruction* number_input = Add<HForceRepresentation>(Pop(), rep);
+ HInstruction* number_input = AddUncasted<HForceRepresentation>(Pop(), rep);
if (!rep.IsDouble()) {
number_input->SetFlag(HInstruction::kFlexibleRepresentation);
number_input->SetFlag(HInstruction::kCannotBeTagged);
@@ -7540,7 +8810,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -7664,13 +8934,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
return New<HConstant>(s->Get(i));
}
}
- BuildCheckHeapObject(string);
- HValue* checkstring =
- Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
- HInstruction* length = BuildLoadStringLength(string, checkstring);
- AddInstruction(length);
- HInstruction* checked_index = Add<HBoundsCheck>(index, length);
- return New<HStringCharCodeAt>(string, checked_index);
+ string = BuildCheckString(string);
+ index = Add<HBoundsCheck>(index, AddLoadStringLength(string));
+ return New<HStringCharCodeAt>(string, index);
}
@@ -7740,23 +9006,24 @@ bool CanBeZero(HValue* right) {
HValue* HGraphBuilder::EnforceNumberType(HValue* number,
- Handle<Type> expected) {
+ Type* expected) {
if (expected->Is(Type::Smi())) {
- return Add<HForceRepresentation>(number, Representation::Smi());
+ return AddUncasted<HForceRepresentation>(number, Representation::Smi());
}
if (expected->Is(Type::Signed32())) {
- return Add<HForceRepresentation>(number, Representation::Integer32());
+ return AddUncasted<HForceRepresentation>(number,
+ Representation::Integer32());
}
return number;
}
-HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
+HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
Maybe<HConstant*> number = constant->CopyToTruncatedNumber(zone());
if (number.has_value) {
- *expected = handle(Type::Number(), isolate());
+ *expected = Type::Number(zone());
return AddInstruction(number.value);
}
}
@@ -7766,25 +9033,24 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
// pushes with a NoObservableSideEffectsScope.
NoObservableSideEffectsScope no_effects(this);
- Handle<Type> expected_type = *expected;
+ Type* expected_type = *expected;
// Separate the number type from the rest.
- Handle<Type> expected_obj = handle(Type::Intersect(
- expected_type, handle(Type::NonNumber(), isolate())), isolate());
- Handle<Type> expected_number = handle(Type::Intersect(
- expected_type, handle(Type::Number(), isolate())), isolate());
+ Type* expected_obj =
+ Type::Intersect(expected_type, Type::NonNumber(zone()), zone());
+ Type* expected_number =
+ Type::Intersect(expected_type, Type::Number(zone()), zone());
// We expect to get a number.
// (We need to check first, since Type::None->Is(Type::Any()) == true.)
if (expected_obj->Is(Type::None())) {
- ASSERT(!expected_number->Is(Type::None()));
+ ASSERT(!expected_number->Is(Type::None(zone())));
return value;
}
- if (expected_obj->Is(Type::Undefined())) {
+ if (expected_obj->Is(Type::Undefined(zone()))) {
// This is already done by HChange.
- *expected = handle(Type::Union(
- expected_number, handle(Type::Double(), isolate())), isolate());
+ *expected = Type::Union(expected_number, Type::Double(zone()), zone());
return value;
}
@@ -7792,29 +9058,55 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
}
-HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
+HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
- HValue* right) {
- Handle<Type> left_type = expr->left()->bounds().lower;
- Handle<Type> right_type = expr->right()->bounds().lower;
- Handle<Type> result_type = expr->bounds().lower;
+ HValue* right,
+ PushBeforeSimulateBehavior push_sim_result) {
+ Type* left_type = expr->left()->bounds().lower;
+ Type* right_type = expr->right()->bounds().lower;
+ Type* result_type = expr->bounds().lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
-
- return HGraphBuilder::BuildBinaryOperation(expr->op(), left, right,
- left_type, right_type, result_type, fixed_right_arg);
+ Handle<AllocationSite> allocation_site = expr->allocation_site();
+
+ PretenureFlag pretenure_flag = !FLAG_allocation_site_pretenuring ?
+ isolate()->heap()->GetPretenureMode() : NOT_TENURED;
+
+ HAllocationMode allocation_mode =
+ FLAG_allocation_site_pretenuring
+ ? (allocation_site.is_null()
+ ? HAllocationMode(NOT_TENURED)
+ : HAllocationMode(allocation_site))
+ : HAllocationMode(pretenure_flag);
+
+ HValue* result = HGraphBuilder::BuildBinaryOperation(
+ expr->op(), left, right, left_type, right_type, result_type,
+ fixed_right_arg, allocation_mode);
+ // Add a simulate after instructions with observable side effects, and
+ // after phis, which are the result of BuildBinaryOperation when we
+ // inlined some complex subgraph.
+ if (result->HasObservableSideEffects() || result->IsPhi()) {
+ if (push_sim_result == PUSH_BEFORE_SIMULATE) {
+ Push(result);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Drop(1);
+ } else {
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ }
+ }
+ return result;
}
-HInstruction* HGraphBuilder::BuildBinaryOperation(
+HValue* HGraphBuilder::BuildBinaryOperation(
Token::Value op,
HValue* left,
HValue* right,
- Handle<Type> left_type,
- Handle<Type> right_type,
- Handle<Type> result_type,
+ Type* left_type,
+ Type* right_type,
+ Type* result_type,
Maybe<int> fixed_right_arg,
- bool binop_stub) {
+ HAllocationMode allocation_mode) {
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
@@ -7828,7 +9120,7 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous
// defaults.
- left_type = handle(Type::Any(), isolate());
+ left_type = Type::Any(zone());
} else {
if (!maybe_string_add) left = TruncateToNumber(left, &left_type);
left_rep = Representation::FromType(left_type);
@@ -7837,7 +9129,7 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
if (right_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
Deoptimizer::SOFT);
- right_type = handle(Type::Any(), isolate());
+ right_type = Type::Any(zone());
} else {
if (!maybe_string_add) right = TruncateToNumber(right, &right_type);
right_rep = Representation::FromType(right_type);
@@ -7848,18 +9140,12 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
(left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
// Validate type feedback for left argument.
if (left_type->Is(Type::String())) {
- IfBuilder if_isstring(this);
- if_isstring.If<HIsStringAndBranch>(left);
- if_isstring.Then();
- if_isstring.ElseDeopt("Expected string for LHS of binary operation");
+ left = BuildCheckString(left);
}
// Validate type feedback for right argument.
if (right_type->Is(Type::String())) {
- IfBuilder if_isstring(this);
- if_isstring.If<HIsStringAndBranch>(right);
- if_isstring.Then();
- if_isstring.ElseDeopt("Expected string for RHS of binary operation");
+ right = BuildCheckString(right);
}
// Convert left argument as necessary.
@@ -7871,7 +9157,7 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
Add<HPushArgument>(left);
Add<HPushArgument>(right);
- return NewUncasted<HInvokeFunction>(function, 2);
+ return AddUncasted<HInvokeFunction>(function, 2);
}
// Convert right argument as necessary.
@@ -7883,13 +9169,52 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
Add<HPushArgument>(left);
Add<HPushArgument>(right);
- return NewUncasted<HInvokeFunction>(function, 2);
+ return AddUncasted<HInvokeFunction>(function, 2);
+ }
+
+ // Fast path for empty constant strings.
+ if (left->IsConstant() &&
+ HConstant::cast(left)->HasStringValue() &&
+ HConstant::cast(left)->StringValue()->length() == 0) {
+ return right;
+ }
+ if (right->IsConstant() &&
+ HConstant::cast(right)->HasStringValue() &&
+ HConstant::cast(right)->StringValue()->length() == 0) {
+ return left;
+ }
+
+ // Register the dependent code with the allocation site.
+ if (!allocation_mode.feedback_site().is_null()) {
+ ASSERT(!graph()->info()->IsStub());
+ Handle<AllocationSite> site(allocation_mode.feedback_site());
+ AllocationSite::AddDependentCompilationInfo(
+ site, AllocationSite::TENURING, top_info());
}
- return NewUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_NONE);
+ // Inline the string addition into the stub when creating allocation
+ // mementos to gather allocation site feedback, or if we can statically
+ // infer that we're going to create a cons string.
+ if ((graph()->info()->IsStub() &&
+ allocation_mode.CreateAllocationMementos()) ||
+ (left->IsConstant() &&
+ HConstant::cast(left)->HasStringValue() &&
+ HConstant::cast(left)->StringValue()->length() + 1 >=
+ ConsString::kMinLength) ||
+ (right->IsConstant() &&
+ HConstant::cast(right)->HasStringValue() &&
+ HConstant::cast(right)->StringValue()->length() + 1 >=
+ ConsString::kMinLength)) {
+ return BuildStringAdd(left, right, allocation_mode);
+ }
+
+ // Fallback to using the string add stub.
+ return AddUncasted<HStringAdd>(
+ left, right, allocation_mode.GetPretenureMode(),
+ STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
}
- if (binop_stub) {
+ if (graph()->info()->IsStub()) {
left = EnforceNumberType(left, left_type);
right = EnforceNumberType(right, right_type);
}
@@ -7903,55 +9228,72 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
// Only the stub is allowed to call into the runtime, since otherwise we would
// inline several instructions (including the two pushes) for every tagged
// operation in optimized code, which is more expensive than a stub call.
- if (binop_stub && is_non_primitive) {
+ if (graph()->info()->IsStub() && is_non_primitive) {
HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
Add<HPushArgument>(left);
Add<HPushArgument>(right);
- instr = NewUncasted<HInvokeFunction>(function, 2);
+ instr = AddUncasted<HInvokeFunction>(function, 2);
} else {
switch (op) {
case Token::ADD:
- instr = NewUncasted<HAdd>(left, right);
+ instr = AddUncasted<HAdd>(left, right);
break;
case Token::SUB:
- instr = NewUncasted<HSub>(left, right);
+ instr = AddUncasted<HSub>(left, right);
break;
case Token::MUL:
- instr = NewUncasted<HMul>(left, right);
+ instr = AddUncasted<HMul>(left, right);
break;
- case Token::MOD:
- instr = NewUncasted<HMod>(left, right, fixed_right_arg);
+ case Token::MOD: {
+ if (fixed_right_arg.has_value) {
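+ // Type feedback pinned the right operand to a single value; if it is
+ // not already that constant, guard with a compare-and-deopt and then
+ // substitute the constant so HMod sees a fixed divisor.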
+ if (right->IsConstant()) {
+ HConstant* c_right = HConstant::cast(right);
+ if (c_right->HasInteger32Value()) {
+ ASSERT_EQ(fixed_right_arg.value, c_right->Integer32Value());
+ }
+ } else {
+ HConstant* fixed_right = Add<HConstant>(
+ static_cast<int>(fixed_right_arg.value));
+ IfBuilder if_same(this);
+ if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
+ if_same.Then();
+ if_same.ElseDeopt("Unexpected RHS of binary operation");
+ right = fixed_right;
+ }
+ }
+ instr = AddUncasted<HMod>(left, right);
break;
+ }
case Token::DIV:
- instr = NewUncasted<HDiv>(left, right);
+ instr = AddUncasted<HDiv>(left, right);
break;
case Token::BIT_XOR:
case Token::BIT_AND:
- instr = NewUncasted<HBitwise>(op, left, right);
+ instr = AddUncasted<HBitwise>(op, left, right);
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
if (left_type->Is(Type::Signed32()) &&
right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = NewUncasted<HRor>(operand, shift_amount);
+ instr = AddUncasted<HRor>(operand, shift_amount);
} else {
- instr = NewUncasted<HBitwise>(op, left, right);
+ instr = AddUncasted<HBitwise>(op, left, right);
}
break;
}
case Token::SAR:
- instr = NewUncasted<HSar>(left, right);
+ instr = AddUncasted<HSar>(left, right);
break;
case Token::SHR:
- instr = NewUncasted<HShr>(left, right);
+ instr = AddUncasted<HShr>(left, right);
if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
CanBeZero(right)) {
graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
- instr = NewUncasted<HShl>(left, right);
+ instr = AddUncasted<HShl>(left, right);
break;
default:
UNREACHABLE();
@@ -7963,7 +9305,7 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
binop->set_observed_input_representation(1, left_rep);
binop->set_observed_input_representation(2, right_rep);
binop->initialize_output_representation(result_rep);
- if (binop_stub) {
+ if (graph()->info()->IsStub()) {
// Stub should not call into stub.
instr->SetFlag(HValue::kCannotBeTagged);
// And should truncate on HForceRepresentation already.
@@ -8050,11 +9392,14 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
ASSERT(current_block() != NULL);
HValue* left_value = Top();
- if (left_value->IsConstant()) {
- HConstant* left_constant = HConstant::cast(left_value);
- if ((is_logical_and && left_constant->BooleanValue()) ||
- (!is_logical_and && !left_constant->BooleanValue())) {
- Drop(1); // left_value.
+ // Short-circuit left values that always evaluate to the same boolean value.
+ if (expr->left()->ToBooleanIsTrue() || expr->left()->ToBooleanIsFalse()) {
+ // l (evals true) && r -> r
+ // l (evals true) || r -> l
+ // l (evals false) && r -> l
+ // l (evals false) || r -> r
+ if (is_logical_and == expr->left()->ToBooleanIsTrue()) {
+ Drop(1);
CHECK_ALIVE(VisitForValue(expr->right()));
}
return ast_context()->ReturnValue(Pop());
@@ -8124,11 +9469,20 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* instr = BuildBinaryOperation(expr, left, right);
- return ast_context()->ReturnInstruction(instr, expr->id());
+ HValue* result =
+ BuildBinaryOperation(expr, left, right,
+ ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
+ : PUSH_BEFORE_SIMULATE);
+ if (FLAG_hydrogen_track_positions && result->IsBinaryOperation()) {
+ HBinaryOperation::cast(result)->SetOperandPositions(
+ zone(),
+ ScriptPositionToSourcePosition(expr->left()->position()),
+ ScriptPositionToSourcePosition(expr->right()->position()));
+ }
+ return ast_context()->ReturnValue(result);
}
@@ -8136,7 +9490,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
Handle<String> check) {
CHECK_ALIVE(VisitForTypeOf(sub_expr));
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ SetSourcePosition(expr->position());
HValue* value = Pop();
HTypeofIsAndBranch* instr = New<HTypeofIsAndBranch>(value, check);
return ast_context()->ReturnControl(instr, expr->id());
@@ -8160,7 +9514,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
@@ -8188,16 +9542,15 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
- Handle<Type> left_type = expr->left()->bounds().lower;
- Handle<Type> right_type = expr->right()->bounds().lower;
- Handle<Type> combined_type = expr->combined_type();
- Representation combined_rep = Representation::FromType(combined_type);
- Representation left_rep = Representation::FromType(left_type);
- Representation right_rep = Representation::FromType(right_type);
+ Type* left_type = expr->left()->bounds().lower;
+ Type* right_type = expr->right()->bounds().lower;
+ Type* combined_type = expr->combined_type();
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
+ if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+
HValue* right = Pop();
HValue* left = Pop();
Token::Value op = expr->op();
@@ -8256,39 +9609,69 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnInstruction(result, expr->id());
}
+ PushBeforeSimulateBehavior push_behavior =
+ ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
+ : PUSH_BEFORE_SIMULATE;
+ HControlInstruction* compare = BuildCompareInstruction(
+ op, left, right, left_type, right_type, combined_type,
+ ScriptPositionToSourcePosition(expr->left()->position()),
+ ScriptPositionToSourcePosition(expr->right()->position()),
+ push_behavior, expr->id());
+ if (compare == NULL) return; // Bailed out.
+ return ast_context()->ReturnControl(compare, expr->id());
+}
+
+
+HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Type* left_type,
+ Type* right_type,
+ Type* combined_type,
+ HSourcePosition left_position,
+ HSourcePosition right_position,
+ PushBeforeSimulateBehavior push_sim_result,
+ BailoutId bailout_id) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
if (combined_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for combined type "
"of binary operation",
Deoptimizer::SOFT);
- combined_type = left_type = right_type = handle(Type::Any(), isolate());
+ combined_type = left_type = right_type = Type::Any(zone());
}
+ Representation left_rep = Representation::FromType(left_type);
+ Representation right_rep = Representation::FromType(right_type);
+ Representation combined_rep = Representation::FromType(combined_type);
+
if (combined_type->Is(Type::Receiver())) {
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT: {
- // Can we get away with map check and not instance type check?
- if (combined_type->IsClass()) {
- Handle<Map> map = combined_type->AsClass();
- AddCheckMap(left, map);
- AddCheckMap(right, map);
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- return ast_context()->ReturnControl(result, expr->id());
- } else {
- BuildCheckHeapObject(left);
- Add<HCheckInstanceType>(left, HCheckInstanceType::IS_SPEC_OBJECT);
- BuildCheckHeapObject(right);
- Add<HCheckInstanceType>(right, HCheckInstanceType::IS_SPEC_OBJECT);
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- return ast_context()->ReturnControl(result, expr->id());
+ if (Token::IsEqualityOp(op)) {
+ // Can we get away with map check and not instance type check?
+ HValue* operand_to_check =
+ left->block()->block_id() < right->block()->block_id() ? left : right;
+ if (combined_type->IsClass()) {
+ Handle<Map> map = combined_type->AsClass();
+ AddCheckMap(operand_to_check, map);
+ HCompareObjectEqAndBranch* result =
+ New<HCompareObjectEqAndBranch>(left, right);
+ if (FLAG_hydrogen_track_positions) {
+ result->set_operand_position(zone(), 0, left_position);
+ result->set_operand_position(zone(), 1, right_position);
}
+ return result;
+ } else {
+ BuildCheckHeapObject(operand_to_check);
+ Add<HCheckInstanceType>(operand_to_check,
+ HCheckInstanceType::IS_SPEC_OBJECT);
+ HCompareObjectEqAndBranch* result =
+ New<HCompareObjectEqAndBranch>(left, right);
+ return result;
}
- default:
- return Bailout(kUnsupportedNonPrimitiveCompare);
+ } else {
+ Bailout(kUnsupportedNonPrimitiveCompare);
+ return NULL;
}
} else if (combined_type->Is(Type::InternalizedString()) &&
Token::IsEqualityOp(op)) {
@@ -8298,7 +9681,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- return ast_context()->ReturnControl(result, expr->id());
+ return result;
} else if (combined_type->Is(Type::String())) {
BuildCheckHeapObject(left);
Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
@@ -8306,18 +9689,32 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING);
HStringCompareAndBranch* result =
New<HStringCompareAndBranch>(left, right, op);
- return ast_context()->ReturnControl(result, expr->id());
+ return result;
} else {
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
- HCompareGeneric* result = New<HCompareGeneric>(left, right, op);
+ HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
- return ast_context()->ReturnInstruction(result, expr->id());
+ if (result->HasObservableSideEffects()) {
+ if (push_sim_result == PUSH_BEFORE_SIMULATE) {
+ Push(result);
+ AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+ Drop(1);
+ } else {
+ AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+ }
+ }
+ // TODO(jkummerow): Can we make this more efficient?
+ HBranch* branch = New<HBranch>(result);
+ return branch;
} else {
HCompareNumericAndBranch* result =
New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- return ast_context()->ReturnControl(result, expr->id());
+ if (FLAG_hydrogen_track_positions) {
+ result->SetOperandPositions(zone(), left_position, right_position);
+ }
+ return result;
}
}
}
@@ -8330,7 +9727,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
if (expr->op() == Token::EQ_STRICT) {
@@ -8342,9 +9739,8 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
return ast_context()->ReturnControl(instr, expr->id());
} else {
ASSERT_EQ(Token::EQ, expr->op());
- Handle<Type> type = expr->combined_type()->Is(Type::None())
- ? handle(Type::Any(), isolate_)
- : expr->combined_type();
+ Type* type = expr->combined_type()->Is(Type::None())
+ ? Type::Any(zone()) : expr->combined_type();
HIfContinuation continuation;
BuildCompareNil(value, type, &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
@@ -8366,7 +9762,7 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
Handle<JSObject> boilerplate_object,
- AllocationSiteContext* site_context) {
+ AllocationSiteUsageContext* site_context) {
NoObservableSideEffectsScope no_effects(this);
InstanceType instance_type = boilerplate_object->map()->instance_type();
ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
@@ -8375,8 +9771,17 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
? HType::JSArray() : HType::JSObject();
HValue* object_size_constant = Add<HConstant>(
boilerplate_object->map()->instance_size());
+
+ PretenureFlag pretenure_flag = isolate()->heap()->GetPretenureMode();
+ if (FLAG_allocation_site_pretenuring) {
+ pretenure_flag = site_context->current()->GetPretenureMode();
+ Handle<AllocationSite> site(site_context->current());
+ AllocationSite::AddDependentCompilationInfo(
+ site, AllocationSite::TENURING, top_info());
+ }
+
HInstruction* object = Add<HAllocate>(object_size_constant, type,
- isolate()->heap()->GetPretenureMode(), instance_type);
+ pretenure_flag, instance_type, site_context->current());
BuildEmitObjectHeader(boilerplate_object, object);
@@ -8389,11 +9794,19 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
if (elements_size > 0) {
HValue* object_elements_size = Add<HConstant>(elements_size);
if (boilerplate_object->HasFastDoubleElements()) {
+ // Allocation folding will not be able to fold |object| and
+ // |object_elements| together if they are pre-tenured.
+ if (pretenure_flag == TENURED) {
+ HConstant* empty_fixed_array = Add<HConstant>(
+ isolate()->factory()->empty_fixed_array());
+ Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+ empty_fixed_array);
+ }
object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), FIXED_DOUBLE_ARRAY_TYPE);
+ pretenure_flag, FIXED_DOUBLE_ARRAY_TYPE, site_context->current());
} else {
object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), FIXED_ARRAY_TYPE);
+ pretenure_flag, FIXED_ARRAY_TYPE, site_context->current());
}
}
BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
@@ -8406,7 +9819,8 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
// Copy in-object properties.
if (boilerplate_object->map()->NumberOfFields() != 0) {
- BuildEmitInObjectProperties(boilerplate_object, object, site_context);
+ BuildEmitInObjectProperties(boilerplate_object, object, site_context,
+ pretenure_flag);
}
return object;
}
@@ -8459,10 +9873,11 @@ void HOptimizedGraphBuilder::BuildInitElementsInObjectHeader(
void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> boilerplate_object,
HInstruction* object,
- AllocationSiteContext* site_context) {
- Handle<DescriptorArray> descriptors(
- boilerplate_object->map()->instance_descriptors());
- int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
+ AllocationSiteUsageContext* site_context,
+ PretenureFlag pretenure_flag) {
+ Handle<Map> boilerplate_map(boilerplate_object->map());
+ Handle<DescriptorArray> descriptors(boilerplate_map->instance_descriptors());
+ int limit = boilerplate_map->NumberOfOwnDescriptors();
int copied_fields = 0;
for (int i = 0; i < limit; i++) {
@@ -8479,7 +9894,7 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
// The access for the store depends on the type of the boilerplate.
HObjectAccess access = boilerplate_object->IsJSArray() ?
HObjectAccess::ForJSArrayOffset(property_offset) :
- HObjectAccess::ForJSObjectOffset(property_offset);
+ HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
@@ -8490,25 +9905,29 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Add<HStoreNamedField>(object, access, result);
} else {
Representation representation = details.representation();
- HInstruction* value_instruction = Add<HConstant>(value);
+ HInstruction* value_instruction;
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
- // TODO(mvstanton): This heap number alloc does not have a corresponding
+ // This heap number alloc does not have a corresponding
// AllocationSite. That is okay because
// 1) it's a child object of another object with a valid allocation site
// 2) we can just use the mode of the parent object for pretenuring
- // The todo is replace GetPretenureMode() with
- // site_context->top()->GetPretenureMode().
HInstruction* double_box =
Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
- isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE);
+ pretenure_flag, HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
- value_instruction);
+ Add<HConstant>(value));
value_instruction = double_box;
+ } else if (representation.IsSmi() && value->IsUninitialized()) {
+ value_instruction = graph()->GetConstant0();
+ // Ensure that Constant0 is stored as smi.
+ access = access.WithRepresentation(representation);
+ } else {
+ value_instruction = Add<HConstant>(value);
}
Add<HStoreNamedField>(object, access, value_instruction);
@@ -8521,7 +9940,8 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
for (int i = copied_fields; i < inobject_properties; i++) {
ASSERT(boilerplate_object->IsJSObject());
int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
- HObjectAccess access = HObjectAccess::ForJSObjectOffset(property_offset);
+ HObjectAccess access =
+ HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
Add<HStoreNamedField>(object, access, value_instruction);
}
}
@@ -8531,7 +9951,7 @@ void HOptimizedGraphBuilder::BuildEmitElements(
Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
HValue* object_elements,
- AllocationSiteContext* site_context) {
+ AllocationSiteUsageContext* site_context) {
ElementsKind kind = boilerplate_object->map()->elements_kind();
int elements_length = elements->length();
HValue* object_elements_length = Add<HConstant>(elements_length);
@@ -8572,7 +9992,7 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray(
Handle<FixedArrayBase> elements,
ElementsKind kind,
HValue* object_elements,
- AllocationSiteContext* site_context) {
+ AllocationSiteUsageContext* site_context) {
HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
@@ -8774,6 +10194,15 @@ void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateIsMinusZero(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HCompareMinusZeroAndBranch* result = New<HCompareMinusZeroAndBranch>(value);
+ return ast_context()->ReturnControl(result, call->id());
+}
+
+
void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -8890,9 +10319,28 @@ void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HValueOf* result = New<HValueOf>(value);
- return ast_context()->ReturnInstruction(result, call->id());
+ HValue* object = Pop();
+
+ IfBuilder if_objectisvalue(this);
+ HValue* objectisvalue = if_objectisvalue.If<HHasInstanceTypeAndBranch>(
+ object, JS_VALUE_TYPE);
+ if_objectisvalue.Then();
+ {
+ // Return the actual value.
+ Push(Add<HLoadNamedField>(
+ object, objectisvalue,
+ HObjectAccess::ForObservableJSObjectOffset(
+ JSValue::kValueOffset)));
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_objectisvalue.Else();
+ {
+ // If the object is not a value, return the object.
+ Push(object);
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_objectisvalue.End();
+ return ast_context()->ReturnValue(Pop());
}
@@ -8910,30 +10358,34 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ // We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
- HValue* string = Pop();
- HSeqStringSetChar* result = New<HSeqStringSetChar>(
- String::ONE_BYTE_ENCODING, string, index, value);
- return ast_context()->ReturnInstruction(result, call->id());
+ Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
+ index, value);
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ // We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
- HValue* string = Pop();
- HSeqStringSetChar* result = New<HSeqStringSetChar>(
- String::TWO_BYTE_ENCODING, string, index, value);
- return ast_context()->ReturnInstruction(result, call->id());
+ Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
+ index, value);
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
@@ -8943,31 +10395,33 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* value = Pop();
HValue* object = Pop();
- // Check if object is not a smi.
- HBasicBlock* if_smi = graph()->CreateBasicBlock();
- HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
- HBasicBlock* join = graph()->CreateBasicBlock();
- FinishCurrentBlock(New<HIsSmiAndBranch>(object, if_smi, if_heap_object));
- Goto(if_smi, join);
// Check if object is a JSValue.
- set_current_block(if_heap_object);
- HHasInstanceTypeAndBranch* typecheck =
- New<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
- HBasicBlock* if_js_value = graph()->CreateBasicBlock();
- HBasicBlock* not_js_value = graph()->CreateBasicBlock();
- typecheck->SetSuccessorAt(0, if_js_value);
- typecheck->SetSuccessorAt(1, not_js_value);
- FinishCurrentBlock(typecheck);
- Goto(not_js_value, join);
-
- // Create in-object property store to kValueOffset.
- set_current_block(if_js_value);
- Add<HStoreNamedField>(object,
- HObjectAccess::ForJSObjectOffset(JSValue::kValueOffset), value);
- Goto(if_js_value, join);
- join->SetJoinId(call->id());
- set_current_block(join);
+ IfBuilder if_objectisvalue(this);
+ if_objectisvalue.If<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
+ if_objectisvalue.Then();
+ {
+ // Create in-object property store to kValueOffset.
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset),
+ value);
+ if (!ast_context()->IsEffect()) {
+ Push(value);
+ }
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_objectisvalue.Else();
+ {
+ // Nothing to do in this case.
+ if (!ast_context()->IsEffect()) {
+ Push(value);
+ }
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_objectisvalue.End();
+ if (!ast_context()->IsEffect()) {
+ Drop(1);
+ }
return ast_context()->ReturnValue(value);
}
@@ -9027,14 +10481,6 @@ void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
}
-// Fast support for Math.random().
-void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
- HGlobalObject* global_object = Add<HGlobalObject>();
- HRandom* result = New<HRandom>(global_object);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
// Fast support for StringAdd.
void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
@@ -9042,7 +10488,7 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* result = New<HStringAdd>(left, right, STRING_ADD_CHECK_BOTH);
+ HInstruction* result = NewUncasted<HStringAdd>(left, right);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -9080,10 +10526,14 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
// Construct a RegExp exec result with two in-object properties.
void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HCallStub* result = New<HCallStub>(CodeStub::RegExpConstructResult, 3);
- Drop(3);
- return ast_context()->ReturnInstruction(result, call->id());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ HValue* input = Pop();
+ HValue* index = Pop();
+ HValue* length = Pop();
+ HValue* result = BuildRegExpConstructResult(length, index, input);
+ return ast_context()->ReturnValue(result);
}
@@ -9098,8 +10548,7 @@ void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* number = Pop();
- HValue* result = BuildNumberToString(
- number, handle(Type::Number(), isolate()));
+ HValue* result = BuildNumberToString(number, Type::Any(zone()));
return ast_context()->ReturnValue(result);
}
@@ -9117,31 +10566,39 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
HValue* function = Pop();
- // Branch for function proxies, or other non-functions.
- HHasInstanceTypeAndBranch* typecheck =
- New<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
- HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
- HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
- HBasicBlock* join = graph()->CreateBasicBlock();
- typecheck->SetSuccessorAt(0, if_jsfunction);
- typecheck->SetSuccessorAt(1, if_nonfunction);
- FinishCurrentBlock(typecheck);
-
- set_current_block(if_jsfunction);
- HInstruction* invoke_result = Add<HInvokeFunction>(function, arg_count);
- Drop(arg_count);
- Push(invoke_result);
- Goto(if_jsfunction, join);
-
- set_current_block(if_nonfunction);
- HInstruction* call_result = Add<HCallFunction>(function, arg_count);
- Drop(arg_count);
- Push(call_result);
- Goto(if_nonfunction, join);
+ IfBuilder if_is_jsfunction(this);
+ if_is_jsfunction.If<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
- set_current_block(join);
- join->SetJoinId(call->id());
- return ast_context()->ReturnValue(Pop());
+ if_is_jsfunction.Then();
+ {
+ HInstruction* invoke_result =
+ Add<HInvokeFunction>(function, arg_count);
+ Drop(arg_count);
+ if (!ast_context()->IsEffect()) {
+ Push(invoke_result);
+ }
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+
+ if_is_jsfunction.Else();
+ {
+ HInstruction* call_result =
+ Add<HCallFunction>(function, arg_count);
+ Drop(arg_count);
+ if (!ast_context()->IsEffect()) {
+ Push(call_result);
+ }
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_is_jsfunction.End();
+
+ if (ast_context()->IsEffect()) {
+ // EffectContext::ReturnValue ignores the value, so we can just pass
+ // 'undefined' (as we do not have the call result anymore).
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+ } else {
+ return ast_context()->ReturnValue(Pop());
+ }
}
@@ -9157,42 +10614,11 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::SIN);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::COS);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::TAN);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::LOG);
- Drop(1);
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathLog);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -9201,17 +10627,11 @@ void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HInstruction* result = New<HUnaryMathOperation>(value, kMathSqrt);
+ HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt);
return ast_context()->ReturnInstruction(result, call->id());
}
-// Check whether two RegExps are equivalent
-void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
- return Bailout(kInlinedRuntimeFunctionIsRegExpEquivalent);
-}
-
-
void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -9462,8 +10882,7 @@ HEnvironment* HEnvironment::CopyForInlining(
int arguments,
FunctionLiteral* function,
HConstant* undefined,
- InliningKind inlining_kind,
- bool undefined_receiver) const {
+ InliningKind inlining_kind) const {
ASSERT(frame_type() == JS_FUNCTION);
// Outer environment is a copy of this one without the arguments.
@@ -9501,12 +10920,6 @@ HEnvironment* HEnvironment::CopyForInlining(
ExpressionStackAt(arguments - i) : undefined;
inner->SetValueAt(i, push);
}
- // If the function we are inlining is a strict mode function or a
- // builtin function, pass undefined as the receiver for function
- // calls (instead of the global receiver).
- if (undefined_receiver) {
- inner->SetValueAt(0, undefined);
- }
inner->SetValueAt(arity + 1, context());
for (int i = arity + 2; i < inner->length(); ++i) {
inner->SetValueAt(i, undefined);
@@ -9542,7 +10955,7 @@ void HEnvironment::PrintToStd() {
HeapStringAllocator string_allocator;
StringStream trace(&string_allocator);
PrintTo(&trace);
- PrintF("%s", *trace.ToCString());
+ PrintF("%s", trace.ToCString().get());
}
@@ -9550,8 +10963,11 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
if (info->IsOptimizing()) {
Handle<String> name = info->function()->debug_name();
- PrintStringProperty("name", *name->ToCString());
- PrintStringProperty("method", *name->ToCString());
+ PrintStringProperty("name", name->ToCString().get());
+ PrintIndent();
+ trace_.Add("method \"%s:%d\"\n",
+ name->ToCString().get(),
+ info->optimization_id());
} else {
CodeStub::Major major_key = info->code_stub()->MajorKey();
PrintStringProperty("name", CodeStub::MajorName(major_key, false));
@@ -9562,7 +10978,7 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
- ASSERT(!FLAG_concurrent_recompilation);
+ ASSERT(!chunk->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
@@ -9570,7 +10986,7 @@ void HTracer::TraceLithium(const char* name, LChunk* chunk) {
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
- ASSERT(!FLAG_concurrent_recompilation);
+ ASSERT(!graph->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
@@ -9611,10 +11027,21 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
}
PrintEmptyProperty("xhandlers");
- const char* flags = current->IsLoopSuccessorDominator()
- ? "dom-loop-succ"
- : "";
- PrintStringProperty("flags", flags);
+
+ {
+ PrintIndent();
+ trace_.Add("flags");
+ if (current->IsLoopSuccessorDominator()) {
+ trace_.Add(" \"dom-loop-succ\"");
+ }
+ if (current->IsUnreachable()) {
+ trace_.Add(" \"dead\"");
+ }
+ if (current->is_osr_entry()) {
+ trace_.Add(" \"osr\"");
+ }
+ trace_.Add("\n");
+ }
if (current->dominator() != NULL) {
PrintBlockProperty("dominator", current->dominator()->block_id());
@@ -9654,13 +11081,22 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
Tag HIR_tag(this, "HIR");
for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
- int bci = 0;
int uses = instruction->UseCount();
PrintIndent();
- trace_.Add("%d %d ", bci, uses);
+ trace_.Add("0 %d ", uses);
instruction->PrintNameTo(&trace_);
trace_.Add(" ");
instruction->PrintTo(&trace_);
+ if (FLAG_hydrogen_track_positions &&
+ instruction->has_position() &&
+ instruction->position().raw() != 0) {
+ const HSourcePosition pos = instruction->position();
+ trace_.Add(" pos:");
+ if (pos.inlining_id() != 0) {
+ trace_.Add("%d_", pos.inlining_id());
+ }
+ trace_.Add("%d", pos.position());
+ }
trace_.Add(" <|@\n");
}
}
@@ -9679,6 +11115,9 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
trace_.Add("%d ",
LifetimePosition::FromInstructionIndex(i).Value());
linstr->PrintTo(&trace_);
+ trace_.Add(" [hir:");
+ linstr->hydrogen_value()->PrintNameTo(&trace_);
+ trace_.Add("]");
trace_.Add(" <|@\n");
}
}
@@ -9767,7 +11206,8 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type,
void HTracer::FlushToFile() {
- AppendChars(filename_.start(), *trace_.ToCString(), trace_.length(), false);
+ AppendChars(filename_.start(), trace_.ToCString().get(), trace_.length(),
+ false);
trace_.Reset();
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 8f4878d93e..b9d53be94e 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -110,8 +110,9 @@ class HBasicBlock V8_FINAL : public ZoneObject {
bool IsFinished() const { return end_ != NULL; }
void AddPhi(HPhi* phi);
void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr, int position);
+ void AddInstruction(HInstruction* instr, HSourcePosition position);
bool Dominates(HBasicBlock* other) const;
+ bool EqualToOrDominates(HBasicBlock* other) const;
int LoopNestingDepth() const;
void SetInitialEnvironment(HEnvironment* env);
@@ -136,7 +137,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
int PredecessorIndexOf(HBasicBlock* predecessor) const;
HPhi* AddNewPhi(int merged_index);
HSimulate* AddNewSimulate(BailoutId ast_id,
- int position,
+ HSourcePosition position,
RemovableSimulate removable = FIXED_SIMULATE) {
HSimulate* instr = CreateSimulate(ast_id, removable);
AddInstruction(instr, position);
@@ -173,6 +174,8 @@ class HBasicBlock V8_FINAL : public ZoneObject {
dominates_loop_successors_ = true;
}
+ void MarkSuccEdgeUnreachable(int succ);
+
inline Zone* zone() const;
#ifdef DEBUG
@@ -183,13 +186,13 @@ class HBasicBlock V8_FINAL : public ZoneObject {
friend class HGraphBuilder;
HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
- void Finish(HControlInstruction* last, int position);
- void FinishExit(HControlInstruction* instruction, int position);
+ void Finish(HControlInstruction* last, HSourcePosition position);
+ void FinishExit(HControlInstruction* instruction, HSourcePosition position);
void Goto(HBasicBlock* block,
- int position,
+ HSourcePosition position,
FunctionState* state = NULL,
bool add_simulate = true);
- void GotoNoSimulate(HBasicBlock* block, int position) {
+ void GotoNoSimulate(HBasicBlock* block, HSourcePosition position) {
Goto(block, position, NULL, false);
}
@@ -197,7 +200,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value,
FunctionState* state,
- int position);
+ HSourcePosition position);
private:
void RegisterPredecessor(HBasicBlock* pred);
@@ -468,6 +471,16 @@ class HGraph V8_FINAL : public ZoneObject {
void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
+ // If we are tracking source positions then this function assigns a unique
+ // identifier to each inlining and dumps function source if it was inlined
+ // for the first time during the current optimization.
+ int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ HSourcePosition position);
+
+ // Converts the given HSourcePosition to the absolute offset from the start of
+ // the corresponding script.
+ int SourcePositionToScriptPosition(HSourcePosition position);
+
private:
HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -513,6 +526,23 @@ class HGraph V8_FINAL : public ZoneObject {
int no_side_effects_scope_count_;
bool disallow_adding_new_values_;
+ class InlinedFunctionInfo {
+ public:
+ explicit InlinedFunctionInfo(Handle<SharedFunctionInfo> shared)
+ : shared_(shared), start_position_(shared->start_position()) {
+ }
+
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ int start_position() const { return start_position_; }
+
+ private:
+ Handle<SharedFunctionInfo> shared_;
+ int start_position_;
+ };
+
+ int next_inline_id_;
+ ZoneList<InlinedFunctionInfo> inlined_functions_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -640,16 +670,7 @@ class HEnvironment V8_FINAL : public ZoneObject {
int arguments,
FunctionLiteral* function,
HConstant* undefined,
- InliningKind inlining_kind,
- bool undefined_receiver) const;
-
- static bool UseUndefinedReceiver(Handle<JSFunction> closure,
- FunctionLiteral* function,
- CallKind call_kind,
- InliningKind inlining_kind) {
- return (closure->shared()->native() || !function->is_classic_mode()) &&
- call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN;
- }
+ InliningKind inlining_kind) const;
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_;
@@ -888,7 +909,8 @@ class FunctionState V8_FINAL {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind);
+ InliningKind inlining_kind,
+ int inlining_id);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
@@ -918,6 +940,8 @@ class FunctionState V8_FINAL {
bool arguments_pushed() { return arguments_elements() != NULL; }
+ int inlining_id() const { return inlining_id_; }
+
private:
HOptimizedGraphBuilder* owner_;
@@ -947,13 +971,19 @@ class FunctionState V8_FINAL {
HArgumentsObject* arguments_object_;
HArgumentsElements* arguments_elements_;
+ int inlining_id_;
+ HSourcePosition outer_source_position_;
+
FunctionState* outer_;
};
class HIfContinuation V8_FINAL {
public:
- HIfContinuation() : continuation_captured_(false) {}
+ HIfContinuation()
+ : continuation_captured_(false),
+ true_branch_(NULL),
+ false_branch_(NULL) {}
HIfContinuation(HBasicBlock* true_branch,
HBasicBlock* false_branch)
: continuation_captured_(true), true_branch_(true_branch),
@@ -992,13 +1022,43 @@ class HIfContinuation V8_FINAL {
};
+class HAllocationMode V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit HAllocationMode(Handle<AllocationSite> feedback_site)
+ : current_site_(NULL), feedback_site_(feedback_site),
+ pretenure_flag_(NOT_TENURED) {}
+ explicit HAllocationMode(HValue* current_site)
+ : current_site_(current_site), pretenure_flag_(NOT_TENURED) {}
+ explicit HAllocationMode(PretenureFlag pretenure_flag)
+ : current_site_(NULL), pretenure_flag_(pretenure_flag) {}
+
+ HValue* current_site() const { return current_site_; }
+ Handle<AllocationSite> feedback_site() const { return feedback_site_; }
+
+ bool CreateAllocationMementos() const V8_WARN_UNUSED_RESULT {
+ return current_site() != NULL;
+ }
+
+ PretenureFlag GetPretenureMode() const V8_WARN_UNUSED_RESULT {
+ if (!feedback_site().is_null()) return feedback_site()->GetPretenureMode();
+ return pretenure_flag_;
+ }
+
+ private:
+ HValue* current_site_;
+ Handle<AllocationSite> feedback_site_;
+ PretenureFlag pretenure_flag_;
+};
+
+
class HGraphBuilder {
public:
explicit HGraphBuilder(CompilationInfo* info)
: info_(info),
graph_(NULL),
current_block_(NULL),
- position_(RelocInfo::kNoPosition) {}
+ position_(HSourcePosition::Unknown()),
+ start_position_(0) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
@@ -1028,7 +1088,7 @@ class HGraphBuilder {
HBasicBlock* target,
FunctionState* state = NULL,
bool add_simulate = true) {
- from->Goto(target, position_, state, add_simulate);
+ from->Goto(target, source_position(), state, add_simulate);
}
void Goto(HBasicBlock* target,
FunctionState* state = NULL,
@@ -1044,7 +1104,7 @@ class HGraphBuilder {
void AddLeaveInlined(HBasicBlock* block,
HValue* return_value,
FunctionState* state) {
- block->AddLeaveInlined(return_value, state, position_);
+ block->AddLeaveInlined(return_value, state, source_position());
}
void AddLeaveInlined(HValue* return_value, FunctionState* state) {
return AddLeaveInlined(current_block(), return_value, state);
@@ -1054,13 +1114,13 @@ class HGraphBuilder {
HInstruction* NewUncasted() { return I::New(zone(), context()); }
template<class I>
- I* New() { return I::cast(NewUncasted<I>()); }
+ I* New() { return I::New(zone(), context()); }
template<class I>
HInstruction* AddUncasted() { return AddInstruction(NewUncasted<I>());}
template<class I>
- I* Add() { return I::cast(AddUncasted<I>());}
+ I* Add() { return AddInstructionTyped(New<I>());}
template<class I, class P1>
HInstruction* NewUncasted(P1 p1) {
@@ -1068,7 +1128,7 @@ class HGraphBuilder {
}
template<class I, class P1>
- I* New(P1 p1) { return I::cast(NewUncasted<I>(p1)); }
+ I* New(P1 p1) { return I::New(zone(), context(), p1); }
template<class I, class P1>
HInstruction* AddUncasted(P1 p1) {
@@ -1082,7 +1142,12 @@ class HGraphBuilder {
template<class I, class P1>
I* Add(P1 p1) {
- return I::cast(AddUncasted<I>(p1));
+ I* result = AddInstructionTyped(New<I>(p1));
+ // Specializations must have their parameters properly casted
+ // to avoid landing here.
+ ASSERT(!result->IsReturn() && !result->IsSimulate() &&
+ !result->IsDeoptimize());
+ return result;
}
template<class I, class P1, class P2>
@@ -1092,7 +1157,7 @@ class HGraphBuilder {
template<class I, class P1, class P2>
I* New(P1 p1, P2 p2) {
- return I::cast(NewUncasted<I>(p1, p2));
+ return I::New(zone(), context(), p1, p2);
}
template<class I, class P1, class P2>
@@ -1106,7 +1171,11 @@ class HGraphBuilder {
template<class I, class P1, class P2>
I* Add(P1 p1, P2 p2) {
- return I::cast(AddUncasted<I>(p1, p2));
+ I* result = AddInstructionTyped(New<I>(p1, p2));
+ // Specializations must have their parameters properly cast
+ // to avoid landing here.
+ ASSERT(!result->IsSimulate());
+ return result;
}
template<class I, class P1, class P2, class P3>
@@ -1116,7 +1185,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3>
I* New(P1 p1, P2 p2, P3 p3) {
- return I::cast(NewUncasted<I>(p1, p2, p3));
+ return I::New(zone(), context(), p1, p2, p3);
}
template<class I, class P1, class P2, class P3>
@@ -1126,7 +1195,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3>
I* Add(P1 p1, P2 p2, P3 p3) {
- return I::cast(AddUncasted<I>(p1, p2, p3));
+ return AddInstructionTyped(New<I>(p1, p2, p3));
}
template<class I, class P1, class P2, class P3, class P4>
@@ -1136,7 +1205,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4>
I* New(P1 p1, P2 p2, P3 p3, P4 p4) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4));
+ return I::New(zone(), context(), p1, p2, p3, p4);
}
template<class I, class P1, class P2, class P3, class P4>
@@ -1146,7 +1215,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4) {
- return I::cast(AddUncasted<I>(p1, p2, p3, p4));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4));
}
template<class I, class P1, class P2, class P3, class P4, class P5>
@@ -1156,7 +1225,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5));
+ return I::New(zone(), context(), p1, p2, p3, p4, p5);
}
template<class I, class P1, class P2, class P3, class P4, class P5>
@@ -1166,7 +1235,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return I::cast(AddUncasted<I>(p1, p2, p3, p4, p5));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5));
}
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
@@ -1176,7 +1245,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5, p6));
+ return I::New(zone(), context(), p1, p2, p3, p4, p5, p6);
}
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
@@ -1186,7 +1255,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return I::cast(AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6)));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6));
}
template<class I, class P1, class P2, class P3, class P4,
@@ -1198,7 +1267,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4,
class P5, class P6, class P7>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7));
+ return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7);
}
template<class I, class P1, class P2, class P3,
@@ -1210,8 +1279,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3,
class P4, class P5, class P6, class P7>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return I::cast(AddInstruction(NewUncasted<I>(p1, p2, p3, p4,
- p5, p6, p7)));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7));
}
template<class I, class P1, class P2, class P3, class P4,
@@ -1224,7 +1292,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4,
class P5, class P6, class P7, class P8>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8));
+ return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
}
template<class I, class P1, class P2, class P3, class P4,
@@ -1237,14 +1305,11 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4,
class P5, class P6, class P7, class P8>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
- return I::cast(
- AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8)));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8));
}
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
- int position() const { return position_; }
-
protected:
virtual bool BuildGraph() = 0;
@@ -1253,6 +1318,7 @@ class HGraphBuilder {
HValue* BuildCheckHeapObject(HValue* object);
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
+ HValue* BuildCheckString(HValue* string);
HValue* BuildWrapReceiver(HValue* object, HValue* function);
// Building common constructs
@@ -1261,7 +1327,8 @@ class HGraphBuilder {
ElementsKind kind,
HValue* length,
HValue* key,
- bool is_js_array);
+ bool is_js_array,
+ PropertyAccessType access_type);
HValue* BuildCopyElementsOnWrite(HValue* object,
HValue* elements,
@@ -1274,7 +1341,43 @@ class HGraphBuilder {
ElementsKind to_kind,
bool is_jsarray);
- HValue* BuildNumberToString(HValue* object, Handle<Type> type);
+ HValue* BuildNumberToString(HValue* object, Type* type);
+
+ HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
+ HValue* key);
+
+ HValue* BuildRegExpConstructResult(HValue* length,
+ HValue* index,
+ HValue* input);
+
+ // Allocates a new object according to the given allocation properties.
+ HAllocate* BuildAllocate(HValue* object_size,
+ HType type,
+ InstanceType instance_type,
+ HAllocationMode allocation_mode);
+ // Computes the sum of two string lengths, taking care of overflow handling.
+ HValue* BuildAddStringLengths(HValue* left_length, HValue* right_length);
+ // Creates a cons string using the two input strings.
+ HValue* BuildCreateConsString(HValue* length,
+ HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode);
+ // Copies characters from one sequential string to another.
+ void BuildCopySeqStringChars(HValue* src,
+ HValue* src_offset,
+ String::Encoding src_encoding,
+ HValue* dst,
+ HValue* dst_offset,
+ String::Encoding dst_encoding,
+ HValue* length);
+ // Both operands are non-empty strings.
+ HValue* BuildUncheckedStringAdd(HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode);
+ // Add two strings using allocation mode, validating type feedback.
+ HValue* BuildStringAdd(HValue* left,
+ HValue* right,
+ HAllocationMode allocation_mode);
HInstruction* BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
@@ -1282,7 +1385,7 @@ class HGraphBuilder {
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
@@ -1292,13 +1395,24 @@ class HGraphBuilder {
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
- HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
- HInstruction* AddLoadNamedField(HValue* object, HObjectAccess access);
- HInstruction* BuildLoadStringLength(HValue* object, HValue* checked_value);
- HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map>);
+ HInstruction* AddLoadStringInstanceType(HValue* string);
+ HInstruction* AddLoadStringLength(HValue* string);
+ HStoreNamedField* AddStoreMapNoWriteBarrier(HValue* object, HValue* map) {
+ HStoreNamedField* store_map = Add<HStoreNamedField>(
+ object, HObjectAccess::ForMap(), map);
+ store_map->SkipWriteBarrier();
+ return store_map;
+ }
+ HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map> map);
+ HStoreNamedField* AddStoreMapConstantNoWriteBarrier(HValue* object,
+ Handle<Map> map) {
+ HStoreNamedField* store_map = AddStoreMapConstant(object, map);
+ store_map->SkipWriteBarrier();
+ return store_map;
+ }
HLoadNamedField* AddLoadElements(HValue* object);
bool MatchRotateRight(HValue* left,
@@ -1306,24 +1420,23 @@ class HGraphBuilder {
HValue** operand,
HValue** shift_amount);
- HInstruction* BuildBinaryOperation(Token::Value op,
- HValue* left,
- HValue* right,
- Handle<Type> left_type,
- Handle<Type> right_type,
- Handle<Type> result_type,
- Maybe<int> fixed_right_arg,
- bool binop_stub = false);
+ HValue* BuildBinaryOperation(Token::Value op,
+ HValue* left,
+ HValue* right,
+ Type* left_type,
+ Type* right_type,
+ Type* result_type,
+ Maybe<int> fixed_right_arg,
+ HAllocationMode allocation_mode);
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
- HValue* EnforceNumberType(HValue* number, Handle<Type> expected);
- HValue* TruncateToNumber(HValue* value, Handle<Type>* expected);
+ HValue* EnforceNumberType(HValue* number, Type* expected);
+ HValue* TruncateToNumber(HValue* value, Type** expected);
- void FinishExitWithHardDeoptimization(const char* reason,
- HBasicBlock* continuation);
+ void FinishExitWithHardDeoptimization(const char* reason);
void AddIncrementCounter(StatsCounter* counter);
@@ -1467,6 +1580,10 @@ class HGraphBuilder {
void End();
void Deopt(const char* reason);
+ void ThenDeopt(const char* reason) {
+ Then();
+ Deopt(reason);
+ }
void ElseDeopt(const char* reason) {
Else();
Deopt(reason);
@@ -1479,21 +1596,41 @@ class HGraphBuilder {
HGraphBuilder* builder() const { return builder_; }
+ void AddMergeAtJoinBlock(bool deopt);
+
+ void Finish();
+ void Finish(HBasicBlock** then_continuation,
+ HBasicBlock** else_continuation);
+
+ class MergeAtJoinBlock : public ZoneObject {
+ public:
+ MergeAtJoinBlock(HBasicBlock* block,
+ bool deopt,
+ MergeAtJoinBlock* next)
+ : block_(block),
+ deopt_(deopt),
+ next_(next) {}
+ HBasicBlock* block_;
+ bool deopt_;
+ MergeAtJoinBlock* next_;
+ };
+
HGraphBuilder* builder_;
bool finished_ : 1;
- bool deopt_then_ : 1;
- bool deopt_else_ : 1;
bool did_then_ : 1;
bool did_else_ : 1;
+ bool did_else_if_ : 1;
bool did_and_ : 1;
bool did_or_ : 1;
bool captured_ : 1;
bool needs_compare_ : 1;
+ bool pending_merge_block_ : 1;
HBasicBlock* first_true_block_;
- HBasicBlock* last_true_block_;
HBasicBlock* first_false_block_;
HBasicBlock* split_edge_merge_block_;
- HBasicBlock* merge_block_;
+ MergeAtJoinBlock* merge_at_join_blocks_;
+ int normal_merge_at_join_block_count_;
+ int deopt_merge_at_join_block_count_;
};
class LoopBuilder V8_FINAL {
@@ -1557,12 +1694,20 @@ class HGraphBuilder {
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
- HValue* constructor_function);
+ HValue* constructor_function = NULL);
+
+ enum FillMode {
+ DONT_FILL_WITH_HOLE,
+ FILL_WITH_HOLE
+ };
+
+ ElementsKind kind() { return kind_; }
HValue* AllocateEmptyArray();
HValue* AllocateArray(HValue* capacity, HValue* length_field,
- bool fill_with_hole);
+ FillMode fill_mode = FILL_WITH_HOLE);
HValue* GetElementsLocation() { return elements_location_; }
+ HValue* EmitMapCode();
private:
Zone* zone() const { return builder_->zone(); }
@@ -1576,12 +1721,12 @@ class HGraphBuilder {
return JSArray::kPreallocatedArrayElements;
}
- HValue* EmitMapCode();
HValue* EmitInternalMapCode();
HValue* EstablishEmptyArrayAllocationSize();
HValue* EstablishAllocationSize(HValue* length_node);
HValue* AllocateArray(HValue* size_in_bytes, HValue* capacity,
- HValue* length_field, bool fill_with_hole);
+ HValue* length_field,
+ FillMode fill_mode = FILL_WITH_HOLE);
HGraphBuilder* builder_;
ElementsKind kind_;
@@ -1591,6 +1736,9 @@ class HGraphBuilder {
HInnerAllocatedObject* elements_location_;
};
+ HValue* BuildAllocateArrayFromLength(JSArrayBuilder* array_builder,
+ HValue* length_argument);
+
HValue* BuildAllocateElements(ElementsKind kind,
HValue* capacity);
@@ -1637,44 +1785,84 @@ class HGraphBuilder {
ElementsKind kind,
int length);
+ HValue* BuildElementIndexHash(HValue* index);
+
void BuildCompareNil(
HValue* value,
- Handle<Type> type,
+ Type* type,
HIfContinuation* continuation);
- HValue* BuildCreateAllocationMemento(HValue* previous_object,
- int previous_object_size,
- HValue* payload);
+ void BuildCreateAllocationMemento(HValue* previous_object,
+ HValue* previous_object_size,
+ HValue* payload);
HInstruction* BuildConstantMapCheck(Handle<JSObject> constant,
CompilationInfo* info);
HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder);
+ HInstruction* BuildGetNativeContext(HValue* closure);
HInstruction* BuildGetNativeContext();
HInstruction* BuildGetArrayFunction();
protected:
void SetSourcePosition(int position) {
ASSERT(position != RelocInfo::kNoPosition);
+ position_.set_position(position - start_position_);
+ }
+
+ void EnterInlinedSource(int start_position, int id) {
+ if (FLAG_hydrogen_track_positions) {
+ start_position_ = start_position;
+ position_.set_inlining_id(id);
+ }
+ }
+
+ // Convert the given absolute offset from the start of the script to
+ // the HSourcePosition assuming that this position corresponds to the
+ // same function as current position_.
+ HSourcePosition ScriptPositionToSourcePosition(int position) {
+ HSourcePosition pos = position_;
+ pos.set_position(position - start_position_);
+ return pos;
+ }
+
+ HSourcePosition source_position() { return position_; }
+ void set_source_position(HSourcePosition position) {
position_ = position;
}
+ template <typename ViewClass>
+ void BuildArrayBufferViewInitialization(HValue* obj,
+ HValue* buffer,
+ HValue* byte_offset,
+ HValue* byte_length);
+
private:
HGraphBuilder();
- void PadEnvironmentForContinuation(HBasicBlock* from,
- HBasicBlock* continuation);
+ HValue* BuildUncheckedDictionaryElementLoadHelper(
+ HValue* elements,
+ HValue* key,
+ HValue* hash,
+ HValue* mask,
+ int current_probe);
+
+ template <class I>
+ I* AddInstructionTyped(I* instr) {
+ return I::cast(AddInstruction(instr));
+ }
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
- int position_;
+ HSourcePosition position_;
+ int start_position_;
};
template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
+inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
const char* reason, Deoptimizer::BailoutType type) {
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_requested()->Increment();
@@ -1694,14 +1882,14 @@ inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
template<>
-inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
+inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
const char* reason, Deoptimizer::BailoutType type) {
- return static_cast<HDeoptimize*>(AddUncasted<HDeoptimize>(reason, type));
+ return Add<HDeoptimize>(reason, type);
}
template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(
+inline HSimulate* HGraphBuilder::Add<HSimulate>(
BailoutId id,
RemovableSimulate removable) {
HSimulate* instr = current_block()->CreateSimulate(id, removable);
@@ -1711,13 +1899,20 @@ inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(
template<>
+inline HSimulate* HGraphBuilder::Add<HSimulate>(
+ BailoutId id) {
+ return Add<HSimulate>(id, FIXED_SIMULATE);
+}
+
+
+template<>
inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(BailoutId id) {
- return AddUncasted<HSimulate>(id, FIXED_SIMULATE);
+ return Add<HSimulate>(id, FIXED_SIMULATE);
}
template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) {
+inline HReturn* HGraphBuilder::Add<HReturn>(HValue* value) {
int num_parameters = graph()->info()->num_parameters();
HValue* params = AddUncasted<HConstant>(num_parameters);
HReturn* return_instruction = New<HReturn>(value, params);
@@ -1727,13 +1922,24 @@ inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) {
template<>
+inline HReturn* HGraphBuilder::Add<HReturn>(HConstant* value) {
+ return Add<HReturn>(static_cast<HValue*>(value));
+}
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) {
+ return Add<HReturn>(value);
+}
+
+
+template<>
inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HConstant* value) {
- return AddUncasted<HReturn>(static_cast<HValue*>(value));
+ return Add<HReturn>(value);
}
template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>(
+inline HCallRuntime* HGraphBuilder::Add<HCallRuntime>(
Handle<String> name,
const Runtime::Function* c_function,
int argument_count) {
@@ -1750,11 +1956,25 @@ inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>(
template<>
-inline HInstruction* HGraphBuilder::NewUncasted<HContext>() {
+inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>(
+ Handle<String> name,
+ const Runtime::Function* c_function,
+ int argument_count) {
+ return Add<HCallRuntime>(name, c_function, argument_count);
+}
+
+
+template<>
+inline HContext* HGraphBuilder::New<HContext>() {
return HContext::New(zone());
}
+template<>
+inline HInstruction* HGraphBuilder::NewUncasted<HContext>() {
+ return New<HContext>();
+}
+
class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
public:
// A class encapsulating (lazily-allocated) break and continue blocks for
@@ -2005,6 +2225,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
+ void PushArgumentsFromEnvironment(int count);
void SetUpScope(Scope* scope);
virtual void VisitStatements(ZoneList<Statement*>* statements) V8_OVERRIDE;
@@ -2013,6 +2234,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+ Type* ToType(Handle<Map> map) { return IC::MapToType<Type>(map, zone()); }
+
private:
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
@@ -2021,7 +2244,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
};
GlobalPropertyAccess LookupGlobalProperty(Variable* var,
LookupResult* lookup,
- bool is_store);
+ PropertyAccessType access_type);
void EnsureArgumentsArePushedForAccess();
bool TryArgumentsAccess(Property* expr);
@@ -2029,21 +2252,26 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
// Try to optimize fun.apply(receiver, arguments) pattern.
bool TryCallApply(Call* expr);
+ HValue* ImplicitReceiverFor(HValue* function,
+ Handle<JSFunction> target);
+
int InliningAstSize(Handle<JSFunction> target);
- bool TryInline(CallKind call_kind,
- Handle<JSFunction> target,
+ bool TryInline(Handle<JSFunction> target,
int arguments_count,
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind);
+ InliningKind inlining_kind,
+ HSourcePosition position);
- bool TryInlineCall(Call* expr, bool drop_extra = false);
+ bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
bool TryInlineGetter(Handle<JSFunction> getter,
+ Handle<Map> receiver_map,
BailoutId ast_id,
BailoutId return_id);
bool TryInlineSetter(Handle<JSFunction> setter,
+ Handle<Map> receiver_map,
BailoutId id,
BailoutId assignment_id,
HValue* implicit_return_value);
@@ -2052,9 +2280,30 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
int arguments_count);
bool TryInlineBuiltinMethodCall(Call* expr,
HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type);
- bool TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra);
+ Handle<Map> receiver_map);
+ bool TryInlineBuiltinFunctionCall(Call* expr);
+ enum ApiCallType {
+ kCallApiFunction,
+ kCallApiMethod,
+ kCallApiGetter,
+ kCallApiSetter
+ };
+ bool TryInlineApiMethodCall(Call* expr,
+ HValue* receiver,
+ SmallMapList* receiver_types);
+ bool TryInlineApiFunctionCall(Call* expr, HValue* receiver);
+ bool TryInlineApiGetter(Handle<JSFunction> function,
+ Handle<Map> receiver_map,
+ BailoutId ast_id);
+ bool TryInlineApiSetter(Handle<JSFunction> function,
+ Handle<Map> receiver_map,
+ BailoutId ast_id);
+ bool TryInlineApiCall(Handle<JSFunction> function,
+ HValue* receiver,
+ SmallMapList* receiver_maps,
+ int argc,
+ BailoutId ast_id,
+ ApiCallType call_type);
// If --trace-inlining, print a line of the inlining trace. Inlining
// succeeded if the reason string is NULL and failed if there is a
@@ -2069,24 +2318,38 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicLoadNamedField(BailoutId ast_id,
- BailoutId return_id,
- HValue* object,
- SmallMapList* types,
- Handle<String> name);
+ void HandlePolymorphicNamedFieldAccess(PropertyAccessType access_type,
+ BailoutId ast_id,
+ BailoutId return_id,
+ HValue* object,
+ HValue* value,
+ SmallMapList* types,
+ Handle<String> name);
+
+ void VisitTypedArrayInitialize(CallRuntime* expr);
+
+ bool IsCallNewArrayInlineable(CallNew* expr);
+ void BuildInlinedCallNewArray(CallNew* expr);
+
+ void VisitDataViewInitialize(CallRuntime* expr);
class PropertyAccessInfo {
public:
- PropertyAccessInfo(Isolate* isolate, Handle<Map> map, Handle<String> name)
- : lookup_(isolate),
- map_(map),
+ PropertyAccessInfo(HOptimizedGraphBuilder* builder,
+ PropertyAccessType access_type,
+ Type* type,
+ Handle<String> name)
+ : lookup_(builder->isolate()),
+ builder_(builder),
+ access_type_(access_type),
+ type_(type),
name_(name),
access_(HObjectAccess::ForMap()) { }
// Checks whether this PropertyAccessInfo can be handled as a monomorphic
// load named. It additionally fills in the fields necessary to generate the
// lookup code.
- bool CanLoadMonomorphic();
+ bool CanAccessMonomorphic();
// Checks whether all types behave uniformly when loading name. If all maps
// behave the same, a single monomorphic load instruction can be emitted,
@@ -2094,56 +2357,69 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
// an instance of any of the types.
// This method skips the first type in types, assuming that this
// PropertyAccessInfo is built for types->first().
- bool CanLoadAsMonomorphic(SmallMapList* types);
+ bool CanAccessAsMonomorphic(SmallMapList* types);
+
+ Handle<Map> map() {
+ if (type_->Is(Type::Number())) {
+ Context* context = current_info()->closure()->context();
+ context = context->native_context();
+ return handle(context->number_function()->initial_map());
+ } else if (type_->Is(Type::Boolean())) {
+ Context* context = current_info()->closure()->context();
+ context = context->native_context();
+ return handle(context->boolean_function()->initial_map());
+ } else if (type_->Is(Type::String())) {
+ Context* context = current_info()->closure()->context();
+ context = context->native_context();
+ return handle(context->string_function()->initial_map());
+ } else {
+ return type_->AsClass();
+ }
+ }
+ Type* type() const { return type_; }
+ Handle<String> name() const { return name_; }
bool IsJSObjectFieldAccessor() {
int offset; // unused
- return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset);
+ return Accessors::IsJSObjectFieldAccessor<Type>(type_, name_, &offset);
}
bool GetJSObjectFieldAccess(HObjectAccess* access) {
- if (IsStringLength()) {
- *access = HObjectAccess::ForStringLength();
- return true;
- } else if (IsArrayLength()) {
- *access = HObjectAccess::ForArrayLength(map_->elements_kind());
- return true;
- } else {
- int offset;
- if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
- *access = HObjectAccess::ForJSObjectOffset(offset);
- return true;
+ int offset;
+ if (Accessors::IsJSObjectFieldAccessor<Type>(type_, name_, &offset)) {
+ if (type_->Is(Type::String())) {
+ ASSERT(name_->Equals(isolate()->heap()->length_string()));
+ *access = HObjectAccess::ForStringLength();
+ } else if (type_->Is(Type::Array())) {
+ ASSERT(name_->Equals(isolate()->heap()->length_string()));
+ *access = HObjectAccess::ForArrayLength(map()->elements_kind());
+ } else {
+ *access = HObjectAccess::ForMapAndOffset(map(), offset);
}
- return false;
+ return true;
}
+ return false;
}
bool has_holder() { return !holder_.is_null(); }
+ bool IsLoad() const { return access_type_ == LOAD; }
LookupResult* lookup() { return &lookup_; }
- Handle<Map> map() { return map_; }
Handle<JSObject> holder() { return holder_; }
Handle<JSFunction> accessor() { return accessor_; }
Handle<Object> constant() { return constant_; }
+ Handle<Map> transition() { return handle(lookup_.GetTransitionTarget()); }
HObjectAccess access() { return access_; }
private:
+ Type* ToType(Handle<Map> map) { return builder_->ToType(map); }
Isolate* isolate() { return lookup_.isolate(); }
-
- bool IsStringLength() {
- return map_->instance_type() < FIRST_NONSTRING_TYPE &&
- name_->Equals(isolate()->heap()->length_string());
- }
-
- bool IsArrayLength() {
- return map_->instance_type() == JS_ARRAY_TYPE &&
- name_->Equals(isolate()->heap()->length_string());
- }
+ CompilationInfo* current_info() { return builder_->current_info(); }
bool LoadResult(Handle<Map> map);
bool LookupDescriptor();
bool LookupInPrototypes();
- bool IsCompatibleForLoad(PropertyAccessInfo* other);
+ bool IsCompatible(PropertyAccessInfo* other);
void GeneralizeRepresentation(Representation r) {
access_ = access_.WithRepresentation(
@@ -2151,39 +2427,38 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
}
LookupResult lookup_;
- Handle<Map> map_;
+ HOptimizedGraphBuilder* builder_;
+ PropertyAccessType access_type_;
+ Type* type_;
Handle<String> name_;
Handle<JSObject> holder_;
Handle<JSFunction> accessor_;
+ Handle<JSObject> api_holder_;
Handle<Object> constant_;
HObjectAccess access_;
};
- HInstruction* BuildLoadMonomorphic(PropertyAccessInfo* info,
- HValue* object,
- HInstruction* checked_object,
- BailoutId ast_id,
- BailoutId return_id,
- bool can_inline_accessor = true);
-
- void HandlePolymorphicStoreNamedField(BailoutId assignment_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name);
- bool TryStorePolymorphicAsMonomorphic(BailoutId assignment_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name);
+ HInstruction* BuildMonomorphicAccess(PropertyAccessInfo* info,
+ HValue* object,
+ HValue* checked_object,
+ HValue* value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor = true);
+
+ HInstruction* BuildNamedAccess(PropertyAccessType access,
+ BailoutId ast_id,
+ BailoutId return_id,
+ Expression* expr,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized = false);
+
void HandlePolymorphicCallNamed(Call* expr,
HValue* receiver,
SmallMapList* types,
Handle<String> name);
- bool TryCallPolymorphicAsMonomorphic(Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name);
void HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
Handle<String> check);
@@ -2191,15 +2466,37 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Expression* sub_expr,
NilValue nil);
+ enum PushBeforeSimulateBehavior {
+ PUSH_BEFORE_SIMULATE,
+ NO_PUSH_BEFORE_SIMULATE
+ };
+
+ HControlInstruction* BuildCompareInstruction(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Type* left_type,
+ Type* right_type,
+ Type* combined_type,
+ HSourcePosition left_position,
+ HSourcePosition right_position,
+ PushBeforeSimulateBehavior push_sim_result,
+ BailoutId bailout_id);
+
HInstruction* BuildStringCharCodeAt(HValue* string,
HValue* index);
- HInstruction* BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right);
+
+ HValue* BuildBinaryOperation(
+ BinaryOperation* expr,
+ HValue* left,
+ HValue* right,
+ PushBeforeSimulateBehavior push_sim_result);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HInstruction* BuildLoadKeyedGeneric(HValue* object,
- HValue* key);
+ HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
+ HValue* object,
+ HValue* key,
+ HValue* value);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
@@ -2213,14 +2510,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* val,
HValue* dependency,
Handle<Map> map,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode);
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode,
bool* has_side_effects);
@@ -2228,12 +2525,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* key,
HValue* val,
Expression* expr,
- bool is_store,
+ PropertyAccessType access_type,
bool* has_side_effects);
- HInstruction* BuildLoadNamedGeneric(HValue* object,
- Handle<String> name,
- Property* expr);
+ HInstruction* BuildNamedGeneric(PropertyAccessType access,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized = false);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
@@ -2257,28 +2556,18 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
BailoutId return_id,
bool is_uninitialized = false);
- HInstruction* BuildStoreNamedField(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup);
- HInstruction* BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value);
- HInstruction* BuildStoreNamedMonomorphic(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map);
- HInstruction* BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value);
+ HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
+ HValue* checked_object);
+ HInstruction* BuildStoreNamedField(PropertyAccessInfo* info,
+ HValue* checked_object,
+ HValue* value);
HValue* BuildContextChainWalk(Variable* var);
HInstruction* BuildThisFunction();
HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
- AllocationSiteContext* site_context);
+ AllocationSiteUsageContext* site_context);
void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object,
HInstruction* object);
@@ -2289,12 +2578,13 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
HInstruction* object,
- AllocationSiteContext* site_context);
+ AllocationSiteUsageContext* site_context,
+ PretenureFlag pretenure_flag);
void BuildEmitElements(Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
HValue* object_elements,
- AllocationSiteContext* site_context);
+ AllocationSiteUsageContext* site_context);
void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
@@ -2303,14 +2593,21 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
HValue* object_elements,
- AllocationSiteContext* site_context);
+ AllocationSiteUsageContext* site_context);
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
- void AddCheckConstantFunction(Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map);
+ HInstruction* NewPlainFunctionCall(HValue* fun,
+ int argument_count,
+ bool pass_argument_count);
+
+ HInstruction* NewArgumentAdaptorCall(HValue* fun, HValue* context,
+ int argument_count,
+ HValue* expected_param_count);
+
+ HInstruction* BuildCallConstantFunction(Handle<JSFunction> target,
+ int argument_count);
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index dbff6e5f52..5c97c6b8ee 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -257,7 +257,8 @@ Handle<ObjectTemplateInfo> GetEternal(Isolate* isolate) {
return Handle<ObjectTemplateInfo>::cast(
isolate->eternal_handles()->GetSingleton(field));
}
- v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
+ v8::Local<v8::ObjectTemplate> raw_template =
+ v8::ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate));
raw_template->SetInternalFieldCount(internal_fields);
return Handle<ObjectTemplateInfo>::cast(
isolate->eternal_handles()->CreateSingleton(
@@ -864,15 +865,24 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
}
-void DateFormat::DeleteDateFormat(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param) {
- // First delete the hidden C++ object.
- delete reinterpret_cast<icu::SimpleDateFormat*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(0));
+template<class T>
+void DeleteNativeObjectAt(const v8::WeakCallbackData<v8::Value, void>& data,
+ int index) {
+ v8::Local<v8::Object> obj = v8::Handle<v8::Object>::Cast(data.GetValue());
+ delete reinterpret_cast<T*>(obj->GetAlignedPointerFromInternalField(index));
+}
+
+
+static void DestroyGlobalHandle(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
+}
+
- // Then dispose of the persistent handle to JS object.
- object->Dispose();
+void DateFormat::DeleteDateFormat(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ DeleteNativeObjectAt<icu::SimpleDateFormat>(data, 0);
+ DestroyGlobalHandle(data);
}
@@ -928,15 +938,10 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
}
-void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param) {
- // First delete the hidden C++ object.
- delete reinterpret_cast<icu::DecimalFormat*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(0));
-
- // Then dispose of the persistent handle to JS object.
- object->Dispose();
+void NumberFormat::DeleteNumberFormat(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ DeleteNativeObjectAt<icu::DecimalFormat>(data, 0);
+ DestroyGlobalHandle(data);
}
@@ -989,15 +994,10 @@ icu::Collator* Collator::UnpackCollator(Isolate* isolate,
}
-void Collator::DeleteCollator(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param) {
- // First delete the hidden C++ object.
- delete reinterpret_cast<icu::Collator*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(0));
-
- // Then dispose of the persistent handle to JS object.
- object->Dispose();
+void Collator::DeleteCollator(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ DeleteNativeObjectAt<icu::Collator>(data, 0);
+ DestroyGlobalHandle(data);
}
@@ -1053,18 +1053,11 @@ icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
}
-void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param) {
- // First delete the hidden C++ object.
- delete reinterpret_cast<icu::BreakIterator*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(0));
-
- delete reinterpret_cast<icu::UnicodeString*>(Handle<JSObject>::cast(
- v8::Utils::OpenPersistent(object))->GetInternalField(1));
-
- // Then dispose of the persistent handle to JS object.
- object->Dispose();
+void BreakIterator::DeleteBreakIterator(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ DeleteNativeObjectAt<icu::BreakIterator>(data, 0);
+ DeleteNativeObjectAt<icu::UnicodeString>(data, 1);
+ DestroyGlobalHandle(data);
}
} } // namespace v8::internal
diff --git a/deps/v8/src/i18n.h b/deps/v8/src/i18n.h
index 08e7f2b713..50beb49ba1 100644
--- a/deps/v8/src/i18n.h
+++ b/deps/v8/src/i18n.h
@@ -71,9 +71,9 @@ class DateFormat {
// Release memory we allocated for the DateFormat once the JS object that
// holds the pointer gets garbage collected.
- static void DeleteDateFormat(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param);
+ static void DeleteDateFormat(
+ const v8::WeakCallbackData<v8::Value, void>& data);
+
private:
DateFormat();
};
@@ -95,9 +95,9 @@ class NumberFormat {
// Release memory we allocated for the NumberFormat once the JS object that
// holds the pointer gets garbage collected.
- static void DeleteNumberFormat(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param);
+ static void DeleteNumberFormat(
+ const v8::WeakCallbackData<v8::Value, void>& data);
+
private:
NumberFormat();
};
@@ -118,9 +118,9 @@ class Collator {
// Release memory we allocated for the Collator once the JS object that holds
// the pointer gets garbage collected.
- static void DeleteCollator(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param);
+ static void DeleteCollator(
+ const v8::WeakCallbackData<v8::Value, void>& data);
+
private:
Collator();
};
@@ -141,9 +141,8 @@ class BreakIterator {
// Release memory we allocated for the BreakIterator once the JS object that
// holds the pointer gets garbage collected.
- static void DeleteBreakIterator(v8::Isolate* isolate,
- Persistent<v8::Value>* object,
- void* param);
+ static void DeleteBreakIterator(
+ const v8::WeakCallbackData<v8::Value, void>& data);
private:
BreakIterator();
diff --git a/deps/v8/src/i18n.js b/deps/v8/src/i18n.js
index a64c7e6784..2e5485749a 100644
--- a/deps/v8/src/i18n.js
+++ b/deps/v8/src/i18n.js
@@ -45,6 +45,11 @@ var AVAILABLE_SERVICES = ['collator',
'dateformat',
'breakiterator'];
+var NORMALIZATION_FORMS = ['NFC',
+ 'NFD',
+ 'NFKC',
+ 'NFKD'];
+
/**
* Caches available locales for each service.
*/
@@ -1302,10 +1307,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
*/
function formatNumber(formatter, value) {
// Spec treats -0 and +0 as 0.
- var number = $Number(value);
- if (number === -0) {
- number = 0;
- }
+ var number = $Number(value) + 0;
return %InternalNumberFormat(formatter.formatter, number);
}
@@ -1990,6 +1992,40 @@ $Object.defineProperty($String.prototype, 'localeCompare', {
/**
+ * Unicode normalization. This method is called with one argument that
+ * specifies the normalization form.
+ * If none is specified, "NFC" is assumed.
+ * If the form is not one of "NFC", "NFD", "NFKC", or "NFKD", then throw
+ * a RangeError Exception.
+ */
+$Object.defineProperty($String.prototype, 'normalize', {
+ value: function(that) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
+
+ var form = $String(%_Arguments(0) || 'NFC');
+
+ var normalizationForm = NORMALIZATION_FORMS.indexOf(form);
+ if (normalizationForm === -1) {
+ throw new $RangeError('The normalization form should be one of '
+ + NORMALIZATION_FORMS.join(', ') + '.');
+ }
+
+ return %StringNormalize(this, normalizationForm);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($String.prototype.normalize, 'normalize');
+%FunctionRemovePrototype($String.prototype.normalize);
+%SetNativeFlag($String.prototype.normalize);
+
+
+/**
* Formats a Number object (this) using locale and options values.
* If locale or options are omitted, defaults are used.
*/
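The String.prototype.normalize method added above maps the four Unicode normalization forms onto an index into NORMALIZATION_FORMS and defers the actual work to %StringNormalize. A sketch of the observable behaviour, assuming ICU normalization data is available at runtime:

    var composed   = '\u00e9';   // 'é' as a single code point
    var decomposed = 'e\u0301';  // 'e' followed by a combining acute accent

    composed === decomposed;                   // false
    composed.normalize('NFD') === decomposed;  // true
    decomposed.normalize('NFC') === composed;  // true
    decomposed.normalize() === composed;       // true (the form defaults to 'NFC')
    decomposed.normalize('NFX');               // throws a RangeError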
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 05cc23a71d..ee5d991e38 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -125,12 +125,6 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
}
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return &Memory::Object_at(pc_);
-}
-
-
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
@@ -145,9 +139,9 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
}
-Address* RelocInfo::target_reference_address() {
+Address RelocInfo::target_reference() {
ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
+ return Memory::Address_at(pc_);
}
@@ -249,6 +243,18 @@ Object** RelocInfo::call_object_address() {
}
+void RelocInfo::WipeOut() {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+ // Effectively write zero into the relocation.
+ Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
return *pc_ == kCallOpcode;
}
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 0557ed8853..733432028a 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -89,8 +89,6 @@ const char* IntelDoubleRegister::AllocationIndexToString(int index) {
}
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() {
ASSERT(!initialized_);
ASSERT(supported_ == 0);
@@ -553,6 +551,16 @@ void Assembler::mov_w(const Operand& dst, Register src) {
}
+void Assembler::mov_w(const Operand& dst, int16_t imm16) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ EMIT(static_cast<int8_t>(imm16 & 0xff));
+ EMIT(static_cast<int8_t>(imm16 >> 8));
+}
+
+
void Assembler::mov(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
@@ -2059,7 +2067,26 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+void Assembler::andps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::xorps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x57);
@@ -2067,42 +2094,68 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
}
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+void Assembler::addps(XMMRegister dst, const Operand& src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- EMIT(0xF2);
EMIT(0x0F);
- EMIT(0x51);
+ EMIT(0x58);
emit_sse_operand(dst, src);
}
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+void Assembler::subps(XMMRegister dst, const Operand& src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- EMIT(0x66);
EMIT(0x0F);
- EMIT(0x54);
+ EMIT(0x5C);
emit_sse_operand(dst, src);
}
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+void Assembler::mulps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
- EMIT(0x56);
+ EMIT(0x54);
emit_sse_operand(dst, src);
}
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+void Assembler::orpd(XMMRegister dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
- EMIT(0x2E);
+ EMIT(0x56);
emit_sse_operand(dst, src);
}
@@ -2179,6 +2232,17 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
}
+void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
+ ASSERT(IsEnabled(SSE2));
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC6);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
+
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2271,16 +2335,6 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
}
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::movss(const Operand& dst, XMMRegister src ) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2301,16 +2355,6 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
}
-void Assembler::movss(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::movd(XMMRegister dst, const Operand& src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2344,14 +2388,6 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
}
-void Assembler::andps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index f46c6478db..6ed0bc6d66 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -638,13 +638,6 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, target);
}
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
static const int kSpecialTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
@@ -735,6 +728,7 @@ class Assembler : public AssemblerBase {
void mov_w(Register dst, const Operand& src);
void mov_w(const Operand& dst, Register src);
+ void mov_w(const Operand& dst, int16_t imm16);
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
@@ -1018,11 +1012,30 @@ class Assembler : public AssemblerBase {
void cpuid();
// SSE instructions
- void andps(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
+ void movaps(XMMRegister dst, XMMRegister src);
+ void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+
+ void andps(XMMRegister dst, const Operand& src);
+ void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
+ void xorps(XMMRegister dst, const Operand& src);
+ void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); }
+ void orps(XMMRegister dst, const Operand& src);
+ void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); }
+
+ void addps(XMMRegister dst, const Operand& src);
+ void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); }
+ void subps(XMMRegister dst, const Operand& src);
+ void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); }
+ void mulps(XMMRegister dst, const Operand& src);
+ void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
+ void divps(XMMRegister dst, const Operand& src);
+ void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, XMMRegister src) {
+ cvttss2si(dst, Operand(src));
+ }
void cvttsd2si(Register dst, const Operand& src);
void cvtsd2si(Register dst, XMMRegister src);
@@ -1043,7 +1056,7 @@ class Assembler : public AssemblerBase {
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, XMMRegister src);
+ void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, const Operand& src);
enum RoundingMode {
@@ -1061,8 +1074,6 @@ class Assembler : public AssemblerBase {
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
- void movaps(XMMRegister dst, XMMRegister src);
-
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
@@ -1079,14 +1090,14 @@ class Assembler : public AssemblerBase {
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
void movd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, XMMRegister src);
+ void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); }
void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
- void movss(XMMRegister dst, XMMRegister src);
+ void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
void extractps(Register dst, XMMRegister src, byte imm8);
void pand(XMMRegister dst, XMMRegister src);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 5a3fa78e33..d748d23622 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -32,6 +32,7 @@
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -74,19 +75,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function.
__ push(edi);
- // Push call kind information.
- __ push(ecx);
// Function is also the parameter to the runtime call.
__ push(edi);
__ CallRuntime(function_id, 1);
- // Restore call kind information.
- __ pop(ecx);
// Restore receiver.
__ pop(edi);
}
@@ -100,7 +97,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -112,22 +115,14 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -369,13 +364,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ __ call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ NullCallWrapper());
}
// Store offset of return address for deoptimizer.
@@ -488,7 +481,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
} else {
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ NullCallWrapper());
}
// Exit the internal frame. Notice that this also removes the empty.
@@ -509,19 +502,37 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function.
+ __ push(edi);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+ // Restore receiver.
+ __ pop(edi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
}
@@ -760,14 +771,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
__ jmp(&patch_receiver);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
+ __ mov(ebx,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -818,13 +824,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ push(edi); // re-add proxy object as additional argument
__ push(edx);
__ inc(eax);
- __ SetCallKind(ecx, CALL_AS_FUNCTION);
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
- __ SetCallKind(ecx, CALL_AS_METHOD);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -839,14 +843,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
- __ SetCallKind(ecx, CALL_AS_METHOD);
__ cmp(eax, ebx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
ParameterCount expected(0);
- __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
@@ -898,7 +900,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(ebx, Operand(ebp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
+ Label push_receiver, use_global_receiver;
__ mov(edi, Operand(ebp, kFunctionOffset));
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &push_receiver);
@@ -908,7 +910,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute the receiver.
// Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
+ Label call_to_object;
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
@@ -939,13 +941,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(ebx, eax);
__ jmp(&push_receiver);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
+ __ mov(ebx,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -979,7 +977,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ cmp(ecx, Operand(ebp, kLimitOffset));
__ j(not_equal, &loop);
- // Invoke the function.
+ // Call the function.
Label call_proxy;
__ mov(eax, ecx);
ParameterCount actual(eax);
@@ -987,18 +985,16 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(edi, Operand(ebp, kFunctionOffset));
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &call_proxy);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
__ push(edi); // add function proxy as last argument
__ inc(eax);
__ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1233,14 +1229,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
- // -- ecx : call kind information
- // -- edx : code entry to call
+ // -- edi : function (passed through to callee)
// -----------------------------------
Label invoke, dont_adapt_arguments;
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
Label enough, too_few;
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ cmp(eax, ebx);
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -1321,17 +1317,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- __ mov(ebx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ sub(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ sub(edx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ SmiTag(edx);
-
- // Pass both function and pc offset as arguments.
+ // Pass function as argument.
__ push(eax);
- __ push(edx);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
Label skip;
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index b6bbe04b33..22709e41a0 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -54,6 +54,16 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
}
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edi };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -82,7 +92,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -100,8 +110,8 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { ebx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { ebx, edx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -118,6 +128,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx, ebx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -167,16 +199,22 @@ static void InitializeArrayConstructorDescriptor(
// register state
// eax -- number of arguments
// edi -- function
- // ebx -- type info cell with elements kind
- static Register registers[] = { edi, ebx };
- descriptor->register_param_count_ = 2;
+ // ebx -- allocation site with elements kind
+ static Register registers_variable_args[] = { edi, ebx, eax };
+ static Register registers_no_args[] = { edi, ebx };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// stack param count needs (constructor pointer, and single argument)
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
@@ -190,15 +228,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// eax -- number of arguments
// edi -- constructor function
- static Register registers[] = { edi };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { edi, eax };
+ static Register registers_no_args[] = { edi };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// stack param count needs (constructor pointer, and single argument)
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -294,7 +338,7 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
-void BinaryOpStub::InitializeInterfaceDescriptor(
+void BinaryOpICStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx, eax };
@@ -306,6 +350,112 @@ void BinaryOpStub::InitializeInterfaceDescriptor(
}
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx, edx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { edi, // JSFunction
+ esi, // context
+ eax, // actual number of arguments
+ ebx, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { esi, // context
+ ecx, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { esi, // context
+ ecx, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { esi, // context
+ edx, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { eax, // callee
+ ebx, // call_data
+ ecx, // holder
+ edx, // api_function_address
+ esi, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -333,118 +483,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Set up the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- factory->function_context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // Set up the fixed slots.
- __ Set(ebx, Immediate(0)); // Set to NULL.
- __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
- __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
-
- // Copy the global object from the previous context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);
-
- // Initialize the rest of the slots to undefined.
- __ mov(ebx, factory->undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
- }
-
- // Return and remove the on-stack parameter.
- __ mov(esi, eax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + (1 * kPointerSize)]: function
- // [esp + (2 * kPointerSize)]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length), eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function or sentinel from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Get the serialized scope info from the stack.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
-
- // Set up the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- factory->block_context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- __ cmp(ecx, 0);
- __ Assert(equal, kExpected0AsASmiSentinel);
- }
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
- __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots.
- __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
- __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
- __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
-
- // Copy the global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- __ mov(ContextOperand(eax, Context::GLOBAL_OBJECT_INDEX), ebx);
-
- // Initialize the rest of the slots to the hole value.
- if (slots_ == 1) {
- __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
- factory->the_hole_value());
- } else {
- __ mov(ebx, factory->the_hole_value());
- for (int i = 0; i < slots_; i++) {
- __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ mov(esi, eax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -648,316 +686,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // esp[4]: tagged number input argument (should be number).
- // esp[0]: return address.
- // Output:
- // eax: tagged double result.
- // UNTAGGED case:
- // Input::
- // esp[0]: return address.
- // xmm1: untagged double input argument
- // Output:
- // xmm1: untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- // Test that eax is a number.
- Label input_not_smi;
- Label loaded;
- __ mov(eax, Operand(esp, kPointerSize));
- __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the low and high words of the double into ebx, edx.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sar(eax, 1);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ mov(Operand(esp, 0), eax);
- __ fild_s(Operand(esp, 0));
- __ fst_d(Operand(esp, 0));
- __ pop(edx);
- __ pop(ebx);
- __ jmp(&loaded, Label::kNear);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // low and high words into ebx, edx.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse4_scope(masm, SSE4_1);
- __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
- } else {
- __ pshufd(xmm0, xmm1, 0x1);
- __ movd(edx, xmm0);
- }
- __ movd(ebx, xmm1);
- }
-
- // ST[0] or xmm1 == double value
- // ebx = low 32 bits of double value
- // edx = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ mov(ecx, ebx);
- __ xor_(ecx, edx);
- __ mov(eax, ecx);
- __ sar(eax, 16);
- __ xor_(ecx, eax);
- __ mov(eax, ecx);
- __ sar(eax, 8);
- __ xor_(ecx, eax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ and_(ecx,
- Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] or xmm1 == double value.
- // ebx = low 32 bits of double value.
- // edx = high 32 bits of double value.
- // ecx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ mov(eax, Immediate(cache_array));
- int cache_array_index =
- type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
- __ mov(eax, Operand(eax, cache_array_index));
- // Eax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, eax);
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
- // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
- __ lea(ecx, Operand(ecx, ecx, times_2, 0));
- __ lea(ecx, Operand(eax, ecx, times_4, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmp(ebx, Operand(ecx, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- __ cmp(edx, Operand(ecx, kIntSize));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Cache hit!
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->transcendental_cache_hit(), 1);
- __ mov(eax, Operand(ecx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0);
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- __ IncrementCounter(counters->transcendental_cache_miss(), 1);
- // Update cache with new value.
- // We are short on registers, so use no_reg as scratch.
- // This gives slightly larger code.
- if (tagged) {
- __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
- __ movsd(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- }
- GenerateOperation(masm, type_);
- __ mov(Operand(ecx, 0), ebx);
- __ mov(Operand(ecx, kIntSize), edx);
- __ mov(Operand(ecx, 2 * kIntSize), eax);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
- __ movsd(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- GenerateOperation(masm, type_);
- __ fstp_d(Operand(esp, 0));
- __ movsd(xmm1, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- ExternalReference runtime =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime, 1, 1);
- } else { // UNTAGGED.
- CpuFeatureScope scope(masm, SSE2);
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(eax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
- MacroAssembler* masm, TranscendentalCache::Type type) {
- // Only free register is edi.
- // Input value is on FP stack, and also in ebx/edx.
- // Input value is possibly in xmm1.
- // Address of result (a newly allocated HeapNumber) may be in eax.
- if (type == TranscendentalCache::SIN ||
- type == TranscendentalCache::COS ||
- type == TranscendentalCache::TAN) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range, done;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ mov(edi, edx);
- __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
- int supported_exponent_limit =
- (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(edi, Immediate(supported_exponent_limit));
- __ j(below, &in_range, Label::kNear);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmp(edi, Immediate(0x7ff00000));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, Label::kNear);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ push(Immediate(0x7ff80000));
- __ push(Immediate(0));
- __ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(2 * kPointerSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ mov(edi, eax); // Save eax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear exceptions if the Invalid Operation or Zero Divide flags are set.
- __ test(eax, Immediate(5));
- __ j(zero, &no_exceptions, Label::kNear);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ test(eax, Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- __ fstp(0);
- __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
-
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- case TranscendentalCache::TAN:
- // FPTAN calculates the tangent of st(0) and pushes 1.0 onto the
- // FP register stack.
- __ fptan();
- __ fstp(0); // Pop FP register stack.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
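For context on the removed fast path above: it reduces the argument modulo 2*pi with fldpi/fprem1 before issuing fsin/fcos/fptan, because those x87 instructions only accept inputs within +/-2^63 and yield NaN for infinities and NaN. A minimal standalone sketch of the same reduction, using std::remainder in place of fprem1 (the 2^63 cutoff is written as an approximate constant; this is illustrative, not V8 code):

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Sketch only: mirrors the removed stub's decisions, not its implementation.
    static double ReducedSin(double x) {
      if (!std::isfinite(x)) {
        return std::numeric_limits<double>::quiet_NaN();  // +/-Inf and NaN -> NaN
      }
      if (std::fabs(x) >= 9.22e18) {  // roughly 2^63, the fsin/fcos input limit
        x = std::remainder(x, 2.0 * 3.141592653589793);   // fprem1-style reduction
      }
      return std::sin(x);
    }

    int main() {
      std::printf("%g\n", ReducedSin(1e19));
      return 0;
    }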
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -2260,88 +1988,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ mov(ebx, Operand(esp, kPointerSize * 3));
- __ JumpIfNotSmi(ebx, &slowcase);
- __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- ebx, // In: Number of elements as a smi
- REGISTER_VALUE_IS_SMI,
- eax, // Out: Start of allocation (tagged).
- ecx, // Out: End of allocation.
- edx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // eax: Start of allocated area, object-tagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ mov(edx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, Immediate(factory->empty_fixed_array()));
- __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
- __ mov(edx, FieldOperand(edx, GlobalObject::kNativeContextOffset));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
-
- // Set input, index and length fields from arguments.
- __ mov(ecx, Operand(esp, kPointerSize * 1));
- __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 2));
- __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 3));
- __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
-
- // Fill out the elements FixedArray.
- // eax: JSArray.
- // ebx: FixedArray.
- // ecx: Number of elements in array, as smi.
-
- // Set map.
- __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory->fixed_array_map()));
- // Set length.
- __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
- // Fill contents of fixed-array with undefined.
- __ SmiUntag(ecx);
- __ mov(edx, Immediate(factory->undefined_value()));
- __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
- // Fill fixed array elements with undefined.
- // eax: JSArray.
- // ecx: Number of elements to fill.
- // ebx: Start of elements in FixedArray.
- // edx: undefined.
- Label loop;
- __ test(ecx, ecx);
- __ bind(&loop);
- __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
- __ sub(ecx, Immediate(1));
- __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
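The removed stub above builds the regexp result with a single new-space allocation: a JSRegExpResult immediately followed by its elements FixedArray, laid out as the comment describes. A tiny sketch of how that allocation is sized, assuming ia32's 4-byte pointers; the constants are illustrative stand-ins for the real kSize/kHeaderSize values:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;                          // ia32
      const int kJSRegExpResultSize = 6 * kPointerSize;    // map, properties, elements, length, index, input
      const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map, length
      const int kMaxInlineLength = 100;                    // same cutoff as the stub
      int num_elements = 10;                               // number of match captures
      if (num_elements > kMaxInlineLength) return 1;       // stub would take the slow case
      int bytes = kJSRegExpResultSize + kFixedArrayHeaderSize +
                  num_elements * kPointerSize;
      std::printf("single new-space allocation of %d bytes\n", bytes);
      return 0;
    }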
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -2676,66 +2322,74 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// eax : number of arguments to the construct function
- // ebx : cache cell for call target
+ // ebx : Feedback vector
+ // edx : slot in feedback vector (Smi)
// edi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
+ Label check_array, initialize_array, initialize_non_array, megamorphic, done;
// Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(ecx, edi);
- __ j(equal, &done);
- __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done);
-
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
- __ j(not_equal, &miss);
+ __ j(equal, &done, Label::kFar);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ j(equal, &done, Label::kFar);
- // Load the global or builtins object from the current context
+ // Load the global or builtins object from the current context and check
+ // if we're dealing with the Array function or not.
__ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
__ cmp(edi, Operand(ecx,
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ j(equal, &check_array);
+
+ // Non-array cache: Reload the cache state and check it.
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::PremonomorphicSentinel(isolate)));
+ __ j(equal, &initialize_non_array);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
__ j(not_equal, &megamorphic);
- __ jmp(&done);
- __ bind(&miss);
+ // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
+ // immortal immovable object (null) so no write-barrier is needed.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::PremonomorphicSentinel(isolate)));
+ __ jmp(&done, Label::kFar);
+
+ // Array cache: Reload the cache state and check to see if we're in a
+ // monomorphic state where the state object is an AllocationSite object.
+ __ bind(&check_array);
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(equal, &done, Label::kFar);
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
- __ j(equal, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kNear);
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ j(equal, &initialize_array);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::PremonomorphicSentinel(isolate)));
+ __ j(equal, &initialize_array);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &not_array_function);
+ // Both caches: Monomorphic -> megamorphic. The sentinel is an
+ // immortal immovable object (undefined) so no write-barrier is needed.
+ __ bind(&megamorphic);
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kFar);
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ bind(&initialize_array);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2743,120 +2397,147 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ SmiTag(eax);
__ push(eax);
__ push(edi);
+ __ push(edx);
__ push(ebx);
CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub);
__ pop(ebx);
+ __ pop(edx);
__ pop(edi);
__ pop(eax);
__ SmiUntag(eax);
}
__ jmp(&done);
- __ bind(&not_array_function);
- __ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
+ // Non-array cache: Premonomorphic -> monomorphic.
+ __ bind(&initialize_non_array);
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
+ __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(edx);
+ __ pop(ebx);
+ __ pop(edi);
__ bind(&done);
}
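For readers following the new feedback-vector protocol, here is a rough standalone model of the slot transitions GenerateRecordCallTarget implements. This is not V8 code; the enum and parameter names are illustrative:

    enum SlotState {
      UNINITIALIZED,           // UninitializedSentinel
      PREMONOMORPHIC,          // PremonomorphicSentinel
      MONOMORPHIC_FUNCTION,    // the JSFunction itself is stored in the slot
      MONOMORPHIC_ARRAY_SITE,  // an AllocationSite is stored in the slot
      MEGAMORPHIC              // MegamorphicSentinel
    };

    SlotState RecordCallTarget(SlotState state, bool callee_is_array_function,
                               bool callee_matches_cached_function) {
      switch (state) {
        case UNINITIALIZED:
          // Array calls get an AllocationSite straight away; other callees
          // first pass through a premonomorphic step.
          return callee_is_array_function ? MONOMORPHIC_ARRAY_SITE
                                          : PREMONOMORPHIC;
        case PREMONOMORPHIC:
          return callee_is_array_function ? MONOMORPHIC_ARRAY_SITE
                                          : MONOMORPHIC_FUNCTION;
        case MONOMORPHIC_FUNCTION:
          return callee_matches_cached_function ? MONOMORPHIC_FUNCTION
                                                : MEGAMORPHIC;
        case MONOMORPHIC_ARRAY_SITE:
          return callee_is_array_function ? MONOMORPHIC_ARRAY_SITE
                                          : MEGAMORPHIC;
        case MEGAMORPHIC:
          return MEGAMORPHIC;
      }
      return MEGAMORPHIC;  // unreachable
    }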
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // ebx : cache cell for call target
+ // ebx : feedback vector
+ // edx : (only if ebx is not undefined) slot in feedback vector (Smi)
// edi : the function to call
Isolate* isolate = masm->isolate();
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label receiver_ok;
- // Get the receiver from the stack.
- // +1 ~ return address
- __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
- // Call as function is indicated with the hole.
- __ cmp(eax, isolate->factory()->the_hole_value());
- __ j(not_equal, &receiver_ok, Label::kNear);
- // Patch the receiver on the stack with the global receiver object.
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
- __ bind(&receiver_ok);
- }
+ Label slow, non_function, wrap, cont;
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &non_function);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
+ if (NeedsChecks()) {
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(edi, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
}
// Fast-case: Just invoke the function.
ParameterCount actual(argc_);
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ cmp(eax, isolate->factory()->the_hole_value());
- __ j(equal, &call_as_function);
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
+ if (CallAsMethod()) {
+ if (NeedsChecks()) {
+ // Do not transform the receiver for strict mode functions.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &cont);
+
+ // Do not transform the receiver for natives (shared already in ecx).
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &cont);
+ }
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ // Load the receiver from the stack.
+ __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
+
+ if (NeedsChecks()) {
+ __ JumpIfSmi(eax, &wrap);
+
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
}
- // Check for function proxy.
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
- __ pop(ecx);
- __ push(edi); // put proxy as additional argument under return address
- __ push(ecx);
- __ Set(eax, Immediate(argc_ + 1));
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_FUNCTION);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- {
+
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (NeedsChecks()) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ if (RecordCallTarget()) {
+ // If there is a call target cache, mark it megamorphic in the
+ // non-function case. MegamorphicSentinel is an immortal immovable
+ // object (undefined) so no write barrier is needed.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ }
+ // Check for function proxy.
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function);
+ __ pop(ecx);
+ __ push(edi); // put proxy as additional argument under return address
+ __ push(ecx);
+ __ Set(eax, Immediate(argc_ + 1));
+ __ Set(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(&non_function);
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
+ __ Set(eax, Immediate(argc_));
+ __ Set(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
+ if (CallAsMethod()) {
+ __ bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(edi);
+ }
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
+ __ jmp(&cont);
+ }
}
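The CallAsMethod() path above boils down to one decision about the receiver. A minimal sketch of that decision (not V8 code; parameter names are illustrative):

    enum ReceiverAction { KEEP_RECEIVER, WRAP_WITH_TO_OBJECT };

    // Mirrors the checks in CallFunctionStub::Generate: strict-mode and native
    // callees keep their receiver unchanged, while primitive receivers of
    // sloppy-mode callees are wrapped via the TO_OBJECT builtin and patched
    // back onto the stack.
    ReceiverAction ClassifyReceiver(bool callee_is_strict_mode,
                                    bool callee_is_native,
                                    bool receiver_is_spec_object) {
      if (callee_is_strict_mode || callee_is_native) return KEEP_RECEIVER;
      return receiver_is_spec_object ? KEEP_RECEIVER : WRAP_WITH_TO_OBJECT;
    }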
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
- // ebx : cache cell for call target
+ // ebx : feedback vector
+ // edx : (only if ebx is not undefined) slot in feedback vector (Smi)
// edi : constructor function
Label slow, non_function_call;
@@ -2895,7 +2576,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Set(ebx, Immediate(0));
Handle<Code> arguments_adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ SetCallKind(ecx, CALL_AS_METHOD);
__ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
}
@@ -2905,25 +2585,20 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
- return (!save_doubles_ || isolate->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
if (Serializer::enabled()) {
PlatformFeatureScope sse2(SSE2);
- BinaryOpStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
} else {
- BinaryOpStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
}
@@ -2937,7 +2612,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
save_doubles_code = *(save_doubles.GetCode(isolate));
}
- save_doubles_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
}
@@ -2945,8 +2619,7 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -3621,396 +3294,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfSmi(eax, &call_runtime);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &call_runtime);
-
- // First argument is a string, test second.
- __ JumpIfSmi(edx, &call_runtime);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &call_runtime);
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
-
- // Both arguments are strings.
- // eax: first string
- // edx: second string
- // Check if either of the strings are empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, ecx);
- __ j(not_zero, &second_not_zero_length, Label::kNear);
- // Second string is empty, result is first string which is already in eax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, ebx);
- __ j(not_zero, &both_not_zero_length, Label::kNear);
- // First string is empty, result is second string which is in edx.
- __ mov(eax, edx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // eax: first string
- // ebx: length of first string as a smi
- // ecx: length of second string as a smi
- // edx: second string
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
- __ add(ebx, ecx);
- STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
- // Handle exceptionally long strings in the runtime system.
- __ j(overflow, &call_runtime);
- // Use the string table when adding two one-character strings, as it
- // helps later optimizations to return an internalized string here.
- __ cmp(ebx, Immediate(Smi::FromInt(2)));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
-
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_two_character_string_no_reload;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, ebx, ecx, eax, edx, edi,
- &make_two_character_string_no_reload, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Allocate a two character string.
- __ bind(&make_two_character_string);
- // Reload the arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
- __ bind(&make_two_character_string_no_reload);
- __ IncrementCounter(counters->string_add_make_two_char(), 1);
- __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
- // Pack both characters in ebx.
- __ shl(ecx, kBitsPerByte);
- __ or_(ebx, ecx);
- // Set the characters in the new string.
- __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
- __ j(below, &string_add_flat_result);
-
- // If the result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII, the result is an ASCII cons string.
- Label non_ascii, allocated, ascii_data;
- __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, edi);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(ecx, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ASCII cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ AssertSmi(ebx);
- __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
- __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
- __ j(zero, &skip_write_barrier);
-
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ RecordWriteField(ecx,
- ConsString::kFirstOffset,
- eax,
- ebx,
- kDontSaveFPRegs);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
- __ RecordWriteField(ecx,
- ConsString::kSecondOffset,
- edx,
- ebx,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
-
- __ bind(&skip_write_barrier);
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
-
- __ bind(&after_writing);
-
- __ mov(eax, ecx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one-byte characters.
- // ecx: first instance type AND second instance type.
- // edi: second instance type.
- __ test(ecx, Immediate(kOneByteDataHintMask));
- __ j(not_zero, &ascii_data);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, ecx);
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ and_(edi, kOneByteStringTag | kOneByteDataHintTag);
- __ cmp(edi, kOneByteStringTag | kOneByteDataHintTag);
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label first_prepared, second_prepared;
- Label first_is_sequential, second_is_sequential;
- __ bind(&string_add_flat_result);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- // ecx: instance type of first string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ecx, kStringRepresentationMask);
- __ j(zero, &first_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(ecx, kShortExternalStringMask);
- __ j(not_zero, &call_runtime);
- __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ jmp(&first_prepared, Label::kNear);
- __ bind(&first_is_sequential);
- __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ bind(&first_prepared);
-
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- // Check whether both strings have same encoding.
- // edi: instance type of second string
- __ xor_(ecx, edi);
- __ test_b(ecx, kStringEncodingMask);
- __ j(not_zero, &call_runtime);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(edi, kStringRepresentationMask);
- __ j(zero, &second_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(edi, kShortExternalStringMask);
- __ j(not_zero, &call_runtime);
- __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ jmp(&second_prepared, Label::kNear);
- __ bind(&second_is_sequential);
- __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ bind(&second_prepared);
-
- // Push the addresses of both strings' first characters onto the stack.
- __ push(edx);
- __ push(eax);
-
- Label non_ascii_string_add_flat_result, call_runtime_drop_two;
- // edi: instance type of second string
- // First string and second string have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(edi, kStringEncodingMask);
- __ j(zero, &non_ascii_string_add_flat_result);
-
- // Both strings are ASCII strings.
- // ebx: length of resulting flat string as a smi
- __ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // eax: first string - known to be two byte
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are two byte strings.
- __ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Recover stack pointer before jumping to runtime.
- __ bind(&call_runtime_drop_two);
- __ Drop(2);
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
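For reference, the removed StringAddStub's fast paths reduce to the following strategy choice. This is a sketch, not V8 code; max_length and cons_min_length stand in for String::kMaxLength and ConsString::kMinLength:

    #include <cstddef>

    enum AddStrategy {
      RETURN_FIRST,           // second string is empty
      RETURN_SECOND,          // first string is empty
      TWO_CHAR_TABLE_LOOKUP,  // probe the string table for an internalized result
      CONS_STRING,            // long result: allocate a ConsString, no copying
      FLAT_COPY,              // short result: allocate a flat string and copy
      RUNTIME                 // overflow or anything unsupported
    };

    AddStrategy ChooseAddStrategy(size_t first_len, size_t second_len,
                                  size_t max_length, size_t cons_min_length) {
      if (second_len == 0) return RETURN_FIRST;
      if (first_len == 0) return RETURN_SECOND;
      size_t result_len = first_len + second_len;
      if (result_len > max_length) return RUNTIME;
      if (result_len == 2) return TWO_CHAR_TABLE_LOOKUP;
      if (result_len >= cons_min_length) return CONS_STRING;
      return FLAT_COPY;
    }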
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(eax);
- __ push(edx);
-}
-
-
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
- Register temp) {
- __ pop(temp);
- __ pop(edx);
- __ pop(eax);
- __ push(temp);
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
- __ mov(arg, scratch1);
- __ mov(Operand(esp, stack_offset), arg);
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(src, Immediate(1));
- __ add(dest, Immediate(1));
- } else {
- __ mov_w(scratch, Operand(src, 0));
- __ mov_w(Operand(dest, 0), scratch);
- __ add(src, Immediate(2));
- __ add(dest, Immediate(2));
- }
- __ sub(count, Immediate(1));
- __ j(not_zero, &loop);
-}
-
-
void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
Register dest,
Register src,
@@ -4071,128 +3354,6 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ mov(scratch, c1);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index, Label::kNear);
- __ mov(scratch, c2);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_probed);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, kBitsPerByte);
- __ or_(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ mov(mask, FieldOperand(string_table, StringTable::kCapacityOffset));
- __ SmiUntag(mask);
- __ sub(mask, Immediate(1));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // string_table: string table
- // mask: capacity mask
- // scratch: -
-
- // Perform a number of probes in the string table.
- static const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes], next_probe_pop_mask[kProbes];
- Register candidate = scratch; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- __ mov(scratch, hash);
- if (i > 0) {
- __ add(scratch, Immediate(StringTable::GetProbeOffset(i)));
- }
- __ and_(scratch, mask);
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ mov(candidate,
- FieldOperand(string_table,
- scratch,
- times_pointer_size,
- StringTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- Factory* factory = masm->isolate()->factory();
- __ cmp(candidate, factory->undefined_value());
- __ j(equal, not_found);
- __ cmp(candidate, factory->the_hole_value());
- __ j(equal, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ cmp(FieldOperand(candidate, String::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &next_probe[i]);
-
- // As we are out of registers save the mask on the stack and use that
- // register as a temporary.
- __ push(mask);
- Register temp = mask;
-
- // Check that the candidate is a non-external ASCII string.
- __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe_pop_mask[i]);
-
- // Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
- __ and_(temp, 0x0000ffff);
- __ cmp(chars, temp);
- __ j(equal, &found_in_string_table);
- __ bind(&next_probe_pop_mask[i]);
- __ pop(mask);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ pop(mask); // Pop saved mask from the stack.
- if (!result.is(eax)) {
- __ mov(eax, result);
- }
-}
-
-
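The removed probe routine above is a bounded open-addressing lookup: hash the two characters, mask with the table capacity, and try a fixed number of slots. A simplified sketch over a plain vector; the probe-offset formula and the empty-slot handling are assumptions, not the real StringTable behaviour:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Returns the table index of key, or -1 if it is definitely absent or the
    // probe budget is exhausted (the stub then falls back to allocating a string).
    int ProbeStringTable(const std::vector<std::string>& table,  // capacity: power of two
                         uint32_t hash, const std::string& key) {
      const int kProbes = 4;  // same fixed probe count as the removed code
      uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      for (int i = 0; i < kProbes; ++i) {
        uint32_t index = (hash + static_cast<uint32_t>(i)) & mask;  // assumed offsets
        const std::string& candidate = table[index];
        if (candidate.empty()) return -1;  // stands in for the undefined sentinel
        if (candidate == key) return static_cast<int>(index);
      }
      return -1;
    }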
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character,
@@ -4657,6 +3818,227 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
+void ArrayPushStub::Generate(MacroAssembler* masm) {
+ int argc = arguments_count();
+
+ if (argc == 0) {
+ // Noop, return the length.
+ __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
+ __ ret((argc + 1) * kPointerSize);
+ return;
+ }
+
+ Isolate* isolate = masm->isolate();
+
+ if (argc != 1) {
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ Label call_builtin, attempt_to_grow_elements, with_write_barrier;
+
+ // Get the elements array of the object.
+ __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ // Check that the elements are in fast mode and writable.
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ isolate->factory()->fixed_array_map());
+ __ j(not_equal, &call_builtin);
+ }
+
+ // Get the array's length into eax and calculate new length.
+ __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(eax, Immediate(Smi::FromInt(argc)));
+
+ // Get the elements' length into ecx.
+ __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmp(eax, ecx);
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ __ j(greater, &attempt_to_grow_elements);
+
+ // Check if value is a smi.
+ __ mov(ecx, Operand(esp, argc * kPointerSize));
+ __ JumpIfNotSmi(ecx, &with_write_barrier);
+
+ // Store the value.
+ __ mov(FieldOperand(edi, eax, times_half_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize),
+ ecx);
+ } else {
+ __ j(greater, &call_builtin);
+
+ __ mov(ecx, Operand(esp, argc * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
+ }
+
+ // Save new length.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+ __ ret((argc + 1) * kPointerSize);
+
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ __ bind(&with_write_barrier);
+
+ if (IsFastSmiElementsKind(elements_kind())) {
+ if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
+
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ isolate->factory()->heap_number_map());
+ __ j(equal, &call_builtin);
+
+ ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
+ ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, ContextOperand(ebx, Context::JS_ARRAY_MAPS_INDEX));
+ const int header_size = FixedArrayBase::kHeaderSize;
+ // Verify that the object can be transitioned in place.
+ const int origin_offset = header_size + elements_kind() * kPointerSize;
+ __ mov(edi, FieldOperand(ebx, origin_offset));
+ __ cmp(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ j(not_equal, &call_builtin);
+
+ const int target_offset = header_size + target_kind * kPointerSize;
+ __ mov(ebx, FieldOperand(ebx, target_offset));
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, DONT_TRACK_ALLOCATION_SITE, NULL);
+ // Restore edi used as a scratch register for the write barrier used while
+ // setting the map.
+ __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
+ }
+
+ // Save new length.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+
+ // Store the value.
+ __ lea(edx, FieldOperand(edi, eax, times_half_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ mov(Operand(edx, 0), ecx);
+
+ __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&attempt_to_grow_elements);
+ if (!FLAG_inline_new) {
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ __ mov(ebx, Operand(esp, argc * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case the
+ // new element is non-Smi. For now, delegate to the builtin.
+ if (IsFastSmiElementsKind(elements_kind())) {
+ __ JumpIfNotSmi(ebx, &call_builtin);
+ }
+
+ // We could be lucky and the elements array could be at the top of new-space.
+ // In this case we can just grow it in place by moving the allocation pointer
+ // up.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate);
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate);
+
+ const int kAllocationDelta = 4;
+ ASSERT(kAllocationDelta >= argc);
+ // Load top.
+ __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
+
+ // Check if it's the end of elements.
+ __ lea(edx, FieldOperand(edi, eax, times_half_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ cmp(edx, ecx);
+ __ j(not_equal, &call_builtin);
+ __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
+ __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
+ __ j(above, &call_builtin);
+
+ // We fit and could grow elements.
+ __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
+
+ // Push the argument...
+ __ mov(Operand(edx, 0), ebx);
+ // ... and fill the rest with holes.
+ for (int i = 1; i < kAllocationDelta; i++) {
+ __ mov(Operand(edx, i * kPointerSize),
+ isolate->factory()->the_hole_value());
+ }
+
+ if (IsFastObjectElementsKind(elements_kind())) {
+ // We know the elements array is in new space so we don't need the
+ // remembered set, but we just pushed a value onto it so we may have to tell
+ // the incremental marker to rescan the object that we just grew. We don't
+ // need to worry about the holes because they are in old space and already
+ // marked black.
+ __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+ }
+
+ // Restore receiver to edx as finish sequence assumes it's here.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ // Increment element's and array's sizes.
+ __ add(FieldOperand(edi, FixedArray::kLengthOffset),
+ Immediate(Smi::FromInt(kAllocationDelta)));
+
+ // NOTE: This only happens in new-space, where we don't care about the
+ // black-byte-count on pages. Otherwise we should update that too if the
+ // object is black.
+
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+}
+
+
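The attempt_to_grow_elements path in the new ArrayPushStub relies on a bump-allocator trick: if the elements array ends exactly at the new-space allocation top, it can be extended by simply bumping the top pointer. A standalone sketch of that check (not V8 code):

    #include <cstdint>

    // Returns true and bumps *allocation_top if the backing store can be grown
    // in place by delta_bytes; otherwise leaves everything untouched so the
    // caller can fall back to the builtin.
    bool TryGrowInPlace(uintptr_t elements_end, uintptr_t* allocation_top,
                        uintptr_t allocation_limit, uintptr_t delta_bytes) {
      if (elements_end != *allocation_top) return false;  // something was allocated after us
      if (*allocation_top + delta_bytes > allocation_limit) return false;  // does not fit
      *allocation_top += delta_bytes;  // claim the extra slots
      return true;
    }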
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : left
+ // -- eax : right
+ // -- esp[0] : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ // Load ecx with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ mov(ecx, handle(isolate->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_equal, kExpectedAllocationSite);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ isolate->factory()->allocation_site_map());
+ __ Assert(equal, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(state_);
+ __ TailCallStub(&stub);
+}
+
+
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMI);
Label miss;
@@ -5240,90 +4622,13 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore
- { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
- { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
- // StoreArrayLiteralElementStub::Generate
- { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub and StringAddStub::Generate
- { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
- // StringAddStub::Generate
- { REG(ecx), REG(eax), REG(ebx), EMIT_REMEMBERED_SET},
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub(kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate);
}
}
@@ -5638,10 +4943,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- // It's always safe to call the entry hook stub, as the hook itself
- // is not allowed to call back to V8.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
ProfileEntryHookStub stub;
masm->CallStub(&stub);
}
@@ -5685,7 +4986,6 @@ static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
@@ -5711,7 +5011,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // ebx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// edx - kind (if mode != DISABLE_ALLOCATION_SITES)
// eax - number of arguments
// edi - constructor?
@@ -5741,31 +5041,31 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind holey_initial = GetHoleyElementsKind(initial);
ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry.
__ inc(edx);
- __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+
if (FLAG_debug_code) {
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
- __ Assert(equal, kExpectedAllocationSiteInCell);
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ Assert(equal, kExpectedAllocationSite);
}
- // Save the resulting elements kind in type info
- __ SmiTag(edx);
- __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
- __ SmiUntag(edx);
+ // Save the resulting elements kind in type info. We can't just store edx
+ // in the AllocationSite::transition_info field, because the elements kind
+ // is restricted to a portion of the field; the upper bits must be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ add(FieldOperand(ebx, AllocationSite::kTransitionInfoOffset),
+ Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -5790,20 +5090,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate);
}
}
}
@@ -5825,11 +5120,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -5865,7 +5160,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count_ == ANY)
- // -- ebx : type info cell
+ // -- ebx : feedback vector (fixed array or undefined)
+ // -- edx : slot index (if ebx is fixed array)
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -5886,28 +5182,36 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid cell
+ // We should either have undefined in ebx or a valid fixed array.
Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
+ Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &okay_here);
- __ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
- __ Assert(equal, kExpectedPropertyCellInRegisterEbx);
+ __ cmp(FieldOperand(ebx, 0), Immediate(fixed_array_map));
+ __ Assert(equal, kExpectedFixedArrayInRegisterEbx);
+
+ // edx should be a smi if we don't have undefined in ebx.
+ __ AssertSmi(edx);
+
__ bind(&okay_here);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
+ // If the feedback vector is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites.
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
- __ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(FieldOperand(edx, 0), Immediate(
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(FieldOperand(ebx, 0), Immediate(
masm->isolate()->factory()->allocation_site_map()));
__ j(not_equal, &no_info);
- __ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
+ // Only look at the lower 16 bits of the transition info.
+ __ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(edx);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
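Both the one-argument transition and the dispatch above treat AllocationSite::transition_info as a small bit field: the elements kind sits in the low bits and the remaining bits must be preserved. A sketch of that handling; kElementsKindMask and kPackedToHoleyDelta are illustrative stand-ins for ElementsKindBits::kMask and kFastElementsKindPackedToHoley:

    #include <cstdint>

    const uint32_t kElementsKindShift = 0;      // matches the STATIC_ASSERT above
    const uint32_t kElementsKindMask = 0xFFFF;  // assumed: lower 16 bits
    const uint32_t kPackedToHoleyDelta = 1;     // assumed packed -> holey distance

    uint32_t ReadElementsKind(uint32_t transition_info) {
      return (transition_info >> kElementsKindShift) & kElementsKindMask;
    }

    uint32_t TransitionToHoley(uint32_t transition_info) {
      // Adding the delta bumps only the kind stored in the low bits and leaves
      // the upper bits of transition_info alone, like the Smi add in the stub.
      return transition_info + (kPackedToHoleyDelta << kElementsKindShift);
    }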
@@ -5954,7 +5258,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
- // -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -6003,6 +5306,165 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : callee
+ // -- ebx : call_data
+ // -- ecx : holder
+ // -- edx : api_function_address
+ // -- esi : context
+ // --
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[argc * 4] : first argument
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ Register callee = eax;
+ Register call_data = ebx;
+ Register holder = ecx;
+ Register api_function_address = edx;
+ Register return_address = edi;
+ Register context = esi;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ Isolate* isolate = masm->isolate();
+
+ __ pop(return_address);
+
+ // context save
+ __ push(context);
+ // load context from callee
+ __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ // return value
+ __ push(Immediate(isolate->factory()->undefined_value()));
+ // return value default
+ __ push(Immediate(isolate->factory()->undefined_value()));
+ } else {
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ }
+ // isolate
+ __ push(Immediate(reinterpret_cast<int>(isolate)));
+ // holder
+ __ push(holder);
+
+ __ mov(scratch, esp);
+
+ // return address
+ __ push(return_address);
+
+ // The API function gets a reference to the v8::Arguments. If the CPU
+ // profiler is enabled, a wrapper function is called instead and the address
+ // of the callback must be passed as an additional parameter, so always
+ // allocate space for it.
+ const int kApiArgc = 1 + 1;
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_.
+ __ mov(ApiParameterOperand(2), scratch);
+ __ add(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Set(ApiParameterOperand(4), Immediate(argc));
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Set(ApiParameterOperand(5), Immediate(0));
+
+ // v8::InvocationCallback's argument.
+ __ lea(scratch, ApiParameterOperand(2));
+ __ mov(ApiParameterOperand(0), scratch);
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+
+ Operand context_restore_operand(ebp,
+ (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ Operand return_value_operand(ebp, return_value_offset * kPointerSize);
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_address,
+ ApiParameterOperand(1),
+ argc + FCA::kArgsLength + 1,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : name
+ // -- esp[8 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- edx : api_function_address
+ // -----------------------------------
+
+ // Array for v8::Arguments::values_, a handle for the name, and a pointer
+ // to the values (treated as a smi by the GC).
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
+ // Allocate space for the optional callback address parameter in case the
+ // CPU profiler is active.
+ const int kApiArgc = 2 + 1;
+
+ Register api_function_address = edx;
+ Register scratch = ebx;
+
+ // load address of name
+ __ lea(scratch, Operand(esp, 1 * kPointerSize));
+
+ __ PrepareCallApiFunction(kApiArgc);
+ __ mov(ApiParameterOperand(0), scratch); // name.
+ __ add(scratch, Immediate(kPointerSize));
+ __ mov(ApiParameterOperand(1), scratch); // arguments pointer.
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_address,
+ ApiParameterOperand(2),
+ kStackSpace,
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
+}
+
+
#undef __
} } // namespace v8::internal
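
The CallApiFunctionStub added above builds the FunctionCallbackArguments implicit-args block directly on the stack, pushing from the highest index (the context save at index 6) down to the lowest (the holder at index 0), so that after the pushes esp points at index 0 and can be handed to the callback as FunctionCallbackInfo::implicit_args_. The sketch below is a standalone C++ model of that layout, not the real V8 structures; the index constants mirror the STATIC_ASSERTs in the stub, and the type and function names are invented for illustration.

#include <array>
#include <cassert>
#include <cstdio>

// Standalone model of the implicit-args block laid out by the stub above.
// Indices mirror the STATIC_ASSERTs (kHolderIndex == 0 ... kContextSaveIndex
// == 6, kArgsLength == 7); "Object" is a stand-in for tagged values.
using Object = const void*;

struct ImplicitArgs {
  static constexpr int kHolderIndex = 0;
  static constexpr int kIsolateIndex = 1;
  static constexpr int kReturnValueDefaultValueIndex = 2;
  static constexpr int kReturnValueIndex = 3;
  static constexpr int kDataIndex = 4;
  static constexpr int kCalleeIndex = 5;
  static constexpr int kContextSaveIndex = 6;
  static constexpr int kArgsLength = 7;

  std::array<Object, kArgsLength> slots{};
};

// Pushing on a downward-growing stack from index 6 down to index 0 leaves the
// lowest index at the lowest address, which is what implicit_args_ points at.
ImplicitArgs BuildBlock(Object context, Object callee, Object data,
                        Object undefined, Object isolate, Object holder) {
  ImplicitArgs block;
  block.slots[ImplicitArgs::kContextSaveIndex] = context;   // pushed first
  block.slots[ImplicitArgs::kCalleeIndex] = callee;
  block.slots[ImplicitArgs::kDataIndex] = data;
  block.slots[ImplicitArgs::kReturnValueIndex] = undefined;
  block.slots[ImplicitArgs::kReturnValueDefaultValueIndex] = undefined;
  block.slots[ImplicitArgs::kIsolateIndex] = isolate;
  block.slots[ImplicitArgs::kHolderIndex] = holder;         // pushed last
  return block;
}

int main() {
  int ctx, fn, data, undef, iso, holder;
  ImplicitArgs block = BuildBlock(&ctx, &fn, &data, &undef, &iso, &holder);
  assert(block.slots[ImplicitArgs::kHolderIndex] == &holder);
  assert(block.slots[ImplicitArgs::kContextSaveIndex] == &ctx);
  std::printf("implicit args block has %d slots\n", ImplicitArgs::kArgsLength);
  return 0;
}
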
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 006651c9c8..e383a9d7e9 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -40,30 +40,6 @@ void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- static void GenerateOperation(MacroAssembler* masm,
- TranscendentalCache::Type type);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
@@ -74,7 +50,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -88,17 +63,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
// Generate code for copying characters using the rep movs instruction.
// Copies ecx characters from esi to edi. Copying of overlapping regions is
// not supported.
@@ -109,23 +73,6 @@ class StringHelper : public AllStatic {
Register scratch, // Neither of above.
bool ascii);
- // Probe the string table for a two character string. If the string
- // requires non-standard hashing a jump to the label not_probed is
- // performed and registers c1 and c2 are preserved. In all other
- // cases they are clobbered. If the string is not found by probing a
- // jump to the label not_found is performed. This jump does not
- // guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in
- // register eax.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found);
-
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
@@ -144,31 +91,6 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm, Register temp);
-
- const StringAddFlags flags_;
-};
-
-
class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -303,8 +225,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index d09a85f8b1..350a8fb224 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -57,57 +57,12 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) {
- // Fallback to library function if function cannot be created.
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- }
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- // Move double input into registers.
-
- __ push(ebx);
- __ push(edx);
- __ push(edi);
- __ fld_d(Operand(esp, 4 * kPointerSize));
- __ mov(ebx, Operand(esp, 4 * kPointerSize));
- __ mov(edx, Operand(esp, 5 * kPointerSize));
- TranscendentalCacheStub::GenerateOperation(&masm, type);
- // The return value is expected to be on ST(0) of the FPU stack.
- __ pop(edi);
- __ pop(edx);
- __ pop(ebx);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(SSE2)) return &exp;
- if (!FLAG_fast_math) return &exp;
+ if (!CpuFeatures::IsSupported(SSE2)) return &std::exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -148,7 +103,7 @@ UnaryMathFunction CreateSqrtFunction() {
true));
// If SSE2 is not available, we can use libc's implementation to ensure
// consistency since code by fullcodegen's calls into runtime in that case.
- if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
+ if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
@@ -1110,7 +1065,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
__ movsd(double_scratch, Operand::StaticArray(
temp2, times_8, ExternalReference::math_exp_log_table()));
- __ por(input, double_scratch);
+ __ orps(input, double_scratch);
__ mulsd(result, input);
__ bind(&done);
}
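
The codegen-ia32.cc hunk above keeps the existing pattern of falling back to the C library whenever a specialized stub cannot be generated (SSE2 unavailable, the fast-math flag off, or the executable buffer allocation failing), only now spelling the fallbacks as std::exp and std::sqrt. Below is a minimal standalone sketch of that factory pattern; CanUseFastPath() and FastExp() are invented stand-ins for V8's CpuFeatures/flag checks and the generated code.

#include <cmath>
#include <cstdio>

// Standalone sketch of the CreateExpFunction() fallback pattern above:
// return a specialized implementation when the fast path is available,
// otherwise hand back the libc function pointer so callers never care
// which one they got.
using UnaryMathFunction = double (*)(double);

static bool CanUseFastPath() {
  return false;  // pretend SSE2 (or code-space allocation) is unavailable
}

static double FastExp(double x) {
  // Placeholder for the generated-code path; the real CreateExpFunction()
  // emits machine code into an executable buffer instead.
  return std::exp(x);
}

UnaryMathFunction CreateExpFunctionSketch() {
  if (!CanUseFastPath()) return &std::exp;  // same shape as the hunk above
  return &FastExp;
}

int main() {
  UnaryMathFunction f = CreateExpFunctionSketch();
  std::printf("exp(1) = %f\n", f(1.0));
  return 0;
}
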
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 6a207ca9b5..2ef8043078 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -34,43 +34,6 @@
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator {
- public:
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
-
- static Operand FixedArrayElementOperand(Register array,
- Register index_as_smi,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
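
The removed CodeGenerator::FixedArrayElementOperand helper above relied on a small arithmetic identity: on ia32 a smi stores value i as i << 1, so scaling the still-tagged index by half a pointer (times_half_pointer_size, i.e. x2 on a 32-bit target) yields exactly i * kPointerSize, and the element address becomes array + kHeaderSize + i*4 - kHeapObjectTag. The standalone sketch below checks that arithmetic; the constants are the conventional ia32 values and are stated here as assumptions rather than taken from the headers.

#include <cassert>
#include <cstdio>

// Standalone check of the addressing math behind FixedArrayElementOperand:
// FieldOperand(array, index_as_smi, times_half_pointer_size, kHeaderSize).
constexpr int kPointerSize = 4;      // 32-bit target (assumed)
constexpr int kSmiTagSize = 1;       // smi i is stored as i << 1
constexpr int kHeapObjectTag = 1;    // tagged heap pointers have the low bit set
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length (assumed)

// Byte offset computed by the removed helper for a smi-tagged index.
constexpr int ElementOffset(int index_as_smi) {
  // times_half_pointer_size scales the smi by kPointerSize / 2 == 2, which
  // together with the smi tag shift gives index * kPointerSize.
  return index_as_smi * (kPointerSize / 2) + kFixedArrayHeaderSize -
         kHeapObjectTag;
}

int main() {
  for (int i = 0; i < 4; ++i) {
    int smi = i << kSmiTagSize;
    // Same result as untagging first and scaling by a full pointer.
    assert(ElementOffset(smi) ==
           i * kPointerSize + kFixedArrayHeaderSize - kHeapObjectTag);
  }
  std::printf("element 3 lives at byte offset %d\n", ElementOffset(3 << 1));
  return 0;
}
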
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 76a7003bfe..4c76f7dfe1 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -280,10 +280,12 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-ia32.cc).
// ----------- S t a t e -------------
- // -- ebx: cache cell for call target
+ // -- ebx: feedback array
+ // -- edx: slot in feedback array
// -- edi: function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
+ 0, false);
}
@@ -306,11 +308,13 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// above IC call.
// ----------- S t a t e -------------
// -- eax: number of arguments (not smi)
- // -- ebx: cache cell for call target
+ // -- ebx: feedback array
+ // -- edx: feedback slot (smi)
// -- edi: constructor function
// -----------------------------------
// The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
+ eax.bit(), false);
}
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 789dbfbdce..5300dde9a2 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -202,7 +202,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->environment_length();
+ int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(eax.code(), params);
output_frame->SetRegister(ebx.code(), handler);
}
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 13cf6bc49a..6a7f3bc837 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1007,6 +1007,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
{ byte f0byte = data[1];
const char* f0mnem = F0Mnem(f0byte);
if (f0byte == 0x18) {
+ data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
const char* suffix[] = {"nta", "1", "2", "3"};
@@ -1042,22 +1043,30 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (f0byte == 0x54) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("andps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (f0byte == 0x57) {
+ } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
+ const char* const pseudo_op[] = {
+ "rcpps",
+ "andps",
+ "andnps",
+ "orps",
+ "xorps",
+ "addps",
+ "mulps",
+ "cvtps2pd",
+ "cvtdq2ps",
+ "subps",
+ "minps",
+ "divps",
+ "maxps",
+ };
+
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
+ AppendToBuffer("%s %s,",
+ pseudo_op[f0byte - 0x53],
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (f0byte == 0x50) {
data += 2;
int mod, regop, rm;
@@ -1066,6 +1075,17 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("shufps %s,%s,%d",
+ NameOfXMMRegister(rm),
+ NameOfXMMRegister(regop),
+ static_cast<int>(imm8));
+ data += 2;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@@ -1197,6 +1217,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("mov_w ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else if (*data == 0xC7) {
+ data++;
+ AppendToBuffer("%s ", "mov_w");
+ data += PrintRightOperand(data);
+ int imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
} else if (*data == 0x0F) {
data++;
if (*data == 0x38) {
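
The disassembler hunk above collapses the separate andps/xorps cases into a single table indexed by f0byte - 0x53, covering the 0F 53..5F packed-single group, and adds shufps decoding at 0F C6. Below is a standalone sketch of that table-driven lookup with the same mnemonic table and an explicit range check; the byte values and mnemonics come from the hunk, everything else is scaffolding.

#include <cstdint>
#include <cstdio>
#include <string>

// Standalone sketch of the table-driven decode added above: opcodes
// 0F 53..5F map onto a contiguous mnemonic table indexed by (op - 0x53).
static const char* const kPackedSingleMnemonics[] = {
    "rcpps",    "andps",    "andnps", "orps",  "xorps",
    "addps",    "mulps",    "cvtps2pd", "cvtdq2ps",
    "subps",    "minps",    "divps",  "maxps",
};

// Returns the mnemonic for a 0F-prefixed opcode byte, or an empty string if
// the byte falls outside the 0x53..0x5F group handled by the table.
std::string DecodePackedSingle(uint8_t f0byte) {
  if (f0byte < 0x53 || f0byte > 0x5F) return std::string();
  return kPackedSingleMnemonics[f0byte - 0x53];
}

int main() {
  std::printf("0F 54 -> %s\n", DecodePackedSingle(0x54).c_str());  // andps
  std::printf("0F 57 -> %s\n", DecodePackedSingle(0x57).c_str());  // xorps
  std::printf("0F 5E -> %s\n", DecodePackedSingle(0x5E).c_str());  // divps
  return 0;
}
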
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index 5570811768..9859ebb0e5 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -40,10 +40,24 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return ebp; }
Register JavaScriptFrame::context_register() { return esi; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
Register StubFailureTrampolineFrame::fp_register() { return ebp; }
Register StubFailureTrampolineFrame::context_register() { return esi; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index 8606125101..e0f3e32f7c 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -73,6 +73,8 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
+ static const int kFrameSize = 2 * kPointerSize;
+
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 704fb4e7d2..fd4079cb4f 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -107,7 +107,6 @@ class JumpPatchSite BASE_EMBEDDED {
// formal parameter count expected by the function.
//
// The live registers are:
-// o ecx: CallKind
// o edi: the JS function object being called (i.e. ourselves)
// o esi: our context
// o ebp: our caller's frame pointer
@@ -119,6 +118,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -133,22 +135,23 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
+ // Classic mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->is_classic_mode() && !info->is_native()) {
Label ok;
- __ test(ecx, ecx);
- __ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
- __ JumpIfSmi(ecx, &ok);
- __ CmpObjectType(ecx, JS_GLOBAL_PROXY_TYPE, ecx);
+
+ __ cmp(ecx, isolate()->factory()->undefined_value());
__ j(not_equal, &ok, Label::kNear);
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
+
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+
+ __ mov(Operand(esp, receiver_offset), ecx);
+
__ bind(&ok);
}
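
The prologue hunk above inverts the old receiver fix-up: instead of strict-mode and native functions overwriting the receiver with undefined, classic (sloppy) mode, non-native functions now replace an undefined receiver with the global proxy when called without an explicit receiver. A standalone sketch of that rule follows; the types and helper name are invented for illustration.

#include <cstdio>
#include <string>

// Standalone sketch of the receiver patch-up in the prologue above: a sloppy,
// non-native callee invoked with 'undefined' as receiver gets the global
// proxy instead; strict and native functions keep what they were given.
struct Object { std::string name; };

Object* PatchReceiver(Object* receiver, bool is_classic_mode, bool is_native,
                      Object* global_proxy, Object* undefined) {
  if (is_classic_mode && !is_native && receiver == undefined) {
    return global_proxy;  // __ mov(Operand(esp, receiver_offset), ecx)
  }
  return receiver;
}

int main() {
  Object undefined{"undefined"}, global_proxy{"global proxy"};
  Object* patched =
      PatchReceiver(&undefined, /*is_classic_mode=*/true, /*is_native=*/false,
                    &global_proxy, &undefined);
  std::printf("receiver becomes: %s\n", patched->name.c_str());
  return 0;
}
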
@@ -182,20 +185,22 @@ void FullCodeGenerator::Generate() {
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in edi.
- __ push(edi);
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(edi);
__ Push(info->scope()->GetScopeInfo());
__ CallRuntime(Runtime::kNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
+ __ push(edi);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ // Context is returned in eax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in esi.
+ __ mov(esi, eax);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
// Copy parameters into context if necessary.
int num_parameters = info->scope()->num_parameters();
@@ -319,10 +324,6 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
@@ -334,13 +335,10 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
__ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
@@ -372,31 +370,24 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(eax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(eax);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(eax);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ __ push(eax);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(eax);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
@@ -638,7 +629,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
+ CallIC(ic, condition->test_id());
__ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -989,8 +980,18 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ jmp(&skip, Label::kNear);
+ PrepareForBailout(clause, TOS_REG);
+ __ cmp(eax, isolate()->factory()->true_value());
+ __ j(not_equal, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
__ test(eax, eax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -1023,6 +1024,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+
SetStatementPosition(stmt);
Label loop, exit;
@@ -1101,20 +1104,22 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(ebx, cell);
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+
+ // No need for a write barrier, we are storing a Smi in the feedback vector.
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
+ Immediate(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
__ j(above, &non_proxy);
- __ mov(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
+ __ Set(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
__ bind(&non_proxy);
__ push(ebx); // Smi
__ push(eax); // Array
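
The for-in hunk above (and the later EmitCallWithStub/VisitCallNew changes in this file) replaces the old one-Cell-per-site type feedback with a single per-function feedback vector: each for-in, call and construct site gets a numbered slot in one fixed array, and the generated code loads the vector into ebx and the slot index into edx. The comment in the hunk also notes that no write barrier is needed because only a Smi marker is stored. Below is a small standalone sketch of that slot-per-site idea; the types and sentinel values are illustrative assumptions, not V8's.

#include <cstdio>
#include <vector>

// Standalone sketch of a per-function feedback vector: every interesting
// site gets a slot index at compile time, and runtime code reads/writes
// that slot instead of a dedicated per-site cell.
enum Feedback : int {
  kUninitializedSentinel = -1,  // illustrative stand-ins for V8's markers
  kForInFastCaseMarker = 0,
  kForInSlowCaseMarker = 1,
};

class FeedbackVector {
 public:
  // Called while compiling: hand out the next slot for a for-in/call site.
  int ReserveSlot() {
    slots_.push_back(kUninitializedSentinel);
    return static_cast<int>(slots_.size()) - 1;
  }
  // Called from generated code: plain stores, no write barrier needed when
  // the stored value is a small integer rather than a heap pointer.
  void Store(int slot, int value) { slots_[slot] = value; }
  int Load(int slot) const { return slots_[slot]; }

 private:
  std::vector<int> slots_;
};

int main() {
  FeedbackVector vector;
  int for_in_slot = vector.ReserveSlot();   // stmt->ForInFeedbackSlot()
  int call_slot = vector.ReserveSlot();     // expr->CallFeedbackSlot()
  vector.Store(for_in_slot, kForInFastCaseMarker);
  vector.Store(for_in_slot, kForInSlowCaseMarker);  // downgraded at runtime
  std::printf("for-in slot %d holds %d, call slot %d holds %d\n",
              for_in_slot, vector.Load(for_in_slot), call_slot,
              vector.Load(call_slot));
  return 0;
}
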
@@ -1335,11 +1340,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// load IC call.
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+
+ CallLoadIC(mode);
}
@@ -1414,13 +1419,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallLoadIC(CONTEXTUAL);
context()->Plug(eax);
break;
}
@@ -1428,9 +1432,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1492,12 +1495,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ push(esi); // Context.
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kLoadContextSlot, 2);
@@ -1574,6 +1577,8 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
@@ -1634,10 +1639,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->value()));
__ mov(edx, Operand(esp, 0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1702,6 +1704,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1713,6 +1720,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is the elements-kind transition,
+ // we can turn tracking off when there is nowhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
@@ -1725,35 +1739,27 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
- } else if (expr->depth() > 1) {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_elements));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ __ push(Immediate(Smi::FromInt(flags)));
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
if (has_constant_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -2045,29 +2051,33 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
+ __ mov(edx, Operand(esp, kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(edi, eax);
+ __ mov(Operand(esp, 2 * kPointerSize), edi);
+ CallFunctionStub stub(1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The key is still on the stack; drop it.
+ __ Drop(1); // The function is still on the stack; drop it.
// if (!result.done) goto l_try;
__ bind(&l_loop);
__ push(eax); // save result
__ mov(edx, eax); // result
__ mov(ecx, isolate()->factory()->done_string()); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in eax
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ test(eax, eax);
__ j(zero, &l_try);
// result.value
- __ pop(edx); // result
+ __ pop(edx); // result
__ mov(ecx, isolate()->factory()->value_string()); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in eax
- context()->DropAndPlug(2, eax); // drop iter and g
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
+ context()->DropAndPlug(2, eax); // drop iter and g
break;
}
}
@@ -2078,19 +2088,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in eax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. ebx
- // will hold the generator object until the activation has been resumed.
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // ebx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ pop(ebx);
// Check generator state.
- Label wrong_state, done;
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ Label wrong_state, closed_state, done;
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
__ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(0)));
- __ j(less_equal, &wrong_state);
+ __ j(equal, &closed_state);
+ __ j(less, &wrong_state);
// Load suspended function and context.
__ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
@@ -2160,6 +2172,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(eax);
+ __ CallRuntime(Runtime::kThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(ebx);
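
The EmitGeneratorResume hunk above tightens the state check: the continuation field is now compared against zero exactly, with zero meaning the generator is already closed (resuming with NEXT produces a completed iterator result, any other mode throws the supplied value), negative meaning it is currently executing (an error), and positive meaning it is suspended and can be resumed. A standalone sketch of that three-way dispatch follows; the sign conventions mirror the STATIC_ASSERTs, while the returned strings are only placeholders for the generated paths.

#include <cstdio>

// Standalone model of the continuation check in EmitGeneratorResume above:
//   positive -> suspended, restore the frame and jump back in
//   zero     -> closed; NEXT returns {value: undefined, done: true},
//               otherwise the provided value is thrown
//   negative -> already executing, resuming is an error
enum ResumeMode { NEXT, THROW };

const char* Resume(int continuation, ResumeMode mode) {
  if (continuation > 0) return "restore frame and jump back in";
  if (continuation == 0) {                       // kGeneratorClosed == 0
    return mode == NEXT ? "return done iterator result" : "throw the value";
  }
  return "abort: generator already running";     // kGeneratorExecuting < 0
}

int main() {
  std::printf("%s\n", Resume(42, NEXT));
  std::printf("%s\n", Resume(0, NEXT));
  std::printf("%s\n", Resume(0, THROW));
  std::printf("%s\n", Resume(-1, NEXT));
  return 0;
}
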
@@ -2210,15 +2236,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
ASSERT(!key->value()->IsSmi());
__ mov(ecx, Immediate(key->value()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2238,9 +2263,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ BinaryOpICStub stub(op, mode);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2323,10 +2347,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(edx);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -2364,10 +2387,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(edx, eax);
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->value());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2388,48 +2408,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ mov(location, eax);
+ if (var->IsContextSlot()) {
+ __ mov(edx, eax);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, LanguageMode mode) {
+ __ push(eax); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(name));
+ __ push(Immediate(Smi::FromInt(mode)));
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallStoreIC();
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ mov(edx, StackOperand(var));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(StackOperand(var), eax);
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(eax);
__ push(esi);
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &skip, Label::kNear);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), language_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2440,18 +2470,16 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&assign);
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), language_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2459,20 +2487,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
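
The new EmitStoreToStackLocalOrContextSlot helper above centralizes the rule that a store into a stack slot needs no extra bookkeeping, while a store into a context slot (a field of a heap-allocated Context) must be followed by RecordWriteContextSlot so the incremental and generational collectors learn about the new pointer. A standalone sketch of that distinction follows; the trivial remembered set stands in for the real write barrier and every name in it is invented.

#include <cstdio>
#include <utility>
#include <vector>

// Standalone sketch of the stack-versus-context-slot store in
// EmitStoreToStackLocalOrContextSlot above: only heap-object fields need a
// write barrier. The "remembered set" here is a plain vector, purely for
// illustration.
struct HeapObject;  // stand-in for a Context

static std::vector<std::pair<HeapObject*, int>> remembered_set;

// Store into a stack slot: the GC scans stacks anyway, nothing to record.
void StoreToStackSlot(void** slot, void* value) { *slot = value; }

// Store into a field of a heap object: record (object, offset) so a later
// incremental mark or minor GC revisits the updated field.
void StoreToContextSlot(HeapObject* context, void** field, int offset,
                        void* value) {
  *field = value;
  remembered_set.emplace_back(context, offset);  // RecordWriteContextSlot
}

int main() {
  void* stack_slot = nullptr;
  int dummy = 0;
  StoreToStackSlot(&stack_slot, &dummy);

  void* fake_field = nullptr;
  StoreToContextSlot(reinterpret_cast<HeapObject*>(&fake_field), &fake_field,
                     0, &dummy);
  std::printf("remembered set entries: %zu\n", remembered_set.size());
  return 0;
}
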
@@ -2492,11 +2507,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2515,7 +2526,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -2544,72 +2555,108 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- __ call(code, rmode, ast_id);
+ __ call(code, RelocInfo::CODE_TARGET, ast_id);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+ Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
+
+ CallFunctionFlags flags;
+ // Get the target function.
+ if (callee->IsVariableProxy()) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a classic mode method.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ flags = NO_CALL_FUNCTION_FLAGS;
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ mov(edx, Operand(esp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+ flags = CALL_AS_METHOD;
+ }
+
+ // Load the arguments.
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
- __ Set(ecx, Immediate(name));
}
+
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
+ CallFunctionStub stub(arg_count, flags);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
+
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
+
+ context()->DropAndPlug(1, eax);
}
+// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(ecx);
- __ push(eax);
- __ push(ecx);
-
- // Load the arguments.
+ Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ mov(edx, Operand(esp, 0));
+ // Move the key into the right register for the keyed load IC.
+ __ mov(ecx, eax);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ // Load the arguments.
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
}
+
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
+ CallFunctionStub stub(arg_count, CALL_AS_METHOD);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
+
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax); // Drop the key still on the stack.
+
+ context()->DropAndPlug(1, eax);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2621,17 +2668,16 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(ebx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot())));
- CallFunctionStub stub(arg_count, flags);
+ // Record call targets in unoptimized code.
+ CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2670,10 +2716,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -2700,7 +2745,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
@@ -2708,13 +2753,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, eax);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ push(GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithIC(expr);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
// Generate code for loading from variables potentially shadowed by
@@ -2740,37 +2784,34 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(eax);
// The receiver is implicitly the global receiver. Indicate this by
// passing the hole to the call function stub.
- __ push(Immediate(isolate()->factory()->the_hole_value()));
+ __ push(Immediate(isolate()->factory()->undefined_value()));
__ bind(&call);
}
// The receiver is either the global receiver or an object found by
- // LoadContextSlot. That object could be the hole if the receiver is
- // implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
+ // LoadContextSlot.
+ EmitCallWithStub(expr);
- } else if (property != NULL) {
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithIC(expr);
} else {
EmitKeyedCallWithIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ mov(ebx, GlobalObjectOperand());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ __ push(Immediate(isolate()->factory()->undefined_value()));
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCallWithStub(expr);
}
#ifdef DEBUG
@@ -2808,10 +2849,10 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(ebx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -3053,6 +3094,32 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
+ __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x80000000));
+ __ j(not_equal, if_false);
+ __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x00000000));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -3273,57 +3340,6 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(edi, eax);
-
- __ bind(&heapnumber_allocated);
-
- __ PrepareCallCFunction(1, ebx);
- __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- // This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm(), SSE2);
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, ebx);
- __ movd(xmm0, eax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorps(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
- } else {
- // 0x4130000000000000 is 1.0 x 2^20 as a double.
- __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
- Immediate(0x41300000));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ fsubp(1);
- __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
- }
- __ mov(eax, edi);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
@@ -3415,32 +3431,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiIndex);
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
-
- __ cmp(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, kIndexIsTooLarge);
-
- __ cmp(index, Immediate(Smi::FromInt(0)));
- __ Check(greater_equal, kIndexIsNegative);
-
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- __ cmp(value, Immediate(encoding_mask));
- __ Check(equal, kUnexpectedStringType);
- __ pop(value);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
@@ -3451,18 +3441,26 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ pop(value);
+ __ pop(index);
if (FLAG_debug_code) {
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
}
__ SmiUntag(value);
__ SmiUntag(index);
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
__ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
value);
context()->Plug(string);
@@ -3479,13 +3477,19 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
__ pop(value);
__ pop(index);
- VisitForAccumulatorValue(args->at(0)); // string
if (FLAG_debug_code) {
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ __ SmiUntag(index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index);
}
__ SmiUntag(value);
@@ -3671,11 +3675,11 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
+ __ pop(edx);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(eax);
}
@@ -3694,50 +3698,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
+ // Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallStub(&stub);
+ __ CallRuntime(Runtime::kMath_log, 1);
context()->Plug(eax);
}
@@ -3771,8 +3737,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
// InvokeFunction requires the function in edi. Move it in there.
__ mov(edi, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper());
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
@@ -3792,7 +3757,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ pop(ebx);
+ __ pop(ecx);
__ CallStub(&stub);
context()->Plug(eax);
}
@@ -3827,13 +3794,13 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
Label done, not_found;
- // tmp now holds finger offset as a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- __ cmp(key, CodeGenerator::FixedArrayElementOperand(cache, tmp));
+ // tmp now holds finger offset as a smi.
+ __ cmp(key, FixedArrayElementOperand(cache, tmp));
__ j(not_equal, &not_found);
- __ mov(eax, CodeGenerator::FixedArrayElementOperand(cache, tmp, 1));
+ __ mov(eax, FixedArrayElementOperand(cache, tmp, 1));
__ jmp(&done);
__ bind(&not_found);
@@ -3847,44 +3814,6 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = eax;
- Register left = ebx;
- Register tmp = ecx;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmp(left, right);
- __ j(equal, &ok);
- // Fail if either is a non-HeapObject.
- __ mov(tmp, left);
- __ and_(tmp, right);
- __ JumpIfSmi(tmp, &fail);
- __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
- __ j(not_equal, &fail);
- __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail);
- __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok);
- __ bind(&fail);
- __ mov(eax, Immediate(isolate()->factory()->false_value()));
- __ jmp(&done);
- __ bind(&ok);
- __ mov(eax, Immediate(isolate()->factory()->true_value()));
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -4199,31 +4128,47 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
+ // Push the builtins object as receiver.
__ mov(eax, GlobalObjectOperand());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
- }
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Load the function from the receiver.
+ __ mov(edx, Operand(esp, 0));
+ __ mov(ecx, Immediate(expr->name()));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- if (expr->is_jsruntime()) {
- // Call the JS runtime function via a call IC.
- __ Set(ecx, Immediate(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
+
} else {
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+
+ context()->Plug(eax);
}
- context()->Plug(eax);
}
@@ -4399,14 +4344,50 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ // Inline smi case if we are in a loop.
+ Label done, stub_call;
+ JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(eax, &no_conversion, Label::kNear);
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ }
+ }
+
+ if (expr->op() == Token::INC) {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ j(no_overflow, &done, Label::kNear);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ jmp(&stub_call, Label::kNear);
+ __ bind(&slow);
}
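The inline smi path added above for ++/-- operates on the tagged value directly: with ia32's one-bit smi tag, Smi::FromInt(1) is the machine integer 2, so the increment is a plain add with an overflow check, and on overflow the add is undone before falling through to the generic stub call. A rough standalone sketch of the increment half, assuming that one-bit tag (helper name is illustrative only):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Tagged smi increment: returns false and leaves the value untouched when
    // the add would leave the smi range, mirroring the undo-and-jump-to-stub
    // path in the generated code.
    static bool SmiIncrement(int32_t* tagged) {
      const int32_t kSmiOne = 1 << 1;  // Smi::FromInt(1) with a 1-bit smi tag
      if (*tagged > std::numeric_limits<int32_t>::max() - kSmiOne) return false;
      *tagged += kSmiOne;
      return true;
    }

    int main() {
      int32_t five = 5 << 1;
      assert(SmiIncrement(&five) && five == (6 << 1));
      return 0;
    }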
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4428,40 +4409,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ add(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- }
- __ j(overflow, &stub_call, Label::kNear);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ add(eax, Immediate(Smi::FromInt(1)));
- }
- }
-
// Record position before stub call.
SetSourcePosition(expr->position());
// Call stub for +1/-1.
+ __ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4492,10 +4448,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4512,7 +4465,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4534,16 +4487,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4702,7 +4655,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4738,7 +4691,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ test(eax, eax);
Split(not_zero, if_true, if_false, fall_through);
}
@@ -4891,9 +4844,11 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
+#ifdef DEBUG
+static const byte kCallInstruction = 0xe8;
+#endif
void BackEdgeTable::PatchAt(Code* unoptimized_code,
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index f8e4ea53d0..69f6e3a6bb 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -611,7 +611,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -653,7 +653,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ TailCallExternalReference(ref, 2, 1);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -678,7 +678,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(eax, unmapped_location);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -707,7 +707,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -733,6 +733,19 @@ static void KeyedStoreGenerateGenericHelper(
__ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, fast_double);
}
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element
+ Label holecheck_passed1;
+ __ cmp(FixedArrayElementOperand(ebx, ecx),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ bind(&holecheck_passed1);
+
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(eax, &non_smi_value);
@@ -742,7 +755,7 @@ static void KeyedStoreGenerateGenericHelper(
Immediate(Smi::FromInt(1)));
}
// It's irrelevant whether array is smi-only or not when writing a smi.
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+ __ mov(FixedArrayElementOperand(ebx, ecx), eax);
__ ret(0);
__ bind(&non_smi_value);
@@ -757,7 +770,7 @@ static void KeyedStoreGenerateGenericHelper(
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
}
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+ __ mov(FixedArrayElementOperand(ebx, ecx), eax);
// Update write barrier for the elements array address.
__ mov(edx, eax); // Preserve the value which is returned.
__ RecordWriteArray(
@@ -773,6 +786,16 @@ static void KeyedStoreGenerateGenericHelper(
// If the value is a number, store it as a double in the FastDoubleElements
// array.
}
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so
+ // go to the runtime.
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(ebx, ecx, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(not_equal, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
&transition_double_elements, false);
@@ -851,10 +874,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ JumpIfSmi(edx, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
+ 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(ecx, &slow);
@@ -924,377 +947,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- Isolate* isolate = masm->isolate();
- isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(edx, &number);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ cmp(edx, isolate->factory()->true_value());
- __ j(equal, &boolean);
- __ cmp(edx, isolate->factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edi : function
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check that the result is not a smi.
- __ JumpIfSmi(edi, miss);
-
- // Check that the value is a JavaScript function, fetching its map into eax.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-// The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
-
- // eax: elements
- // Search the dictionary placing the result in edi.
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, edi);
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ push(edx);
- __ push(ecx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
- __ CallStub(&stub);
-
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
- __ JumpIfSmi(edx, &invoke, Label::kNear);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, &global, Label::kNear);
- __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke, Label::kNear);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC,
- extra_state);
-
- GenerateMiss(masm, argc, extra_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &check_name);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in edx is not used after this point.
- // ecx: key
- // edi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // eax: elements
- // ecx: smi key
- // Check whether the elements is a number dictionary.
- __ CheckMap(eax,
- isolate->factory()->hash_table_map(),
- &slow_load,
- DONT_DO_SMI_CHECK);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- // ebx: untagged index
- // Receiver in edx will be clobbered, need to reload it on miss.
- __ LoadFromNumberDictionary(
- &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_reload_receiver);
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ecx); // save the key
- __ push(edx); // pass the receiver
- __ push(ecx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(ecx); // restore the key
- // Leave the internal frame.
- }
-
- __ mov(edi, eax);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ CheckMap(ebx,
- isolate->factory()->hash_table_map(),
- &lookup_monomorphic_cache,
- DONT_DO_SMI_CHECK);
-
- GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(ebx, ecx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label slow, notin;
- Factory* factory = masm->isolate()->factory();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
- __ mov(edi, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
- __ cmp(unmapped_location, factory->the_hole_value());
- __ j(equal, &slow);
- __ mov(edi, unmapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(ecx, &miss);
- Condition cond = masm->IsObjectNameType(ecx, eax, eax);
- __ j(NegateCondition(cond), &miss);
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -1303,9 +955,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
@@ -1373,7 +1023,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
@@ -1388,10 +1038,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ push(ebx); // return address
// Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1413,18 +1061,14 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
-
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, strict_mode,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, no_reg);
@@ -1528,7 +1172,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -1543,10 +1187,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ push(ebx);
// Do tail-call to runtime routine.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index e08a6e1d66..71946afe06 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -103,9 +103,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ RegisterDependentCodeForEmbeddedMaps(code);
PopulateDeoptimizationData(code);
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -177,24 +175,31 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
+ // Classic mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->is_classic_mode() &&
+ !info_->is_native()) {
Label ok;
- __ test(ecx, Operand(ecx));
- __ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
+ __ mov(ecx, Operand(esp, receiver_offset));
+
+ __ cmp(ecx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+
+ __ mov(Operand(esp, receiver_offset), ecx);
+
__ bind(&ok);
}
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
+ __ Set(edx, Immediate(kNoAlignmentPadding));
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
@@ -287,17 +292,18 @@ bool LCodeGen::GeneratePrologue() {
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in edi.
- __ push(edi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
+ __ push(edi);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ // Context is returned in eax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in esi.
+ __ mov(esi, eax);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
// Copy parameters into context if necessary.
int num_parameters = scope()->num_parameters();
@@ -340,7 +346,7 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
// Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
+ __ Set(edx, Immediate(kNoAlignmentPadding));
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
Label do_not_pad, align_loop;
@@ -470,7 +476,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -809,17 +816,36 @@ bool LCodeGen::IsSmi(LConstantOperand* op) const {
}
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
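ArgumentsOffsetWithoutFrame above maps a parameter's (negative) spill index to an esp-relative offset for code that never built an eager frame. As a worked example, assuming ia32's 4-byte pointers (kPointerSize == kPCOnStackSize == 4): index -1 gives -(-1 + 1) * 4 + 4 = 4, the slot just above the saved return address, and index -2 gives 8, the next parameter up the stack.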
Operand LCodeGen::ToOperand(LOperand* op) const {
if (op->IsRegister()) return Operand(ToRegister(op));
if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()));
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()));
+ } else {
+ // Retrieve parameter without eager stack-frame relative to the
+ // stack-pointer.
+ return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
}
Operand LCodeGen::HighOperand(LOperand* op) {
ASSERT(op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+ // Retrieve parameter without eager stack-frame relative to the
+ // stack-pointer.
+ return Operand(
+ esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
}
@@ -1071,7 +1097,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ if (DeoptEveryNTimes()) {
ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfd();
@@ -1143,36 +1169,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
@@ -1183,6 +1179,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1342,11 +1339,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
RegExpExecStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1362,12 +1354,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -1383,7 +1369,7 @@ void LCodeGen::DoModI(LModI* instr) {
HMod* hmod = instr->hydrogen();
HValue* left = hmod->left();
HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
+ if (hmod->RightIsPowerOf2()) {
// TODO(svenpanne) We should really do the strength reduction on the
// Hydrogen level.
Register left_reg = ToRegister(instr->left());
@@ -1408,36 +1394,6 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&left_is_not_negative);
__ and_(left_reg, divisor - 1);
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
- Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmp(right_reg, Immediate(divisor));
- DeoptimizeIf(not_equal, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ neg(left_reg);
- __ and_(left_reg, divisor - 1);
- __ neg(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&left_is_not_negative);
- __ and_(left_reg, divisor - 1);
- __ bind(&done);
-
} else {
Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(eax));
@@ -1495,56 +1451,41 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
+ if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
Register dividend = ToRegister(instr->left());
- int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
- int32_t test_value = 0;
- int32_t power = 0;
+ HDiv* hdiv = instr->hydrogen();
+ int32_t divisor = hdiv->right()->GetInteger32Constant();
+ Register result = ToRegister(instr->result());
+ ASSERT(!result.is(dividend));
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, Operand(dividend));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
+ hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ test(dividend, Operand(dividend));
+ DeoptimizeIf(zero, instr->environment());
}
-
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmp(dividend, 0);
- __ j(less, &negative, Label::kNear);
- __ sar(dividend, power);
- if (divisor < 0) __ neg(dividend);
- __ jmp(&done, Label::kNear);
-
- __ bind(&negative);
- __ neg(dividend);
- __ sar(dividend, power);
- if (divisor > 0) __ neg(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ test(dividend, Immediate(test_value));
+ // Check for (kMinInt / -1).
+ if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
+ hdiv->CheckFlag(HValue::kCanOverflow)) {
+ __ cmp(dividend, kMinInt);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ Abs(divisor) != 1) {
+ __ test(dividend, Immediate(Abs(divisor) - 1));
DeoptimizeIf(not_zero, instr->environment());
- __ sar(dividend, power);
- }
}
-
- if (divisor < 0) __ neg(dividend);
-
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2(Abs(divisor));
+ if (shift > 0) {
+ // The arithmetic shift is always OK, the 'if' is an optimization only.
+ if (shift > 1) __ sar(result, 31);
+ __ shr(result, 32 - shift);
+ __ add(result, dividend);
+ __ sar(result, shift);
+ }
+ if (divisor < 0) __ neg(result);
return;
}
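The rewritten power-of-two path in DoDivI above performs truncating (round-toward-zero) signed division with the classic bias trick: smear the dividend's sign bit, shift that down to 2^shift - 1 (or 0 for non-negative dividends), add the bias back to the dividend, arithmetic-shift right by shift, and finally negate for negative divisors. A small standalone C++ sketch of the same computation (not V8 code; assumes arithmetic right shift of negative int32_t, which holds on the compilers and targets V8 supports):

    #include <cassert>
    #include <cstdint>

    // dividend / (negative_divisor ? -(1 << shift) : (1 << shift)),
    // rounded toward zero, without a divide instruction.
    static int32_t DivByPowerOf2(int32_t dividend, int shift, bool negative_divisor) {
      int32_t result = dividend;
      if (shift > 0) {
        int32_t sign = result >> 31;                                  // 0 or -1
        uint32_t bias = static_cast<uint32_t>(sign) >> (32 - shift);  // 0 or 2^shift - 1
        result = (dividend + static_cast<int32_t>(bias)) >> shift;
      }
      return negative_divisor ? -result : result;
    }

    int main() {
      assert(DivByPowerOf2(-7, 1, false) == -3);  // -7 / 2
      assert(DivByPowerOf2(7, 2, true) == -1);    // 7 / -4
      assert(DivByPowerOf2(-8, 3, false) == -1);  // -8 / 8
      return 0;
    }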
@@ -1664,10 +1605,10 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
double multiplier_f =
static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
int64_t multiplier;
- if (multiplier_f - floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(floor(multiplier_f));
+ if (multiplier_f - std::floor(multiplier_f) < 0.5) {
+ multiplier = static_cast<int64_t>(std::floor(multiplier_f));
} else {
- multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
+ multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
}
// The multiplier is a uint32.
ASSERT(multiplier > 0 &&
@@ -1986,7 +1927,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
XMMRegister xmm_scratch = double_scratch0();
__ Set(temp, Immediate(lower));
__ movd(xmm_scratch, Operand(temp));
- __ por(res, xmm_scratch);
+ __ orps(res, xmm_scratch);
}
}
}
@@ -2014,43 +1955,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(result, FieldOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(result, Map::kElementsKindMask);
- __ shr(result, Map::kElementsKindShift);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- ASSERT(input.is(result));
-
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
- }
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, map);
- __ j(not_equal, &done, Label::kNear);
- __ mov(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -2075,7 +1979,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
__ j(not_equal, &runtime, Label::kNear);
__ mov(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch);
@@ -2087,44 +1991,87 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToRepresentation(LConstantOperand::cast(index),
+ Representation::Integer32());
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldOperand(string, SeqString::kHeaderSize + offset);
+ }
+ return FieldOperand(
+ string, ToRegister(index),
+ encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+ SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register result = ToRegister(instr->result());
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register value = ToRegister(instr->value());
- String::Encoding encoding = instr->encoding();
if (FLAG_debug_code) {
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ __ push(string);
+ __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
+ __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(value);
+ __ pop(string);
}
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
+ __ movzx_b(result, operand);
} else {
- __ mov_w(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
- value);
+ __ movzx_w(result, operand);
}
}
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToOperand(instr->value()));
- ASSERT(ToRegister(instr->context()).is(esi));
- CallRuntime(Runtime::kThrow, 1, instr);
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
+ Register value = ToRegister(instr->value());
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (instr->value()->IsConstantOperand()) {
+ int value = ToRepresentation(LConstantOperand::cast(instr->value()),
+ Representation::Integer32());
+ ASSERT_LE(0, value);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ ASSERT_LE(value, String::kMaxOneByteCharCode);
+ __ mov_b(operand, static_cast<int8_t>(value));
+ } else {
+ ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+ __ mov_w(operand, static_cast<int16_t>(value));
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov_w(operand, value);
+ }
}
}
@@ -2212,7 +2159,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
- __ movsd(left_reg, right_reg);
+ __ movaps(left_reg, right_reg);
__ bind(&return_left);
}
@@ -2247,7 +2194,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ movsd(Operand(esp, 0 * kDoubleSize), left);
__ movsd(Operand(esp, 1 * kDoubleSize), right);
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ ExternalReference::mod_two_doubles_operation(isolate()),
4);
// Return value is in st(0) on ia32.
@@ -2291,7 +2238,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
ASSERT(left.is(result));
X87PrepareToWrite(result);
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ ExternalReference::mod_two_doubles_operation(isolate()),
4);
// Return value is in st(0) on ia32.
@@ -2312,7 +2259,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@@ -2617,7 +2564,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ fld(0);
__ FCmp();
Label ok;
- __ j(parity_even, &ok);
+ __ j(parity_even, &ok, Label::kNear);
__ fstp(0);
EmitFalseBranch(instr, no_condition);
__ bind(&ok);
@@ -2640,6 +2587,35 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ CpuFeatureScope use_sse2(masm(), SSE2);
+ XMMRegister value = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, value);
+ EmitFalseBranch(instr, not_equal);
+ __ movmskpd(scratch, value);
+ __ test(scratch, Immediate(1));
+ EmitBranch(instr, not_zero);
+ } else {
+ Register value = ToRegister(instr->value());
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
+ Immediate(0x80000000));
+ EmitFalseBranch(instr, not_equal);
+ __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
+ Immediate(0x00000000));
+ EmitBranch(instr, equal);
+ }
+}
+
+
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -2941,7 +2917,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register temp = ToRegister(instr->temp());
// A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
+ __ JumpIfSmi(object, &false_result, Label::kNear);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
@@ -2954,18 +2930,18 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
__ j(not_equal, &cache_miss, Label::kNear);
__ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
__ bind(&cache_miss);
// Null is not an instance of anything.
__ cmp(object, factory()->null_value());
- __ j(equal, &false_result);
+ __ j(equal, &false_result, Label::kNear);
// String values are not instances of anything.
Condition is_string = masm_->IsObjectStringType(object, temp, temp);
- __ j(is_string, &false_result);
+ __ j(is_string, &false_result, Label::kNear);
// Go to the deferred code.
__ jmp(deferred->entry());
@@ -3100,7 +3076,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
if (dynamic_frame_alignment_) {
Label no_padding;
__ cmp(edx, Immediate(kNoAlignmentPadding));
- __ j(equal, &no_padding);
+ __ j(equal, &no_padding, Label::kNear);
EmitReturn(instr, true);
__ bind(&no_padding);
@@ -3129,10 +3105,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3155,19 +3130,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->global_object()).is(edx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3233,18 +3195,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
? MemOperand::StaticVariable(ToExternalReference(
LConstantOperand::cast(instr->object())))
: MemOperand(ToRegister(instr->object()), offset);
- if (access.representation().IsByte()) {
- ASSERT(instr->hydrogen()->representation().IsInteger32());
- __ movzx_b(result, operand);
- } else {
- __ mov(result, operand);
- }
+ __ Load(result, operand, access.representation());
return;
}
Register object = ToRegister(instr->object());
- if (FLAG_track_double_fields &&
- instr->hydrogen()->representation().IsDouble()) {
+ if (instr->hydrogen()->representation().IsDouble()) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
@@ -3260,12 +3216,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
object = result;
}
- if (access.representation().IsByte()) {
- ASSERT(instr->hydrogen()->representation().IsInteger32());
- __ movzx_b(result, FieldOperand(object, offset));
- } else {
- __ mov(result, FieldOperand(object, offset));
- }
+ __ Load(result, FieldOperand(object, offset), access.representation());
}
@@ -3293,7 +3244,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3346,15 +3297,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ mov(result, FieldOperand(input,
- ExternalArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
@@ -3390,7 +3332,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
elements_kind,
0,
instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result(ToDoubleRegister(instr->result()));
@@ -3399,7 +3342,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
} else {
X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
}
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ movsd(ToDoubleRegister(instr->result()), operand);
@@ -3409,31 +3353,40 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
__ movsx_b(result, operand);
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ movzx_b(result, operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ movsx_w(result, operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ movzx_w(result, operand);
break;
- case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ mov(result, operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
DeoptimizeIf(negative, instr->environment());
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -3506,7 +3459,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3525,6 +3478,9 @@ Operand LCodeGen::BuildFastArrayOperand(
uint32_t additional_index) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int element_shift_size = ElementsKindToShiftSize(elements_kind);
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
+ }
int shift_size = element_shift_size;
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
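
The added offset adjustment in BuildFastArrayOperand accounts for on-heap (fixed) typed arrays keeping their payload behind an object header, with V8 heap pointers carrying a low tag bit, so the element address becomes base - tag + header + (key << shift). A minimal sketch of that arithmetic follows; kHeapObjectTag = 1 matches V8, while the header size used here is an assumed placeholder:

    #include <cstdint>

    constexpr intptr_t kHeapObjectTagExample = 1;   // V8 tags heap object pointers with the low bit set
    constexpr intptr_t kDataOffsetExample    = 16;  // assumed fixed typed array header size, for illustration

    intptr_t ElementAddress(intptr_t tagged_base, intptr_t key, int shift) {
      // Untag the base pointer, skip the header, then index into the payload.
      return tagged_base + (kDataOffsetExample - kHeapObjectTagExample) + (key << shift);
    }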
@@ -3612,25 +3568,28 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register scratch = ToRegister(instr->temp());
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
+ Label receiver_ok, global_object;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ Register scratch = ToRegister(instr->temp());
- // Do not transform the receiver to object for strict mode
- // functions.
- __ mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok); // A near jump is not sufficient here!
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &receiver_ok, dist);
- // Do not transform the receiver to object for builtins.
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok);
+ // Do not transform the receiver to object for builtins.
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &receiver_ok, dist);
+ }
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
@@ -3643,16 +3602,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
DeoptimizeIf(equal, instr->environment());
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok, Label::kNear);
+ __ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
+ const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ mov(receiver, Operand(receiver, global_offset));
+ const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
+ __ mov(receiver, FieldOperand(receiver, receiver_offset));
__ bind(&receiver_ok);
}
@@ -3693,8 +3650,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3731,14 +3687,6 @@ void LCodeGen::DoContext(LContext* instr) {
}
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result,
- Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
__ push(esi); // The context is the first argument.
@@ -3748,26 +3696,10 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
}
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result,
- Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
EDIState edi_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3789,7 +3721,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function directly.
- __ SetCallKind(ecx, call_kind);
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
@@ -3803,20 +3734,59 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(Operand(target)));
+ __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
}
+ generator.AfterCall();
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- EDI_UNINITIALIZED);
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ mov(eax, instr->arity());
+ }
+
+ // Change context.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ bool is_self_call = false;
+ if (instr->hydrogen()->function()->IsConstant()) {
+ HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+ Handle<JSFunction> jsfun =
+ Handle<JSFunction>::cast(fun_const->handle(isolate()));
+ is_self_call = jsfun.is_identical_to(info()->closure());
+ }
+
+ if (is_self_call) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
@@ -3839,7 +3809,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(zero, &done);
+ __ j(zero, &done, Label::kNear);
__ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
__ jmp(&allocated, Label::kNear);
@@ -3993,9 +3963,11 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
ExternalReference::address_of_minus_one_half();
Label done, round_to_zero, below_one_half, do_not_compensate;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+
__ movsd(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half);
+ __ j(above, &below_one_half, Label::kNear);
// CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
__ addsd(xmm_scratch, input_reg);
@@ -4004,16 +3976,16 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cmp(output_reg, 0x80000000u);
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&below_one_half);
__ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
__ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero);
+ __ j(below_equal, &round_to_zero, Label::kNear);
// CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
// compare and compensate.
- __ movsd(input_temp, input_reg); // Do not alter input_reg.
+ __ movaps(input_temp, input_reg); // Do not alter input_reg.
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
@@ -4023,10 +3995,10 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
- __ j(equal, &done);
+ __ j(equal, &done, dist);
__ sub(output_reg, Immediate(1));
// No overflow because we already ruled out minint.
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&round_to_zero);
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
@@ -4119,69 +4091,6 @@ void LCodeGen::DoPower(LPower* instr) {
}
-void LCodeGen::DoRandom(LRandom* instr) {
- CpuFeatureScope scope(masm(), SSE2);
-
- // Assert that the register size is indeed the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- // Load native context
- Register global_object = ToRegister(instr->global_object());
- Register native_context = global_object;
- __ mov(native_context, FieldOperand(
- global_object, GlobalObject::kNativeContextOffset));
-
- // Load state (FixedArray of the native context's random seeds)
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- Register state = native_context;
- __ mov(state, FieldOperand(native_context, kRandomSeedOffset));
-
- // Load state[0].
- Register state0 = ToRegister(instr->scratch());
- __ mov(state0, FieldOperand(state, ByteArray::kHeaderSize));
- // Load state[1].
- Register state1 = ToRegister(instr->scratch2());
- __ mov(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- Register scratch3 = ToRegister(instr->scratch3());
- __ movzx_w(scratch3, state0);
- __ imul(scratch3, scratch3, 18273);
- __ shr(state0, 16);
- __ add(state0, scratch3);
- // Save state[0].
- __ mov(FieldOperand(state, ByteArray::kHeaderSize), state0);
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzx_w(scratch3, state1);
- __ imul(scratch3, scratch3, 36969);
- __ shr(state1, 16);
- __ add(state1, scratch3);
- // Save state[1].
- __ mov(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- Register random = state0;
- __ shl(random, 14);
- __ and_(state1, Immediate(0x3FFFF));
- __ add(random, state1);
-
- // Convert 32 random bits in random to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- XMMRegister result = ToDoubleRegister(instr->result());
- XMMRegister scratch4 = double_scratch0();
- __ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(scratch4, scratch3);
- __ movd(result, random);
- __ cvtss2sd(scratch4, scratch4);
- __ xorps(result, scratch4);
- __ subsd(result, scratch4);
-}
-
-
void LCodeGen::DoMathLog(LMathLog* instr) {
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
@@ -4191,7 +4100,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
- __ j(equal, &zero, Label::kNear);
+ __ j(not_carry, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ movsd(input_reg, Operand::StaticVariable(nan));
@@ -4226,39 +4135,6 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
}
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(esi, Immediate(0));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(esi, Immediate(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(esi, Immediate(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->function()).is(edi));
@@ -4270,78 +4146,28 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
EDI_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->key()).is(ecx));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(ecx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->function()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+ CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(ecx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- EDI_UNINITIALIZED);
-}
-
-
void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->constructor()).is(edi));
@@ -4362,16 +4188,15 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
__ Set(eax, Immediate(instr->arity()));
- __ mov(ebx, instr->hydrogen()->property_cell());
+ __ mov(ebx, factory()->undefined_value());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
+ ArrayNoArgumentConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
@@ -4381,27 +4206,27 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
// look at the first argument
__ mov(ecx, Operand(esp, 0));
__ test(ecx, ecx);
- __ j(zero, &packed_case);
+ __ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
- override_mode);
+ ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
+ ArraySingleArgumentConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
+ ArrayNArgumentsConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
@@ -4417,7 +4242,13 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
- __ lea(result, Operand(base, instr->offset()));
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ lea(result, Operand(base, ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ lea(result, Operand(base, offset, times_1, 0));
+ }
}
@@ -4434,22 +4265,20 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
ToExternalReference(LConstantOperand::cast(instr->object())))
: MemOperand(ToRegister(instr->object()), offset);
if (instr->value()->IsConstantOperand()) {
- ASSERT(!representation.IsByte());
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
__ mov(operand, Immediate(ToInteger32(operand_value)));
} else {
Register value = ToRegister(instr->value());
- if (representation.IsByte()) {
- __ mov_b(operand, value);
- } else {
- __ mov(operand, value);
- }
+ __ Store(value, operand, representation);
}
return;
}
Register object = ToRegister(instr->object());
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (FLAG_track_fields && representation.IsSmi()) {
if (instr->value()->IsConstantOperand()) {
@@ -4469,9 +4298,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
__ test(value, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
+
+ // We know that value is a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
}
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -4506,10 +4338,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
@@ -4521,11 +4349,11 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (operand_value->IsRegister()) {
Register value = ToRegister(operand_value);
- if (representation.IsByte()) {
- __ mov_b(operand, value);
- } else {
- __ mov(operand, value);
- }
+ __ Store(value, operand, representation);
+ } else if (representation.IsInteger32()) {
+ Immediate immediate = ToImmediate(operand_value, representation);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ mov(operand, immediate);
} else {
Handle<Object> handle_value = ToHandle(operand_value);
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -4533,11 +4361,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
} else {
Register value = ToRegister(instr->value());
- if (representation.IsByte()) {
- __ mov_b(operand, value);
- } else {
- __ mov(operand, value);
- }
+ __ Store(value, operand, representation);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4561,9 +4385,8 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(),
+ instr->strict_mode_flag());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4615,7 +4438,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
elements_kind,
0,
instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = double_scratch0();
@@ -4625,7 +4449,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ fld(0);
__ fstp_s(operand);
}
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ movsd(operand, ToDoubleRegister(instr->value()));
@@ -4635,21 +4460,30 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ mov_b(operand, value);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ mov_w(operand, value);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ mov(operand, value);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -4684,7 +4518,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Label have_value;
__ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
+ __ j(parity_odd, &have_value, Label::kNear); // NaN.
__ movsd(value, Operand::StaticVariable(canonical_nan_reference));
__ bind(&have_value);
@@ -4720,15 +4554,15 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ fld(0);
__ FCmp();
- __ j(parity_odd, &no_special_nan_handling);
+ __ j(parity_odd, &no_special_nan_handling, Label::kNear);
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
Immediate(kHoleNanUpper32));
__ add(esp, Immediate(kDoubleSize));
Label canonicalize;
- __ j(not_equal, &canonicalize);
- __ jmp(&no_special_nan_handling);
+ __ j(not_equal, &canonicalize, Label::kNear);
+ __ jmp(&no_special_nan_handling, Label::kNear);
__ bind(&canonicalize);
__ fstp(0);
__ fld_d(Operand::StaticVariable(canonical_nan_reference));
@@ -4787,7 +4621,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases...external, fast-double, fast
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4845,13 +4679,10 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
ToRegister(instr->temp()),
kDontSaveFPRegs);
} else {
+ ASSERT(ToRegister(instr->context()).is(esi));
PushSafepointRegistersScope scope(this);
if (!object_reg.is(eax)) {
- __ push(object_reg);
- }
- LoadContextFromDeferred(instr->context());
- if (!object_reg.is(eax)) {
- __ pop(eax);
+ __ mov(eax, object_reg);
}
__ mov(ebx, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
@@ -4975,9 +4806,11 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(instr->hydrogen()->flags());
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
+ StringAddStub stub(instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -5661,7 +5494,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
PushSafepointRegistersScope scope(this);
__ push(object);
__ xor_(esi, esi);
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
@@ -5708,12 +5541,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Label success;
for (int i = 0; i < map_set.size() - 1; i++) {
Handle<Map> map = map_set.at(i).handle();
- __ CompareMap(reg, map, &success);
- __ j(equal, &success);
+ __ CompareMap(reg, map);
+ __ j(equal, &success, Label::kNear);
}
Handle<Map> map = map_set.at(map_set.size() - 1).handle();
- __ CompareMap(reg, map, &success);
+ __ CompareMap(reg, map);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
} else {
@@ -5791,13 +5624,13 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Check for heap number
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kFar);
+ __ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ jmp(&zero_result);
+ __ jmp(&zero_result, Label::kNear);
// Heap number
__ bind(&heap_number);
@@ -5812,15 +5645,15 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Test for negative values --> clamp to zero
__ test(scratch, scratch);
- __ j(negative, &zero_result);
+ __ j(negative, &zero_result, Label::kNear);
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
__ shr(scratch2, HeapNumber::kExponentShift);
- __ j(zero, &zero_result);
+ __ j(zero, &zero_result, Label::kNear);
__ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
- __ j(negative, &zero_result);
+ __ j(negative, &zero_result, Label::kNear);
const uint32_t non_int8_exponent = 7;
__ cmp(scratch2, Immediate(non_int8_exponent + 1));
@@ -5851,18 +5684,18 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
__ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
__ cmp(scratch2, Immediate(1 << one_half_bit_shift));
Label no_round;
- __ j(less, &no_round);
+ __ j(less, &no_round, Label::kNear);
Label round_up;
__ mov(scratch2, Immediate(1 << one_half_bit_shift));
- __ j(greater, &round_up);
+ __ j(greater, &round_up, Label::kNear);
__ test(scratch3, scratch3);
- __ j(not_zero, &round_up);
+ __ j(not_zero, &round_up, Label::kNear);
__ mov(scratch2, scratch);
__ and_(scratch2, Immediate(1 << one_bit_shift));
__ shr(scratch2, 1);
__ bind(&round_up);
__ add(scratch, scratch2);
- __ j(overflow, &largest_value);
+ __ j(overflow, &largest_value, Label::kNear);
__ bind(&no_round);
__ shr(scratch, 23);
__ mov(result_reg, scratch);
@@ -5877,7 +5710,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// bit is set.
__ and_(scratch, HeapNumber::kMantissaMask);
__ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ j(not_zero, &zero_result); // M!=0 --> NaN
+ __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
// Infinity -> Fall through to map to 255.
__ bind(&largest_value);
@@ -5886,7 +5719,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
__ bind(&zero_result);
__ xor_(result_reg, result_reg);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// smi
__ bind(&is_smi);
@@ -5969,7 +5802,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Immediate(Smi::FromInt(0)));
+ __ Set(result, Immediate(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5982,19 +5815,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(Immediate(Smi::FromInt(size)));
}
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(
- Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(
- Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
+ __ push(Immediate(Smi::FromInt(flags)));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
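
The rewritten deferred allocation above folds the double-alignment requirement and the target space into a single Smi-encoded flags word and hands it to one Runtime::kAllocateInTargetSpace call, rather than picking one of three per-space runtime functions. The encode/update calls follow V8's usual BitField pattern; the sketch below reimplements that pattern generically, and the field widths and enum values chosen here are assumptions for illustration, not V8's actual layout:

    #include <cstdint>

    // Generic encode/update/decode over the bit range [shift, shift + size).
    template <class T, int shift, int size>
    struct BitFieldSketch {
      static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static constexpr uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
      static constexpr T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    enum SpaceSketch { kNewSpace = 0, kOldPointerSpace = 1, kOldDataSpace = 2 };
    using DoubleAlignSketch = BitFieldSketch<bool, 0, 1>;
    using TargetSpaceSketch = BitFieldSketch<SpaceSketch, 1, 2>;

    // Usage mirroring the code above:
    //   uint32_t flags = DoubleAlignSketch::encode(true);
    //   flags = TargetSpaceSketch::update(flags, kOldDataSpace);
    //   // flags is then boxed as a Smi and passed to the runtime call.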
@@ -6034,7 +5870,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
__ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
__ push(ebx);
@@ -6079,6 +5915,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
@@ -6087,43 +5924,48 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
-
- Condition final_branch_condition =
- EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- input, instr->type_literal());
+ Condition final_branch_condition = EmitTypeofIs(instr, input);
if (final_branch_condition != no_condition) {
EmitBranch(instr, final_branch_condition);
}
}
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Handle<String> type_name = instr->type_literal();
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
+
+ Label::Distance true_distance = left_block == next_block ? Label::kNear
+ : Label::kFar;
+ Label::Distance false_distance = right_block == next_block ? Label::kNear
+ : Label::kFar;
Condition final_branch_condition = no_condition;
if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
+ __ JumpIfSmi(input, true_label, true_distance);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
factory()->heap_number_map());
final_branch_condition = equal;
} else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
+ __ j(above_equal, false_label, false_distance);
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = zero;
} else if (type_name->Equals(heap()->symbol_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->boolean_string())) {
__ cmp(input, factory()->true_value());
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ cmp(input, factory()->false_value());
final_branch_condition = equal;
@@ -6133,8 +5975,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (type_name->Equals(heap()->undefined_string())) {
__ cmp(input, factory()->undefined_value());
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
+ __ j(equal, true_label, true_distance);
+ __ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
@@ -6143,29 +5985,29 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ cmp(input, factory()->null_value());
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
}
__ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
- __ j(below, false_label);
+ __ j(below, false_label, false_distance);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label);
+ __ j(above, false_label, false_distance);
// Check for undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = zero;
} else {
- __ jmp(false_label);
+ __ jmp(false_label, false_distance);
}
return final_branch_condition;
}
@@ -6328,6 +6170,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
DeoptimizeIf(equal, instr->environment());
@@ -6365,9 +6208,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Label load_cache, done;
__ EnumLength(result, map);
__ cmp(result, Immediate(Smi::FromInt(0)));
- __ j(not_equal, &load_cache);
+ __ j(not_equal, &load_cache, Label::kNear);
__ mov(result, isolate()->factory()->empty_fixed_array());
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
@@ -6395,7 +6238,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Label out_of_object, done;
__ cmp(index, Immediate(0));
- __ j(less, &out_of_object);
+ __ j(less, &out_of_object, Label::kNear);
__ mov(object, FieldOperand(object,
index,
times_half_pointer_size,
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 94ab28e1e3..fa5e88b033 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -257,7 +257,6 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
EDIState edi_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -271,6 +270,10 @@ class LCodeGen: public LCodeGenBase {
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+ bool DeoptEveryNTimes() {
+ return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+ }
+
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
@@ -278,7 +281,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -298,6 +300,10 @@ class LCodeGen: public LCodeGenBase {
uint32_t offset,
uint32_t additional_index = 0);
+ Operand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
@@ -315,6 +321,8 @@ class LCodeGen: public LCodeGenBase {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
@@ -340,10 +348,7 @@ class LCodeGen: public LCodeGenBase {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
+ Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 2b2126af9d..d621bd261d 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -488,7 +488,7 @@ void LGapResolver::EmitSwap(int index) {
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
__ movsd(xmm0, other);
__ movsd(other, reg);
- __ movsd(reg, Operand(xmm0));
+ __ movaps(reg, xmm0);
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 73aabd6b1e..bbbc7ec731 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -286,7 +286,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -302,11 +302,23 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
- stream->Add(" + %d", offset());
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -333,28 +345,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[ecx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
context()->PrintTo(stream);
@@ -419,7 +409,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -580,6 +570,14 @@ LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
+ Register fixed_register) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseFixed(value, fixed_register);
+}
+
+
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -616,8 +614,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -625,41 +622,36 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
XMMRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -919,10 +911,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
@@ -991,90 +985,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result =
- new(zone()) LEnvironment(hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
- }
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
- }
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1117,6 +1027,9 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return new(zone()) LCmpMapAndBranch(value);
@@ -1157,7 +1070,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
+ LOperand* function = UseRegister(instr->function());
LOperand* temp = TempRegister();
LWrapReceiver* result =
new(zone()) LWrapReceiver(receiver, function, temp);
@@ -1193,11 +1106,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* inner_object) {
- LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
- LInnerAllocatedObject* result =
- new(zone()) LInnerAllocatedObject(base_object);
- return DefineAsRegister(result);
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1219,33 +1132,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), edi);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
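DoCallWithDescriptor pins every operand after the target to whatever register the interface descriptor dictates. Spelled out for an imagined descriptor with environment_length() == 2 (the register names are purely illustrative):

  // ops[0] = UseRegisterOrConstantAtStart(target)              // code target
  // ops[1] = UseFixed(OperandAt(1), GetParameterRegister(0))   // e.g. esi
  // ops[2] = UseFixed(OperandAt(2), GetParameterRegister(1))   // e.g. eax
  // The resulting LCallWithDescriptor keeps these in a ZoneList, so its input
  // count is per-instance rather than baked into the template (see the class
  // definition in lithium-ia32.h below).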
@@ -1263,9 +1181,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
@@ -1284,10 +1199,9 @@ LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- LOperand* context = UseAny(instr->context());
LOperand* input = UseRegister(instr->value());
LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(context, input, temp);
+ LMathRound* result = new(zone()) LMathRound(input, temp);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1304,29 +1218,7 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathLog* result = new(zone()) LMathLog(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
}
@@ -1349,42 +1241,13 @@ LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
return DefineSameAsFirst(result);
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseFixed(instr->key(), ecx);
- LCallKeyed* result = new(zone()) LCallKeyed(context, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LCallNamed* result = new(zone()) LCallNamed(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LCallGlobal* result = new(zone()) LCallGlobal(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
@@ -1404,8 +1267,8 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- LCallFunction* result = new(zone()) LCallFunction(context, function);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, eax), instr);
}
@@ -1454,12 +1317,12 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
+ if (instr->RightIsPowerOf2()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
+ LOperand* value = UseRegister(instr->left());
LDivI* div =
new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ return AssignEnvironment(DefineAsRegister(div));
}
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
@@ -1476,25 +1339,6 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
if (!right->IsConstant()) {
@@ -1540,7 +1384,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
+ if (instr->RightIsPowerOf2()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
UseOrConstant(right),
@@ -1550,10 +1394,6 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegisterAtStart(right),
- NULL);
return AssignEnvironment(DefineSameAsFirst(mod));
} else {
// The temporary operand is necessary to ensure that right is not
@@ -1649,6 +1489,21 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ HValue* right_candidate = instr->right();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ return result;
} else {
return DoArithmeticT(Token::ADD, instr);
}
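The new external-representation branch mirrors the integer case: when LAddI::UseLea approves, the pointer-plus-offset add is emitted as a three-operand lea, so the base register is not clobbered. Roughly (registers illustrative):

  // add form: result defined same-as-first, left input is overwritten.
  //   add esi, ecx              ; esi := external_base + offset
  // lea form: result lands in a fresh register, both inputs stay live.
  //   lea eax, [esi + ecx]      ; eax := external_base + offset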
@@ -1691,19 +1546,6 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseTempRegister(instr->global_object());
- LOperand* scratch = TempRegister();
- LOperand* scratch2 = TempRegister();
- LOperand* scratch3 = TempRegister();
- LRandom* result = new(zone()) LRandom(
- global_object, scratch, scratch2, scratch3);
- return DefineFixedDouble(result, xmm1);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsSmiOrTagged());
ASSERT(instr->right()->representation().IsSmiOrTagged());
@@ -1762,6 +1604,16 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsSmiOrTagged());
LOperand* temp = TempRegister();
@@ -1846,35 +1698,51 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* date = UseFixed(instr->value(), eax);
+ LDateField* result =
+ new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineSameAsFirst(result);
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* date = UseFixed(instr->value(), eax);
- LDateField* result =
- new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
+ if (instr->encoding() == String::ONE_BYTE_ENCODING) {
+ if (FLAG_debug_code) {
+ return UseFixed(instr->value(), eax);
+ } else {
+ return UseFixedOrConstant(instr->value(), eax);
+ }
+ } else {
+ if (FLAG_debug_code) {
+ return UseRegisterAtStart(instr->value());
+ } else {
+ return UseRegisterOrConstantAtStart(instr->value());
+ }
+ }
}
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(ecx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), ecx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = GetSeqStringSetCharOperand(instr);
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
+ LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+ index, value);
+ if (FLAG_debug_code) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -1899,13 +1767,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(), eax);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -2104,12 +1965,10 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), esi)
- : NULL;
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), eax), context,
- parameter_count);
+ return new(zone()) LReturn(
+ UseFixed(instr->value(), eax), context, parameter_count);
}
@@ -2159,16 +2018,6 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(), edx);
- LOperand* value = UseFixed(instr->value(), eax);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2223,13 +2072,6 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
@@ -2240,24 +2082,22 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
: UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
result = new(zone()) LLoadKeyed(obj, key);
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(backing_store, key);
}
DefineAsRegister(result);
bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ (elements_kind == EXTERNAL_UINT32_ELEMENTS);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
return can_deoptimize ? AssignEnvironment(result) : result;
@@ -2280,9 +2120,12 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
// Determine if we need a byte register in this case for the value.
bool val_is_fixed_register =
- elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS;
+ elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS;
if (val_is_fixed_register) {
return UseFixed(instr->value(), eax);
}
@@ -2297,7 +2140,7 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsSmi());
@@ -2329,23 +2172,22 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
- LOperand* external_pointer = UseRegister(instr->elements());
+ LOperand* backing_store = UseRegister(instr->elements());
LOperand* val = GetStoreKeyedValueOperand(instr);
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(external_pointer,
- key,
- val);
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
@@ -2377,7 +2219,7 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
new_map_reg, temp_reg);
return result;
} else {
- LOperand* context = UseRegister(instr->context());
+ LOperand* context = UseFixed(instr->context(), esi);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
return AssignPointerMap(result);
@@ -2424,7 +2266,8 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
!(FLAG_track_double_fields && instr->field_representation().IsDouble());
LOperand* val;
- if (instr->field_representation().IsByte()) {
+ if (instr->field_representation().IsInteger8() ||
+ instr->field_representation().IsUInteger8()) {
// mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
// Just force the value to be in eax and we're safe here.
val = UseFixed(instr->value(), eax);
@@ -2474,8 +2317,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
return MarkAsCall(DefineFixed(string_add, eax), instr);
}
@@ -2544,7 +2387,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
- Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2629,6 +2472,8 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2682,8 +2527,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 9b00f3c358..811700a544 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -52,12 +52,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -74,6 +71,7 @@ class LCodeGen;
V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
V(ClobberDoubles) \
+ V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -95,13 +93,10 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
- V(ElementsKind) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -120,7 +115,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -132,7 +126,6 @@ class LCodeGen;
V(LoadRoot) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
V(MathExp) \
V(MathFloor) \
V(MathFloorOfDiv) \
@@ -140,9 +133,7 @@ class LCodeGen;
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -150,13 +141,12 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
- V(Random) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -165,7 +155,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -177,7 +166,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
@@ -186,7 +174,6 @@ class LCodeGen;
V(Uint32ToDouble) \
V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
@@ -310,10 +297,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -325,6 +310,15 @@ class LTemplateInstruction : public LInstruction {
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
@@ -500,10 +494,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -575,6 +565,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
};
@@ -749,15 +740,13 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
+ LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
@@ -793,42 +782,6 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathTan(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
-};
-
-
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value,
@@ -860,15 +813,13 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
+ LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
@@ -903,6 +854,22 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -977,6 +944,7 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
inputs_[2] = right;
}
+  LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
@@ -1081,6 +1049,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ LOperand* context() { return inputs_[0]; }
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1107,6 +1076,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
@@ -1296,34 +1266,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index)
@@ -1345,41 +1287,39 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
- inputs_[2] = value;
}
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
- LThrow(LOperand* context, LOperand* value) {
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
inputs_[0] = context;
- inputs_[1] = value;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
};
@@ -1433,28 +1373,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
- LRandom(LOperand* global_object,
- LOperand* scratch,
- LOperand* scratch2,
- LOperand* scratch3) {
- inputs_[0] = global_object;
- temps_[0] = scratch;
- temps_[1] = scratch2;
- temps_[2] = scratch3;
- }
-
- LOperand* global_object() const { return inputs_[0]; }
- LOperand* scratch() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
- LOperand* scratch3() const { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1510,7 +1428,8 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value, LOperand* context,
+ explicit LReturn(LOperand* value,
+ LOperand* context,
LOperand* parameter_count) {
inputs_[0] = value;
inputs_[1] = context;
@@ -1585,20 +1504,6 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
@@ -1613,6 +1518,12 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1632,9 +1543,12 @@ inline static bool ExternalArrayOpRequiresTemp(
// an index cannot fold the scale operation into a load and need an extra
// temp register to do the work.
return key_representation.IsSmi() &&
- (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS);
+ (elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS);
}
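The byte-sized cases need the temp because of how smi keys encode the index; a short arithmetic note:

  // On ia32 a smi key is the untagged index shifted left by one:
  //   key_as_smi = index << 1
  // For 4-byte elements the untagging folds into the addressing mode:
  //   base + index * 4  ==  base + key_as_smi * 2     (times_2 scale)
  // For 1-byte elements the required scale would be 1/2:
  //   base + index * 1  ==  base + key_as_smi / 2     (no such scale exists)
  // so the key must first be untagged into a temporary register.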
@@ -1692,28 +1606,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1794,19 +1686,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInnerAllocatedObject(LOperand* base_object) {
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
+ inputs_[1] = offset;
}
- LOperand* base_object() { return inputs_[0]; }
- int offset() { return hydrogen()->offset(); }
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
- DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -1824,18 +1716,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1849,94 +1729,69 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- LOperand* global() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
+ LOperand* function() { return inputs_[0]; }
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
+ LOperand* target() const { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
+ ZoneList<LOperand*> inputs_;
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNamed(LOperand* context) {
+ LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1958,35 +1813,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -2292,6 +2118,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2748,18 +2580,17 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
allocator_(allocator),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
@@ -2774,15 +2605,10 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
@@ -2798,7 +2624,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2840,6 +2665,10 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+ // An input operand in a fixed register or a constant operand.
+ MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
+ Register fixed_register);
+
// An input operand in a register or a constant operand.
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
@@ -2849,7 +2678,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2858,24 +2687,17 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
- template<int I, int T>
- LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ XMMRegister reg);
+ LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);
@@ -2885,6 +2707,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+ LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
+
// Marks a call for the register allocator. Assigns a pointer map to
// support GC and lazy deoptimization. Assigns an environment to support
// eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
@@ -2893,10 +2717,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
@@ -2911,12 +2731,10 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 025bd891c2..30bc4adb58 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -46,7 +46,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
// TODO(titzer): should we just use a null handle here instead?
@@ -56,6 +55,34 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsx_b(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzx_b(dst, src);
+ } else if (r.IsInteger16()) {
+ movsx_w(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzx_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ mov_b(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ mov_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
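The new Load/Store helpers give the lithium codegen a single representation-aware entry point for field moves. A hedged usage sketch (the field offset, registers and Representation factories are illustrative assumptions):

  // Sign-extend an Integer8 field into a full 32-bit register (movsx_b path).
  masm->Load(eax, FieldOperand(ebx, JSObject::kHeaderSize),
             Representation::Integer8());
  // Write only the low byte of eax back (mov_b path); eax is a byte register,
  // so the encoding is legal without extra shuffling.
  masm->Store(eax, FieldOperand(ebx, JSObject::kHeaderSize),
              Representation::UInteger8());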
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
@@ -586,6 +613,10 @@ void MacroAssembler::RecordWriteForMap(
return;
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// A single check of the map's pages interesting flag suffices, since it is
// only set during incremental collection, and then it's also guaranteed that
// the from object's page's interesting flag is also set. This optimization
@@ -642,6 +673,10 @@ void MacroAssembler::RecordWrite(Register object,
bind(&ok);
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
@@ -867,9 +902,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
}
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -882,10 +915,8 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail);
}
- Label success;
- CompareMap(obj, map, &success);
+ CompareMap(obj, map);
j(not_equal, fail);
- bind(&success);
}
@@ -1075,10 +1106,8 @@ void MacroAssembler::EnterExitFramePrologue() {
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- ExternalReference context_address(Isolate::kContextAddress,
- isolate());
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
}
@@ -1388,8 +1417,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
-// Compute the hash code from the untagged key. This must be kept in sync
-// with ComputeIntegerHash in utils.h.
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
//
// Note: r0 will contain hash code
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
@@ -1465,8 +1495,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
dec(r1);
// Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -1484,7 +1513,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
+ if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
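The unrolled loop above implements the usual quadratic probe over the seeded number dictionary; stated as plain code, each of the kNumberDictionaryProbes iterations computes:

  #include <stdint.h>

  // Scalar restatement of one probe step. The per-entry slot count is an
  // assumption about SeededNumberDictionary (key, value, details), not shown
  // in this hunk.
  uint32_t ProbeEntryIndex(uint32_t hash, uint32_t mask, uint32_t i) {
    uint32_t entry = (hash + i + i * i) & mask;  // "(hash + i + i * i) & mask"
    return entry * 3;  // assumed kEntrySize == 3 slots per dictionary entry
  }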
@@ -1562,7 +1591,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1986,30 +2015,48 @@ void MacroAssembler::CopyBytes(Register source,
Register destination,
Register length,
Register scratch) {
- Label loop, done, short_string, short_loop;
- // Experimentation shows that the short string loop is faster if length < 10.
- cmp(length, Immediate(10));
- j(less_equal, &short_string);
-
+ Label short_loop, len4, len8, len12, done, short_string;
ASSERT(source.is(esi));
ASSERT(destination.is(edi));
ASSERT(length.is(ecx));
+ cmp(length, Immediate(4));
+ j(below, &short_string, Label::kNear);
// Because source is 4-byte aligned in our uses of this function,
// we keep source aligned for the rep_movs call by copying the odd bytes
// at the end of the ranges.
mov(scratch, Operand(source, length, times_1, -4));
mov(Operand(destination, length, times_1, -4), scratch);
+
+ cmp(length, Immediate(8));
+ j(below_equal, &len4, Label::kNear);
+ cmp(length, Immediate(12));
+ j(below_equal, &len8, Label::kNear);
+ cmp(length, Immediate(16));
+ j(below_equal, &len12, Label::kNear);
+
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
and_(scratch, Immediate(0x3));
add(destination, scratch);
- jmp(&done);
+ jmp(&done, Label::kNear);
+
+ bind(&len12);
+ mov(scratch, Operand(source, 8));
+ mov(Operand(destination, 8), scratch);
+ bind(&len8);
+ mov(scratch, Operand(source, 4));
+ mov(Operand(destination, 4), scratch);
+ bind(&len4);
+ mov(scratch, Operand(source, 0));
+ mov(Operand(destination, 0), scratch);
+ add(destination, length);
+ jmp(&done, Label::kNear);
bind(&short_string);
test(length, length);
- j(zero, &done);
+ j(zero, &done, Label::kNear);
bind(&short_loop);
mov_b(scratch, Operand(source, 0));
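For reference, the control flow the rewritten CopyBytes implements, restated as a plain C sketch of the strategy only (the real code pins source/destination/length to esi/edi/ecx and clobbers a scratch register):

  #include <stdint.h>
  #include <string.h>

  void CopyBytesSketch(uint8_t* dst, const uint8_t* src, uint32_t len) {
    if (len < 4) {                                    // short_string / short_loop
      for (uint32_t i = 0; i < len; ++i) dst[i] = src[i];
      return;
    }
    memcpy(dst + len - 4, src + len - 4, 4);          // odd tail first; keeps src 4-aligned
    if (len <= 8)  { memcpy(dst, src, 4);  return; }  // len4
    if (len <= 12) { memcpy(dst, src, 8);  return; }  // len8 falls through to len4
    if (len <= 16) { memcpy(dst, src, 12); return; }  // len12 -> len8 -> len4
    memcpy(dst, src, len & ~3u);                      // rep movs over whole dwords;
  }                                                   // tail copy already covered len & 3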
@@ -2140,8 +2187,6 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -2153,8 +2198,7 @@ void MacroAssembler::StubReturn(int argc) {
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2253,7 +2297,7 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
void MacroAssembler::CallApiFunctionAndReturn(
- Address function_address,
+ Register function_address,
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
@@ -2266,6 +2310,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference level_address =
ExternalReference::handle_scope_level_address(isolate());
+ ASSERT(edx.is(function_address));
// Allocate HandleScope in callee-save registers.
mov(ebx, Operand::StaticVariable(next_address));
mov(edi, Operand::StaticVariable(limit_address));
@@ -2292,14 +2337,14 @@ void MacroAssembler::CallApiFunctionAndReturn(
j(zero, &profiler_disabled);
// Additional parameter is the address of the actual getter function.
- mov(thunk_last_arg, Immediate(function_address));
+ mov(thunk_last_arg, function_address);
// Call the api function.
call(thunk_address, RelocInfo::RUNTIME_ENTRY);
jmp(&end_profiler_check);
bind(&profiler_disabled);
// Call the api function.
- call(function_address, RelocInfo::RUNTIME_ENTRY);
+ call(function_address);
bind(&end_profiler_check);
if (FLAG_log_timer_events) {
@@ -2411,23 +2456,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
}
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be ecx to
- // follow the calling convention which requires the call type to be
- // in ecx.
- ASSERT(dst.is(ecx));
- if (call_kind == CALL_AS_FUNCTION) {
- // Set to some non-zero smi by updating the least significant
- // byte.
- mov_b(dst, 1 << kSmiTagSize);
- } else {
- // Set to smi zero by clearing the register.
- xor_(dst, dst);
- }
-}
-
-
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
@@ -2436,8 +2464,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_near,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label invoke;
@@ -2490,14 +2517,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
- SetCallKind(ecx, call_kind);
call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
jmp(done, done_near);
}
} else {
- SetCallKind(ecx, call_kind);
jmp(adaptor, RelocInfo::CODE_TARGET);
}
bind(&invoke);
@@ -2509,8 +2534,7 @@ void MacroAssembler::InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -2518,16 +2542,14 @@ void MacroAssembler::InvokeCode(const Operand& code,
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag, Label::kNear,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(ecx, call_kind);
call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
jmp(code);
}
bind(&done);
@@ -2535,42 +2557,10 @@ void MacroAssembler::InvokeCode(const Operand& code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- Operand dummy(eax, 0);
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
- flag, Label::kNear, call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code, rmode));
- SetCallKind(ecx, call_kind);
- call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code, rmode);
- }
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -2582,28 +2572,33 @@ void MacroAssembler::InvokeFunction(Register fun,
ParameterCount expected(ebx);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
+ expected, actual, flag, call_wrapper);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
- // Get the function and setup the context.
- LoadHeapObject(edi, function);
+ ASSERT(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
+ expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ LoadHeapObject(edi, function);
+ InvokeFunction(edi, expected, actual, flag, call_wrapper);
}
@@ -2619,7 +2614,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
ParameterCount expected(0);
GetBuiltinFunction(edi, id);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, call_wrapper, CALL_AS_METHOD);
+ expected, expected, flag, call_wrapper);
}
@@ -2985,16 +2980,8 @@ void MacroAssembler::CheckStackAlignment() {
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems; however, msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -3007,22 +2994,55 @@ void MacroAssembler::Abort(BailoutReason reason) {
#endif
push(eax);
- push(Immediate(p0));
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
+ push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
int3();
}
+void MacroAssembler::Throw(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ push(eax);
+ push(Immediate(Smi::FromInt(reason)));
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // will not return here
+ int3();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ j(NegateCondition(cc), &L);
+ Throw(reason);
+ // will not return here
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
@@ -3188,6 +3208,41 @@ void MacroAssembler::JumpIfNotUniqueName(Operand operand,
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object, Label::kNear);
+ Abort(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmp(value, Immediate(encoding_mask));
+ pop(value);
+ Check(equal, kUnexpectedStringType);
+
+ // The index is assumed to be untagged coming in; tag it to compare with the
+ // string length without using a temp register. It is restored at the end of
+ // this function.
+ SmiTag(index);
+ Check(no_overflow, kIndexIsTooLarge);
+
+ cmp(index, FieldOperand(string, String::kLengthOffset));
+ Check(less, kIndexIsTooLarge);
+
+ cmp(index, Immediate(Smi::FromInt(0)));
+ Check(greater_equal, kIndexIsNegative);
+
+ // Restore the index
+ SmiUntag(index);
+}
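// The SmiTag(index) / Check(no_overflow, kIndexIsTooLarge) pair above doubles
// as an upper range check. A minimal sketch of that trick, assuming the ia32
// smi layout (31-bit payload, tagged by a one-bit left shift) and that SmiTag
// is a register self-add; the function name is illustrative.
#include <cstdint>

// Returns false exactly when tagging would overflow, i.e. when the untagged
// index cannot be represented as a smi.
bool TagIndexAsSmiSketch(int32_t index, int32_t* tagged_out) {
  int64_t tagged = static_cast<int64_t>(index) + index;      // SmiTag
  if (tagged != static_cast<int32_t>(tagged)) return false;  // signed overflow
  *tagged_out = static_cast<int32_t>(tagged);
  return true;
}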
+
+
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
@@ -3504,7 +3559,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
+ cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
j(equal, call_runtime);
jmp(&start);
@@ -3521,10 +3576,16 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
+ Label no_elements;
mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
cmp(ecx, isolate()->factory()->empty_fixed_array());
+ j(equal, &no_elements);
+
+ // Second chance: the object may be using the empty slow element dictionary.
+ cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
j(not_equal, call_runtime);
+ bind(&no_elements);
mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
cmp(ecx, isolate()->factory()->null_value());
j(not_equal, &next);
@@ -3551,6 +3612,32 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
}
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // Start the walk at the object itself; |current| aliases scratch0.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ and_(scratch1, Map::kElementsKindMask);
+ shr(scratch1, Map::kElementsKindShift);
+ cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ mov(current, FieldOperand(current, Map::kPrototypeOffset));
+ cmp(current, Immediate(factory->null_value()));
+ j(not_equal, &loop_again);
+}
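// The same walk written out in plain C++ for readability (a sketch only: the
// struct names are simplified stand-ins for v8's JSObject and Map, and the
// elements-kind extraction from the map's bit field 2 is folded into a plain
// field).
enum ElementsKindSketch { FAST_ELEMENTS_SKETCH, DICTIONARY_ELEMENTS_SKETCH };

struct MapSketch {
  ElementsKindSketch elements_kind;  // decoded from Map::kBitField2Offset
  MapSketch* prototype_map;          // map of the prototype, nullptr at the end
};

struct ObjectSketch {
  MapSketch* map;
};

// Mirrors the assembly loop: load the map, test its elements kind, then
// continue with the prototype's map until the chain hits null.
bool HasDictionaryElementsInPrototypeChainSketch(const ObjectSketch& object) {
  for (MapSketch* m = object.map; m != nullptr; m = m->prototype_map) {
    if (m->elements_kind == DICTIONARY_ELEMENTS_SKETCH) return true;
  }
  return false;
}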
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 1e41f734ee..6807d082d8 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -61,6 +61,9 @@ class MacroAssembler: public Assembler {
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
+ void Load(Register dst, const Operand& src, Representation r);
+ void Store(Register src, const Operand& dst, Representation r);
+
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
@@ -308,50 +311,39 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Set up call kind marking in ecx. The method takes ecx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
+ const CallWrapper& call_wrapper) {
+ InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
}
void InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -417,13 +409,8 @@ class MacroAssembler: public Assembler {
bool specialize_for_processor,
int offset = 0);
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
- // result of map compare. If multiple map compares are required, the compare
- // sequences branches to early_success.
- void CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success);
+ // Compare an object's map with the specified map.
+ void CompareMap(Register obj, Handle<Map> map);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -585,6 +572,12 @@ class MacroAssembler: public Assembler {
// Throw past all JS frames to the top JS entry frame.
void ThrowUncatchable(Register value);
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -822,7 +815,7 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Clobbers ebx, edi and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address,
+ void CallApiFunctionAndReturn(Register function_address,
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
@@ -901,8 +894,6 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
@@ -945,6 +936,11 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
Label::Distance distance = Label::kFar);
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
@@ -977,9 +973,12 @@ class MacroAssembler: public Assembler {
bind(&no_memento_found);
}
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
private:
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -993,8 +992,7 @@ class MacroAssembler: public Assembler {
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_distance,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
+ const CallWrapper& call_wrapper = NullCallWrapper());
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
@@ -1084,6 +1082,14 @@ inline Operand FieldOperand(Register object,
}
+inline Operand FixedArrayElementOperand(Register array,
+ Register index_as_smi,
+ int additional_offset = 0) {
+ int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
+ return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
+}
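// Why a smi index can be scaled by times_half_pointer_size: a sketch under
// ia32 assumptions (4-byte pointers, smis tagged by a one-bit left shift).
// The constants are illustrative stand-ins, and the heap-object tag
// adjustment performed by FieldOperand is left out.
#include <cstdint>

int32_t FixedArrayElementOffsetSketch(int32_t untagged_index) {
  const int32_t kPointerSizeSketch = 4;  // ia32
  const int32_t kHeaderSizeSketch = 8;   // stand-in for FixedArray::kHeaderSize
  int32_t index_as_smi = untagged_index << 1;  // smi tagging
  // index_as_smi * (kPointerSize / 2) == untagged_index * kPointerSize, so the
  // tagged index works directly with a times_2 scale factor.
  return index_as_smi * (kPointerSizeSketch / 2) + kHeaderSizeSketch;
}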
+
+
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
diff --git a/deps/v8/src/ia32/simulator-ia32.cc b/deps/v8/src/ia32/simulator-ia32.cc
index ab8169375c..b6f2847332 100644
--- a/deps/v8/src/ia32/simulator-ia32.cc
+++ b/deps/v8/src/ia32/simulator-ia32.cc
@@ -27,4 +27,3 @@
// Since there is no simulator for the ia32 architecture this file is empty.
-
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 0648833dc7..4bc428849c 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -271,13 +271,17 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
int index,
Register prototype,
Label* miss) {
- // Check we're still in the same context.
- __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- masm->isolate()->global_object());
- __ j(not_equal, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(masm->isolate()->native_context()->get(index)));
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ mov(scratch, Operand(esi, offset));
+ __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
+ __ j(not_equal, miss);
+
// Load its initial map. The global functions all have initial maps.
__ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
@@ -405,374 +409,94 @@ static void CompileCallLoadPropertyWithInterceptor(
Register receiver,
Register holder,
Register name,
- Handle<JSObject> holder_obj) {
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate()),
+ ExternalReference(IC_Utility(id), masm->isolate()),
StubCache::kInterceptorArgsLength);
}
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : last argument in the internal frame of the caller
- // -----------------------------------
- __ pop(scratch);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(Immediate(Smi::FromInt(0)));
- }
- __ push(scratch);
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address.
- // -- esp[4] : last fast api call extra argument.
- // -- ...
- // -- esp[kFastApiCallArguments * 4] : first fast api call extra argument.
- // -- esp[kFastApiCallArguments * 4 + 4] : last argument in the internal
- // frame.
- // -----------------------------------
- __ pop(scratch);
- __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
- __ push(scratch);
-}
-
-
-// Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] - esp[28] : FunctionCallbackInfo, incl.
- // : object passing the type check
- // (set by CheckPrototypes)
- // -- esp[32] : last argument
- // -- ...
- // -- esp[(argc + 7) * 4] : first argument
- // -- esp[(argc + 8) * 4] : receiver
- // -----------------------------------
-
- typedef FunctionCallbackArguments FCA;
- // Save calling context.
- __ mov(Operand(esp, (1 + FCA::kContextSaveIndex) * kPointerSize), esi);
-
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(edi, function);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Construct the FunctionCallbackInfo.
- __ mov(Operand(esp, (1 + FCA::kCalleeIndex) * kPointerSize), edi);
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ mov(ecx, api_call_info);
- __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize), ebx);
- } else {
- __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize),
- Immediate(call_data));
- }
- __ mov(Operand(esp, (1 + FCA::kIsolateIndex) * kPointerSize),
- Immediate(reinterpret_cast<int>(masm->isolate())));
- __ mov(Operand(esp, (1 + FCA::kReturnValueOffset) * kPointerSize),
- masm->isolate()->factory()->undefined_value());
- __ mov(Operand(esp, (1 + FCA::kReturnValueDefaultValueIndex) * kPointerSize),
- masm->isolate()->factory()->undefined_value());
-
- // Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 7);
- __ lea(eax, Operand(esp, 1 * kPointerSize));
-
- // API function gets reference to the v8::Arguments. If CPU profiler
- // is enabled wrapper function will be called and we need to pass
- // address of the callback as additional parameter, always allocate
- // space for it.
- const int kApiArgc = 1 + 1;
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
-
- // FunctionCallbackInfo::implicit_args_.
- __ mov(ApiParameterOperand(2), eax);
- __ add(eax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), eax);
- // FunctionCallbackInfo::length_.
- __ Set(ApiParameterOperand(4), Immediate(argc));
- // FunctionCallbackInfo::is_construct_call_.
- __ Set(ApiParameterOperand(5), Immediate(0));
-
- // v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(2));
- __ mov(ApiParameterOperand(0), eax);
-
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
-
- Operand context_restore_operand(ebp,
- (2 + FCA::kContextSaveIndex) * kPointerSize);
- Operand return_value_operand(ebp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
- __ CallApiFunctionAndReturn(function_address,
- thunk_address,
- ApiParameterOperand(1),
- argc + kFastApiCallArguments + 1,
- return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
-}
-
-
// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- const int stack_space = kFastApiCallArguments + argc + 1;
- const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that should be removed
+// when API call ICs are generated in Hydrogen.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
// Copy return value.
- __ mov(scratch, Operand(esp, 0));
- // Assign stack space for the call arguments.
- __ sub(esp, Immediate(stack_space * kPointerSize));
- // Move the return address on top of the stack.
- __ mov(Operand(esp, 0), scratch);
- // Write holder to stack frame.
- __ mov(Operand(esp, kHolderIndex * kPointerSize), receiver);
- // Write receiver to stack frame.
- int index = stack_space;
- __ mov(Operand(esp, index-- * kPointerSize), receiver);
+ __ pop(scratch_in);
+ // receiver
+ __ push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ mov(Operand(esp, index-- * kPointerSize), values[i]);
- }
-
- GenerateFastApiCall(masm, optimization, argc, true);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_state_(extra_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ push(arg);
}
+ __ push(scratch_in);
+ // Stack now matches the JSFunction ABI.
+ ASSERT(optimization.is_simple_api_call());
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- Handle<JSFunction> function = optimization.constant_function();
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
+ // ABI for CallApiFunctionStub.
+ Register callee = eax;
+ Register call_data = ebx;
+ Register holder = ecx;
+ Register api_function_address = edx;
+ Register scratch = edi; // scratch_in is no longer valid.
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadHeapObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
}
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- StubCache::kInterceptorArgsLength);
-
- // Restore the name_ register.
- __ pop(name_);
-
- // Leave the internal frame.
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadHeapObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ mov(scratch, api_call_info);
+ __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
+ } else {
+ __ mov(call_data, call_data_obj);
}
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- // Leave the internal frame.
- }
-
- __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
- __ j(not_equal, interceptor_succeeded);
- }
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ mov(api_function_address, Immediate(function_address));
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_state_;
-};
+ // Jump to stub.
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
@@ -1118,26 +842,6 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<JSGlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
@@ -1147,22 +851,20 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
- const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, Handle<Map>(object->map()));
+ __ mov(scratch1, receiver_map);
- Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1170,31 +872,33 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Keep track of the current object in register reg.
Register reg = object_reg;
- Handle<JSObject> current = object;
int depth = 0;
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
- }
-
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
- while (!current.is_identical_to(holder)) {
+ while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1205,16 +909,19 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
- Handle<Map> current_map(current->map());
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
- if (current->IsJSGlobalProxy()) {
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
}
if (in_new_space) {
@@ -1234,71 +941,62 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
}
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
- }
-
// Go to the next object in the prototype chain.
current = prototype;
+ current_map = handle(current->map());
}
- ASSERT(current.is_identical_to(holder));
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()), miss, DONT_DO_SMI_CHECK);
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
}
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
// Return the register containing the holder.
return reg;
}
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -1344,7 +1042,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
__ j(not_equal, &miss);
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
return reg;
}
@@ -1369,13 +1067,6 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -1408,36 +1099,16 @@ void LoadStubCompiler::GenerateLoadCallback(
__ push(esp);
__ push(name()); // name
- __ mov(ebx, esp); // esp points to reference to name (handler).
__ push(scratch3()); // Restore return address.
- // array for v8::Arguments::values_, handler for name and pointer
- // to the values (it is treated as a smi by the GC).
- const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
- // Allocate space for optional callback address parameter in case
- // CPU profiler is active.
- const int kApiArgc = 2 + 1;
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ PrepareCallApiFunction(kApiArgc);
- __ mov(ApiParameterOperand(0), ebx); // name.
- __ add(ebx, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(1), ebx); // arguments pointer.
-
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
-
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
-
- __ CallApiFunctionAndReturn(getter_address,
- thunk_address,
- ApiParameterOperand(2),
- kStackSpace,
- Operand(ebp, 7 * kPointerSize),
- NULL);
+ // ABI for CallApiGetterStub.
+ Register getter_address = edx;
+ Address function_address = v8::ToCData<Address>(callback->getter());
+ __ mov(getter_address, Immediate(function_address));
+
+ CallApiGetterStub stub;
+ __ TailCallStub(&stub);
}
@@ -1450,7 +1121,7 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
@@ -1501,11 +1172,9 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -1549,1334 +1218,14 @@ void LoadStubCompiler::GenerateLoadInterceptor(
}
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(edx, miss);
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- if (Serializer::enabled()) {
- __ mov(edi, Immediate(cell));
- __ mov(edi, FieldOperand(edi, Cell::kValueOffset));
- } else {
- __ mov(edi, Operand::ForCell(cell));
- }
-
- // Check that the cell contains the same function.
- if (isolate()->heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(edi, miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
- Immediate(Handle<SharedFunctionInfo>(function->shared())));
- } else {
- __ cmp(edi, Immediate(function));
- }
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
- name, &miss);
-
- GenerateFastPropertyLoad(
- masm(), edi, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
-
- // Check that the function really is a function.
- __ JumpIfSmi(edi, &miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
- // Check that function is still array
- const int argc = arguments().immediate();
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- __ mov(eax, Immediate(argc));
- __ mov(ebx, site_feedback_cell);
- __ mov(edi, function);
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
-
- if (argc == 0) {
- // Noop, return the length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- // Get the elements array of the object.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &check_double);
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
-
- // Get the elements' length into ecx.
- __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, ecx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ JumpIfNotSmi(ecx, &with_write_barrier);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Store the value.
- __ mov(FieldOperand(edi,
- eax,
- times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- ecx);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&check_double);
-
-
- // Check that the elements are in double mode.
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_double_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
-
- // Get the elements' length into ecx.
- __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, ecx);
- __ j(greater, &call_builtin);
-
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ StoreNumberToDoubleElements(
- ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(ebx, &call_builtin);
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(factory()->heap_number_map()));
- __ j(equal, &call_builtin);
- // edi: elements array
- // edx: receiver
- // ebx: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore edi.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- ebx,
- edi,
- &call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore edi.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(ebx, &call_builtin);
- }
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Store the value.
- __ lea(edx, FieldOperand(edi,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ mov(Operand(edx, 0), ecx);
-
- __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- __ mov(ebx, Operand(esp, argc * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(ebx, &no_fast_elements_check);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- // We could be lucky and the elements array could be at the top of
- // new-space. In this case we can just grow it in place by moving the
- // allocation pointer up.
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top.
- __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
-
- // Check if it's the end of elements.
- __ lea(edx, FieldOperand(edi,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmp(edx, ecx);
- __ j(not_equal, &call_builtin);
- __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
- __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
-
- // Push the argument...
- __ mov(Operand(edx, 0), ebx);
- // ... and fill the rest with holes.
- for (int i = 1; i < kAllocationDelta; i++) {
- __ mov(Operand(edx, i * kPointerSize),
- Immediate(factory()->the_hole_value()));
- }
-
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
- // Restore receiver to edx as finish sequence assumes it's here.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Increment element's and array's sizes.
- __ add(FieldOperand(edi, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(kAllocationDelta)));
-
- // NOTE: This only happens in new-space, where we don't
- // care about the black-byte-count on pages. Otherwise we should
- // update that too if the object is black.
-
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()),
- argc + 1,
- 1);
- }
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- Label miss, return_undefined, call_builtin;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
-
- // Get the elements array of the object.
- __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into ecx and calculate new length.
- __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
- __ sub(ecx, Immediate(Smi::FromInt(1)));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(eax, FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(eax, Immediate(factory()->the_hole_value()));
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), ecx);
-
- // Fill with the hole.
- __ mov(FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(factory()->the_hole_value()));
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ mov(eax, Immediate(factory()->undefined_value()));
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
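The times_half_pointer_size addressing used above works because an ia32 smi is just the integer shifted left by one bit (kSmiTag == 0, kSmiTagSize == 1): scaling the tagged length by half a pointer turns it directly into the byte offset of the corresponding 4-byte element slot. A small worked example, assuming 32-bit pointers:

// Worked example of the smi-scaled addressing above (32-bit pointers assumed).
int SmiElementByteOffset(int index) {
  int smi = index << 1;  // ia32 smi encoding: value shifted left, tag bit 0
  return smi * 2;        // times_half_pointer_size: equals index * kPointerSize
}
// e.g. index 5 -> smi 10 -> byte offset 20, i.e. the sixth 4-byte slot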
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
-
- Register receiver = ebx;
- Register index = edi;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->nan_value()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
-
- Register receiver = eax;
- Register index = edi;
- Register scratch = edx;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->empty_string()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = ebx;
- __ mov(code, Operand(esp, 1 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, Immediate(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, eax);
- generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
-
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
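The and_ with Immediate(Smi::FromInt(0xffff)) above exploits the zero smi tag: AND-ing two tagged smis masks the payloads while leaving the tag bit clear, so the result is still a valid smi holding the char code truncated to uint16. A minimal sketch of that tag arithmetic, assuming ia32's one-bit smi tag:

// Sketch of the tagged uint16 truncation above (one-bit smi tag assumed).
int ToUint16Smi(int smi_code) {
  const int kSmiUint16Mask = 0xffff << 1;  // what Smi::FromInt(0xffff) encodes
  return smi_code & kSmiUint16Mask;        // payload & 0xffff, tag bit stays 0
}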
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- return Handle<Code>::null();
- }
-
- CpuFeatureScope use_sse2(masm(), SSE2);
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &smi);
-
- // Check if the argument is a heap number and load its value into xmm0.
- Label slow;
- __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
-
- // Check if the argument is strictly positive. Note this also
- // discards NaN.
- __ xorpd(xmm1, xmm1);
- __ ucomisd(xmm0, xmm1);
- __ j(below_equal, &slow);
-
- // Do a truncating conversion.
- __ cvttsd2si(eax, Operand(xmm0));
-
- // Check if the result fits into a smi. Note this also checks for
- // 0x80000000 which signals a failed conversion.
- Label wont_fit_into_smi;
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &wont_fit_into_smi);
-
- // Smi tag and return.
- __ SmiTag(eax);
- __ bind(&smi);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is < 2^kMantissaBits.
- Label already_round;
- __ bind(&wont_fit_into_smi);
- __ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
- __ ucomisd(xmm0, xmm1);
- __ j(above_equal, &already_round);
-
- // Save a copy of the argument.
- __ movaps(xmm2, xmm0);
-
- // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
- __ addsd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
-
- // Compare the argument and the tentative result to get the right mask:
- // if xmm2 < xmm0:
- // xmm2 = 1...1
- // else:
- // xmm2 = 0...0
- __ cmpltsd(xmm2, xmm0);
-
- // Subtract 1 if the argument was less than the tentative result.
- __ LoadPowerOf2(xmm1, ebx, 0);
- __ andpd(xmm1, xmm2);
- __ subsd(xmm0, xmm1);
-
- // Return a new heap number.
- __ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(2 * kPointerSize);
-
- // Return the argument (when it's an already round heap number).
- __ bind(&already_round);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
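The SSE2 sequence above is the standard add-and-subtract-2^52 rounding trick: for 0 < x < 2^kMantissaBits, (x + 2^52) - 2^52 rounds x to the nearest integer under the default rounding mode, and the cmpltsd/andpd/subsd tail subtracts 1 exactly when that rounding went up, turning round-to-nearest into floor. A scalar sketch of the same computation, assuming a positive finite argument below 2^52:

// Scalar sketch of the floor-via-2^52 trick above (0 < x < 2^52 assumed).
double FloorPositive(double x) {
  const double kTwoPow52 = 4503599627370496.0;   // 2^HeapNumber::kMantissaBits
  double rounded = (x + kTwoPow52) - kTwoPow52;  // rounds x to nearest integer
  if (x < rounded) rounded -= 1.0;               // went up: step back down to floor
  return rounded;
}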
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(eax, &not_smi);
-
- // Branchless abs implementation; see:
- // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
- // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ mov(ebx, eax);
- __ sar(ebx, kBitsPerInt - 1);
-
- // Do bitwise not or do nothing depending on ebx.
- __ xor_(eax, ebx);
-
- // Add 1 or do nothing depending on ebx.
- __ sub(eax, ebx);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- // Smi case done.
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its exponent and
- // sign into ebx.
- __ bind(&not_smi);
- __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ test(ebx, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ and_(ebx, ~HeapNumber::kSignMask);
- __ mov(ecx, FieldOperand(eax, HeapNumber::kMantissaOffset));
- __ AllocateHeapNumber(eax, edi, edx, &slow);
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ebx);
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
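The smi path above is the classic branchless absolute value from the linked bit-hacks page: an arithmetic shift spreads the sign bit into a mask of all ones or all zeros, then XOR and subtract apply negation only when the mask is set. A minimal sketch, assuming 32-bit two's-complement ints; like the stub, it is wrong only for the most negative value, which falls through to the slow case:

// Sketch of the branchless abs on the smi path above (32-bit int assumed).
int BranchlessAbs(int x) {
  int mask = x >> 31;        // all ones if x is negative, all zeros otherwise
  return (x ^ mask) - mask;  // identity for x >= 0, two's-complement negate otherwise
}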
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if the object is a global object, as we don't want to
- // repatch it to the global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find the holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ mov(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains a
- // duplicate of the return address and will be overwritten.
- GenerateFastApiCall(masm(), optimization, argc, false);
-
- __ bind(&miss);
- __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(edx, &miss);
- }
-
- // Make sure that it's okay not to patch the on-stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
- edi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ CmpObjectType(edx, SYMBOL_TYPE, eax);
- __ j(not_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(edx, &fast);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, factory()->true_value());
- __ j(equal, &fast);
- __ cmp(edx, factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
- }
- }
-
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<Cell>::null(),
- function, Handle<String>::cast(name),
- Code::CONSTANT);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
+ // Check that the object is a boolean.
+ __ cmp(object, factory()->true_value());
+ __ j(equal, &success);
+ __ cmp(object, factory()->false_value());
+ __ j(not_equal, miss);
__ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
- &miss);
-
- // Restore receiver.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the function really is a function.
- __ JumpIfSmi(eax, &miss);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- __ mov(edi, eax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle load cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Set up the context (function already in edi).
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
}
@@ -2885,12 +1234,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
__ pop(scratch1()); // remove the return address
__ push(receiver());
+ __ push(holder_reg);
__ Push(callback);
__ Push(name);
__ push(value());
@@ -2899,28 +1248,10 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch1(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2930,6 +1261,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- eax : value
@@ -2939,18 +1271,25 @@ void StoreStubCompiler::GenerateStoreViaSetter(
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ Register receiver = edx;
+ Register value = eax;
// Save value register, so we can restore it later.
- __ push(eax);
+ __ push(value);
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ push(edx);
- __ push(eax);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ push(receiver);
+ __ push(value);
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2978,16 +1317,15 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ push(receiver());
__ push(this->name());
__ push(value());
- __ push(Immediate(Smi::FromInt(strict_mode())));
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -3019,23 +1357,18 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
- __ bind(&success);
 // Return undefined if the maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ mov(eax, isolate()->factory()->undefined_value());
__ ret(0);
// Return the generated code.
- return GetCode(kind(), Code::NONEXISTENT, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -3067,27 +1400,12 @@ Register* KeyedStoreStubCompiler::registers() {
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Immediate(name));
- __ j(not_equal, miss);
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Immediate(name));
- __ j(not_equal, miss);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
{
@@ -3095,11 +1413,16 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -3118,16 +1441,14 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
- Label success, miss;
+ Label miss;
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
if (Serializer::enabled()) {
__ mov(eax, Immediate(cell));
@@ -3145,41 +1466,50 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
- HandlerFrontendFooter(name, &success, &miss);
- __ bind(&success);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
// The code above already loads the result into the return register.
__ ret(0);
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::NORMAL, name);
}
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
+ TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ cmp(this->name(), Immediate(name));
+ __ j(not_equal, &miss);
}
- __ JumpIfSmi(receiver(), &miss);
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
Register map_reg = scratch1();
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
+ int receiver_count = types->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = receiver_maps->at(current);
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
__ cmp(map_reg, map);
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
__ j(equal, handlers->at(current));
}
}
@@ -3206,11 +1536,11 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, miss_force_generic;
+ Label slow, miss;
 // This stub is meant to be tail-jumped to; the receiver must already
// have been verified by the caller to not be a smi.
- __ JumpIfNotSmi(ecx, &miss_force_generic);
+ __ JumpIfNotSmi(ecx, &miss);
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3233,13 +1563,13 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
- __ bind(&miss_force_generic);
+ __ bind(&miss);
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index 06cbf2e112..e0f807ce4b 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -86,8 +86,8 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
// ICs as strict mode. The strict-ness of the IC must be preserved.
if (old_target->kind() == Code::STORE_IC ||
old_target->kind() == Code::KEYED_STORE_IC) {
- ASSERT(Code::GetStrictMode(old_target->extra_ic_state()) ==
- Code::GetStrictMode(target->extra_ic_state()));
+ ASSERT(StoreIC::GetStrictMode(old_target->extra_ic_state()) ==
+ StoreIC::GetStrictMode(target->extra_ic_state()));
}
#endif
Assembler::set_target_address_at(address, target->instruction_start());
@@ -100,8 +100,7 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
}
-InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
- JSObject* holder) {
+InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object) {
if (object->IsJSObject()) return OWN_MAP;
// If the object is a value, we use the prototype map for the cache.
@@ -111,13 +110,46 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
}
-JSObject* IC::GetCodeCacheHolder(Isolate* isolate,
- Object* object,
- InlineCacheHolderFlag holder) {
- Object* map_owner =
- holder == OWN_MAP ? object : object->GetPrototype(isolate);
- ASSERT(map_owner->IsJSObject());
- return JSObject::cast(map_owner);
+HeapObject* IC::GetCodeCacheHolder(Isolate* isolate,
+ Object* object,
+ InlineCacheHolderFlag holder) {
+ if (object->IsSmi()) holder = PROTOTYPE_MAP;
+ Object* map_owner = holder == OWN_MAP
+ ? object : object->GetPrototype(isolate);
+ return HeapObject::cast(map_owner);
+}
+
+
+InlineCacheHolderFlag IC::GetCodeCacheFlag(HeapType* type) {
+ if (type->Is(HeapType::Boolean()) ||
+ type->Is(HeapType::Number()) ||
+ type->Is(HeapType::String()) ||
+ type->Is(HeapType::Symbol())) {
+ return PROTOTYPE_MAP;
+ }
+ return OWN_MAP;
+}
+
+
+Handle<Map> IC::GetCodeCacheHolder(InlineCacheHolderFlag flag,
+ HeapType* type,
+ Isolate* isolate) {
+ if (flag == PROTOTYPE_MAP) {
+ Context* context = isolate->context()->native_context();
+ JSFunction* constructor;
+ if (type->Is(HeapType::Boolean())) {
+ constructor = context->boolean_function();
+ } else if (type->Is(HeapType::Number())) {
+ constructor = context->number_function();
+ } else if (type->Is(HeapType::String())) {
+ constructor = context->string_function();
+ } else {
+ ASSERT(type->Is(HeapType::Symbol()));
+ constructor = context->symbol_function();
+ }
+ return handle(JSObject::cast(constructor->instance_prototype())->map());
+ }
+ return TypeToMap(type, isolate);
}
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 55d7ba936f..bd06cb6f96 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -35,6 +35,7 @@
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
+#include "v8conversions.h"
namespace v8 {
namespace internal {
@@ -89,9 +90,10 @@ void IC::TraceIC(const char* type,
}
}
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
- Code::ExtraICState extra_state = new_target->extra_ic_state();
+ ExtraICState extra_state = new_target->extra_ic_state();
const char* modifier =
- GetTransitionMarkModifier(Code::GetKeyedAccessStoreMode(extra_state));
+ GetTransitionMarkModifier(
+ KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
PrintF(" (%c->%c%s)",
TransitionMarkFromState(state()),
TransitionMarkFromState(new_state),
@@ -146,6 +148,9 @@ IC::IC(FrameDepth depth, Isolate* isolate)
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
target_ = handle(raw_target(), isolate);
state_ = target_->ic_state();
+ extra_ic_state_ = target_->extra_ic_state();
+ target()->FindAllTypes(&types_);
+ target()->FindHandlers(&handlers_);
}
@@ -226,60 +231,8 @@ static void LookupForRead(Handle<Object> object,
}
-bool CallIC::TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object) {
- if (!lookup->IsConstantFunction()) return false;
- JSFunction* function = lookup->GetConstantFunction();
- if (!function->shared()->HasBuiltinFunctionId()) return false;
-
- // Fetch the arguments passed to the called function.
- const int argc = target()->arguments_count();
- Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- Arguments args(argc + 1,
- &Memory::Object_at(fp +
- StandardFrameConstants::kCallerSPOffset +
- argc * kPointerSize));
- switch (function->shared()->builtin_function_id()) {
- case kStringCharCodeAt:
- case kStringCharAt:
- if (object->IsString()) {
- String* string = String::cast(*object);
- // Check there's the right string value or wrapper in the receiver slot.
- ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
- // If we're in the default (fastest) state and the index is
- // out of bounds, update the state to record this fact.
- if (StringStubState::decode(extra_ic_state()) == DEFAULT_STRING_STUB &&
- argc >= 1 && args[1]->IsNumber()) {
- double index = DoubleToInteger(args.number_at(1));
- if (index < 0 || index >= string->length()) {
- extra_ic_state_ =
- StringStubState::update(extra_ic_state(),
- STRING_INDEX_OUT_OF_BOUNDS);
- return true;
- }
- }
- }
- break;
- default:
- return false;
- }
- return false;
-}
-
-
bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
Handle<String> name) {
- DisallowHeapAllocation no_gc;
-
- if (target()->is_call_stub()) {
- LookupResult lookup(isolate());
- LookupForRead(receiver, name, &lookup);
- if (static_cast<CallIC*>(this)->TryUpdateExtraICState(&lookup, receiver)) {
- return true;
- }
- }
-
if (target()->is_keyed_stub()) {
// Determine whether the failure is due to a name failure.
if (!name->IsName()) return false;
@@ -330,7 +283,7 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
// If the IC is shared between multiple receivers (slow dictionary mode), then
// the map cannot be deprecated and the stub invalidated.
if (cache_holder == OWN_MAP) {
- Map* old_map = target()->FindFirstMap();
+ Map* old_map = first_map();
if (old_map == *map) return true;
if (old_map != NULL) {
if (old_map->is_deprecated()) return true;
@@ -355,10 +308,8 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
void IC::TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name) {
- CodeHandleList handlers;
- target()->FindHandlers(&handlers);
- for (int i = 0; i < handlers.length(); i++) {
- Handle<Code> handler = handlers.at(i);
+ for (int i = 0; i < handlers()->length(); i++) {
+ Handle<Code> handler = handlers()->at(i);
int index = map->IndexInCodeCache(*name, *handler);
if (index >= 0) {
map->RemoveFromCodeCache(*name, *handler, index);
@@ -398,19 +349,6 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
}
-RelocInfo::Mode IC::ComputeMode() {
- Address addr = address();
- Code* code = Code::cast(isolate()->FindCodeObject(addr));
- for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
- !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->pc() == addr) return info->rmode();
- }
- UNREACHABLE();
- return RelocInfo::NONE32;
-}
-
-
Failure* IC::TypeError(const char* type,
Handle<Object> object,
Handle<Object> key) {
@@ -441,9 +379,6 @@ static int ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state) {
void IC::PostPatching(Address address, Code* target, Code* old_target) {
- if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
- return;
- }
Isolate* isolate = target->GetHeap()->isolate();
Code* host = isolate->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
@@ -466,10 +401,8 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
TypeFeedbackInfo::cast(host->type_feedback_info());
info->change_own_type_change_checksum();
}
- if (FLAG_watch_ic_patching) {
- host->set_profiler_ticks(0);
- isolate->runtime_profiler()->NotifyICChanged();
- }
+ host->set_profiler_ticks(0);
+ isolate->runtime_profiler()->NotifyICChanged();
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
// unoptimized version for the benefit of later inlining.
@@ -489,8 +422,6 @@ void IC::Clear(Isolate* isolate, Address address) {
case Code::STORE_IC: return StoreIC::Clear(isolate, address, target);
case Code::KEYED_STORE_IC:
return KeyedStoreIC::Clear(isolate, address, target);
- case Code::CALL_IC: return CallIC::Clear(address, target);
- case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
case Code::COMPARE_IC: return CompareIC::Clear(isolate, address, target);
case Code::COMPARE_NIL_IC: return CompareNilIC::Clear(address, target);
case Code::BINARY_OP_IC:
@@ -503,18 +434,6 @@ void IC::Clear(Isolate* isolate, Address address) {
}
-void CallICBase::Clear(Address address, Code* target) {
- if (IsCleared(target)) return;
- bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
- Code* code =
- target->GetIsolate()->stub_cache()->FindCallInitialize(
- target->arguments_count(),
- contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET,
- target->kind());
- SetTargetAtAddress(address, code);
-}
-
-
void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
if (IsCleared(target)) return;
// Make sure to also clear the map used in inline fast cases. If we
@@ -526,15 +445,17 @@ void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
if (IsCleared(target)) return;
- SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
+ Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
+ Code::LOAD_IC, target->extra_ic_state());
+ SetTargetAtAddress(address, code);
}
void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
if (IsCleared(target)) return;
- SetTargetAtAddress(address,
- *pre_monomorphic_stub(
- isolate, Code::GetStrictMode(target->extra_ic_state())));
+ Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
+ Code::STORE_IC, target->extra_ic_state());
+ SetTargetAtAddress(address, code);
}
@@ -542,7 +463,7 @@ void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
if (IsCleared(target)) return;
SetTargetAtAddress(address,
*pre_monomorphic_stub(
- isolate, Code::GetStrictMode(target->extra_ic_state())));
+ isolate, StoreIC::GetStrictMode(target->extra_ic_state())));
}
@@ -559,50 +480,6 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
}
-Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
- Handle<Object> delegate = Execution::GetFunctionDelegate(isolate(), object);
-
- if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
- // Patch the receiver and use the delegate as the function to
- // invoke. This is used for invoking objects as if they were functions.
- const int argc = target()->arguments_count();
- StackFrameLocator locator(isolate());
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *object);
- }
-
- return delegate;
-}
-
-
-void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
- Handle<Object> object) {
- while (callee->IsJSFunctionProxy()) {
- callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap(),
- isolate());
- }
-
- if (callee->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
- if (!function->shared()->is_classic_mode() || function->IsBuiltin()) {
- // Do not wrap receiver for strict mode functions or for builtins.
- return;
- }
- }
-
- // And only wrap string, number or boolean.
- if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- // Change the receiver to the result of calling ToObject on it.
- const int argc = this->target()->arguments_count();
- StackFrameLocator locator(isolate());
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *isolate()->factory()->ToObject(object));
- }
-}
-
-
static bool MigrateDeprecated(Handle<Object> object) {
if (!object->IsJSObject()) return false;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@@ -612,229 +489,6 @@ static bool MigrateDeprecated(Handle<Object> object) {
}
-MaybeObject* CallICBase::LoadFunction(Handle<Object> object,
- Handle<String> name) {
- bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
-
- // If the object is undefined or null it's illegal to try to get any
- // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_call", object, name);
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element if so.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- Handle<Object> result = Object::GetElement(isolate(), object, index);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- if (result->IsJSFunction()) return *result;
-
- // Try to find a suitable function delegate for the object at hand.
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return *result;
-
- // Otherwise, it will fail in the lookup step.
- }
-
- // Lookup the property in the object.
- LookupResult lookup(isolate());
- LookupForRead(object, name, &lookup);
-
- if (!lookup.IsFound()) {
- // If the object does not have the requested property, check which
- // exception we need to throw.
- return IsUndeclaredGlobal(object)
- ? ReferenceError("not_defined", name)
- : TypeError("undefined_method", object, name);
- }
-
- // Lookup is valid: Update inline cache and stub cache.
- if (use_ic) UpdateCaches(&lookup, object, name);
-
- // Get the property.
- PropertyAttributes attr;
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
-
- if (lookup.IsInterceptor() && attr == ABSENT) {
- // If the object does not have the requested property, check which
- // exception we need to throw.
- return IsUndeclaredGlobal(object)
- ? ReferenceError("not_defined", name)
- : TypeError("undefined_method", object, name);
- }
-
- ASSERT(!result->IsTheHole());
-
- // Make receiver an object if the callee requires it. Strict mode or builtin
- // functions do not wrap the receiver, non-strict functions and objects
- // called as functions do.
- ReceiverToObjectIfRequired(result, object);
-
- if (result->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(result);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Handle stepping into a function if step into is active.
- Debug* debug = isolate()->debug();
- if (debug->StepInActive()) {
- // Protect the result in a handle as the debugger can allocate and might
- // cause GC.
- debug->HandleStepIn(function, object, fp(), false);
- }
-#endif
- return *function;
- }
-
- // Try to find a suitable function delegate for the object at hand.
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return *result;
-
- return TypeError("property_not_function", object, name);
-}
-
-
-Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
- Handle<Object> object,
- Handle<String> name) {
- int argc = target()->arguments_count();
- Handle<JSObject> holder(lookup->holder(), isolate());
- switch (lookup->type()) {
- case FIELD: {
- PropertyIndex index = lookup->GetFieldIndex();
- return isolate()->stub_cache()->ComputeCallField(
- argc, kind_, extra_ic_state(), name, object, holder, index);
- }
- case CONSTANT: {
- if (!lookup->IsConstantFunction()) return Handle<Code>::null();
- // Get the constant function and compute the code stub for this
- // call; used for rewriting to monomorphic state and making sure
- // that the code stub is in the stub cache.
- Handle<JSFunction> function(lookup->GetConstantFunction(), isolate());
- return isolate()->stub_cache()->ComputeCallConstant(
- argc, kind_, extra_ic_state(), name, object, holder, function);
- }
- case NORMAL: {
- // If we return a null handle, the IC will not be patched.
- if (!object->IsJSObject()) return Handle<Code>::null();
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (holder->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Handle<PropertyCell> cell(
- global->GetPropertyCell(lookup), isolate());
- if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
- Handle<JSFunction> function(JSFunction::cast(cell->value()));
- return isolate()->stub_cache()->ComputeCallGlobal(
- argc, kind_, extra_ic_state(), name,
- receiver, global, cell, function);
- } else {
- // There is only one shared stub for calling normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
- return isolate()->stub_cache()->ComputeCallNormal(
- argc, kind_, extra_ic_state());
- }
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(*holder));
- return isolate()->stub_cache()->ComputeCallInterceptor(
- argc, kind_, extra_ic_state(), name, object, holder);
- default:
- return Handle<Code>::null();
- }
-}
-
-
-Handle<Code> CallICBase::megamorphic_stub() {
- return isolate()->stub_cache()->ComputeCallMegamorphic(
- target()->arguments_count(), kind_, extra_ic_state());
-}
-
-
-Handle<Code> CallICBase::pre_monomorphic_stub() {
- return isolate()->stub_cache()->ComputeCallPreMonomorphic(
- target()->arguments_count(), kind_, extra_ic_state());
-}
-
-
-void CallICBase::UpdateCaches(LookupResult* lookup,
- Handle<Object> object,
- Handle<String> name) {
- // Bail out if we didn't find a result.
- if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
-
- // Compute the number of arguments.
- Handle<Code> code;
- code = state() == UNINITIALIZED
- ? pre_monomorphic_stub()
- : ComputeMonomorphicStub(lookup, object, name);
-
- // If there's no appropriate stub we simply avoid updating the caches.
- // TODO(verwaest): Install a slow fallback in this case to avoid not learning,
- // and deopting Crankshaft code.
- if (code.is_null()) return;
-
- Handle<JSObject> cache_object = object->IsJSObject()
- ? Handle<JSObject>::cast(object)
- : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())),
- isolate());
-
- PatchCache(cache_object, name, code);
- TRACE_IC("CallIC", name);
-}
-
-
-MaybeObject* KeyedCallIC::LoadFunction(Handle<Object> object,
- Handle<Object> key) {
- if (key->IsInternalizedString()) {
- return CallICBase::LoadFunction(object, Handle<String>::cast(key));
- }
-
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_call", object, key);
- }
-
- bool use_ic = MigrateDeprecated(object)
- ? false : FLAG_use_ic && !object->IsAccessCheckNeeded();
-
- if (use_ic && state() != MEGAMORPHIC) {
- ASSERT(!object->IsJSGlobalProxy());
- int argc = target()->arguments_count();
- Handle<Code> stub = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = isolate()->stub_cache()->ComputeCallArguments(argc);
- }
- }
- ASSERT(!stub.is_null());
- set_target(*stub);
- TRACE_IC("CallIC", key);
- }
-
- Handle<Object> result = GetProperty(isolate(), object, key);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
-
- // Make receiver an object if the callee requires it. Strict mode or builtin
- // functions do not wrap the receiver; non-strict functions and objects
- // called as functions do.
- ReceiverToObjectIfRequired(result, object);
- if (result->IsJSFunction()) return *result;
-
- result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return *result;
-
- return TypeError("property_not_function", object, key);
-}
-
-
MaybeObject* LoadIC::Load(Handle<Object> object,
Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
@@ -862,9 +516,7 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
}
if (!stub.is_null()) {
set_target(*stub);
-#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
-#endif
}
// Get the string if we have a string wrapper object.
String* string = String::cast(JSValue::cast(*object)->value());
@@ -887,9 +539,7 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
}
if (!stub.is_null()) {
set_target(*stub);
-#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
-#endif
}
return *Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object));
}
@@ -932,6 +582,7 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
attr == ABSENT && IsUndeclaredGlobal(object)) {
return ReferenceError("not_defined", name);
}
+
return *result;
}
@@ -950,153 +601,155 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
}
-bool IC::UpdatePolymorphicIC(Handle<HeapObject> receiver,
+bool IC::UpdatePolymorphicIC(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code) {
if (!code->is_handler()) return false;
-
- MapHandleList receiver_maps;
- CodeHandleList handlers;
-
- int number_of_valid_maps;
+ int number_of_valid_types;
int handler_to_overwrite = -1;
- Handle<Map> new_receiver_map(receiver->map());
- {
- DisallowHeapAllocation no_gc;
- target()->FindAllMaps(&receiver_maps);
- int number_of_maps = receiver_maps.length();
- number_of_valid_maps = number_of_maps;
-
- for (int i = 0; i < number_of_maps; i++) {
- Handle<Map> map = receiver_maps.at(i);
- // Filter out deprecated maps to ensure its instances get migrated.
- if (map->is_deprecated()) {
- number_of_valid_maps--;
- // If the receiver map is already in the polymorphic IC, this indicates
- // there was a prototoype chain failure. In that case, just overwrite the
- // handler.
- } else if (map.is_identical_to(new_receiver_map)) {
- number_of_valid_maps--;
- handler_to_overwrite = i;
- }
- }
-
- if (number_of_valid_maps >= 4) return false;
- if (number_of_maps == 0) return false;
- if (!target()->FindHandlers(&handlers, receiver_maps.length())) {
- return false;
+ int number_of_types = types()->length();
+ number_of_valid_types = number_of_types;
+
+ for (int i = 0; i < number_of_types; i++) {
+ Handle<HeapType> current_type = types()->at(i);
+ // Filter out deprecated maps to ensure their instances get migrated.
+ if (current_type->IsClass() && current_type->AsClass()->is_deprecated()) {
+ number_of_valid_types--;
+ // If the receiver type is already in the polymorphic IC, this indicates
+      // there was a prototype chain failure. In that case, just overwrite the
+ // handler.
+ } else if (type->IsCurrently(current_type)) {
+ ASSERT(handler_to_overwrite == -1);
+ number_of_valid_types--;
+ handler_to_overwrite = i;
}
}
- number_of_valid_maps++;
+ if (number_of_valid_types >= 4) return false;
+ if (number_of_types == 0) return false;
+ if (handlers()->length() < types()->length()) return false;
+
+ number_of_valid_types++;
if (handler_to_overwrite >= 0) {
- handlers.Set(handler_to_overwrite, code);
+ handlers()->Set(handler_to_overwrite, code);
} else {
- receiver_maps.Add(new_receiver_map);
- handlers.Add(code);
+ types()->Add(type);
+ handlers()->Add(code);
}
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &receiver_maps, &handlers, number_of_valid_maps, name, strict_mode());
+ kind(), types(), handlers(), number_of_valid_types,
+ name, extra_ic_state());
set_target(*ic);
return true;
}
-void IC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
+Handle<HeapType> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
+ return object->IsJSGlobalObject()
+ ? HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate)
+ : HeapType::OfCurrently(object, isolate);
+}
+
+
+Handle<Map> IC::TypeToMap(HeapType* type, Isolate* isolate) {
+ if (type->Is(HeapType::Number()))
+ return isolate->factory()->heap_number_map();
+ if (type->Is(HeapType::Boolean())) return isolate->factory()->oddball_map();
+ if (type->IsConstant()) {
+ return handle(Handle<JSGlobalObject>::cast(type->AsConstant())->map());
+ }
+ ASSERT(type->IsClass());
+ return type->AsClass();
+}
+
+
+template <class T>
+typename T::TypeHandle IC::MapToType(Handle<Map> map,
+ typename T::Region* region) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
+ return T::Number(region);
+ } else if (map->instance_type() == ODDBALL_TYPE) {
+ // The only oddballs that can be recorded in ICs are booleans.
+ return T::Boolean(region);
+ } else {
+ return T::Class(map, region);
+ }
+}
+
+
+template
+Type* IC::MapToType<Type>(Handle<Map> map, Zone* zone);
+
+
+template
+Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map, Isolate* region);
+
+
+void IC::UpdateMonomorphicIC(Handle<HeapType> type,
Handle<Code> handler,
Handle<String> name) {
if (!handler->is_handler()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
- receiver, handler, name, strict_mode());
+ kind(), name, type, handler, extra_ic_state());
set_target(*ic);
}
void IC::CopyICToMegamorphicCache(Handle<String> name) {
- MapHandleList receiver_maps;
- CodeHandleList handlers;
- {
- DisallowHeapAllocation no_gc;
- target()->FindAllMaps(&receiver_maps);
- if (!target()->FindHandlers(&handlers, receiver_maps.length())) return;
- }
- for (int i = 0; i < receiver_maps.length(); i++) {
- UpdateMegamorphicCache(*receiver_maps.at(i), *name, *handlers.at(i));
+ if (handlers()->length() < types()->length()) return;
+ for (int i = 0; i < types()->length(); i++) {
+ UpdateMegamorphicCache(*types()->at(i), *name, *handlers()->at(i));
}
}
-bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
- DisallowHeapAllocation no_allocation;
-
- Map* current_map = target()->FindFirstMap();
- ElementsKind receiver_elements_kind = receiver_map->elements_kind();
- bool more_general_transition =
- IsMoreGeneralElementsKindTransition(
- current_map->elements_kind(), receiver_elements_kind);
+bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
+ if (source_map == NULL) return true;
+ if (target_map == NULL) return false;
+ ElementsKind target_elements_kind = target_map->elements_kind();
+ bool more_general_transition = IsMoreGeneralElementsKindTransition(
+ source_map->elements_kind(), target_elements_kind);
Map* transitioned_map = more_general_transition
- ? current_map->LookupElementsTransitionMap(receiver_elements_kind)
+ ? source_map->LookupElementsTransitionMap(target_elements_kind)
: NULL;
-
- return transitioned_map == receiver_map;
+ return transitioned_map == target_map;
}
-void IC::PatchCache(Handle<HeapObject> receiver,
+void IC::PatchCache(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code) {
switch (state()) {
case UNINITIALIZED:
case PREMONOMORPHIC:
case MONOMORPHIC_PROTOTYPE_FAILURE:
- UpdateMonomorphicIC(receiver, code, name);
+ UpdateMonomorphicIC(type, code, name);
break;
- case MONOMORPHIC:
+ case MONOMORPHIC: {
// For now, call stubs are allowed to rewrite to the same stub. This
// happens e.g., when the field does not contain a function.
- ASSERT(target()->is_call_stub() ||
- target()->is_keyed_call_stub() ||
- !target().is_identical_to(code));
+ ASSERT(!target().is_identical_to(code));
+ Map* old_map = first_map();
+ Code* old_handler = first_handler();
+ Map* map = type->IsClass() ? *type->AsClass() : NULL;
+ if (old_handler == *code &&
+ IsTransitionOfMonomorphicTarget(old_map, map)) {
+ UpdateMonomorphicIC(type, code, name);
+ break;
+ }
+ // Fall through.
+ }
+ case POLYMORPHIC:
if (!target()->is_keyed_stub()) {
- bool is_same_handler = false;
- {
- DisallowHeapAllocation no_allocation;
- Code* old_handler = target()->FindFirstHandler();
- is_same_handler = old_handler == *code;
- }
- if (is_same_handler
- && IsTransitionedMapOfMonomorphicTarget(receiver->map())) {
- UpdateMonomorphicIC(receiver, code, name);
- break;
- }
- if (UpdatePolymorphicIC(receiver, name, code)) {
- break;
- }
-
+ if (UpdatePolymorphicIC(type, name, code)) break;
CopyICToMegamorphicCache(name);
}
-
- UpdateMegamorphicCache(receiver->map(), *name, *code);
set_target(*megamorphic_stub());
- break;
+ // Fall through.
case MEGAMORPHIC:
- UpdateMegamorphicCache(receiver->map(), *name, *code);
- break;
- case POLYMORPHIC:
- if (target()->is_keyed_stub()) {
- // When trying to patch a polymorphic keyed stub with anything other
- // than another polymorphic stub, go generic.
- set_target(*generic_stub());
- } else {
- if (UpdatePolymorphicIC(receiver, name, code)) {
- break;
- }
- CopyICToMegamorphicCache(name);
- UpdateMegamorphicCache(receiver->map(), *name, *code);
- set_target(*megamorphic_stub());
- }
+ UpdateMegamorphicCache(*type, *name, *code);
break;
case DEBUG_STUB:
break;
@@ -1107,6 +760,23 @@ void IC::PatchCache(Handle<HeapObject> receiver,
}
+Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
+ ExtraICState extra_state) {
+ return isolate->stub_cache()->ComputeLoad(UNINITIALIZED, extra_state);
+}
+
+
+Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate,
+ ExtraICState extra_state) {
+ return isolate->stub_cache()->ComputeLoad(PREMONOMORPHIC, extra_state);
+}
+
+
+Handle<Code> LoadIC::megamorphic_stub() {
+ return isolate()->stub_cache()->ComputeLoad(MEGAMORPHIC, extra_ic_state());
+}
+
+
Handle<Code> LoadIC::SimpleFieldLoad(int offset,
bool inobject,
Representation representation) {
@@ -1119,65 +789,64 @@ Handle<Code> LoadIC::SimpleFieldLoad(int offset,
}
}
+
void LoadIC::UpdateCaches(LookupResult* lookup,
Handle<Object> object,
Handle<String> name) {
- // TODO(verwaest): It would be nice to support loading fields from smis as
- // well. For now just fail to update the cache.
- if (!object->IsHeapObject()) return;
-
- Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
-
- Handle<Code> code;
if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
- code = pre_monomorphic_stub();
- } else if (!lookup->IsCacheable()) {
+ set_target(*pre_monomorphic_stub());
+ TRACE_IC("LoadIC", name);
+ return;
+ }
+
+ Handle<HeapType> type = CurrentTypeOf(object, isolate());
+ Handle<Code> code;
+ if (!lookup->IsCacheable()) {
// Bail out if the result is not cacheable.
code = slow_stub();
- } else if (object->IsString() &&
- name->Equals(isolate()->heap()->length_string())) {
- int length_index = String::kLengthOffset / kPointerSize;
- code = SimpleFieldLoad(length_index);
- } else if (!object->IsJSObject()) {
- // TODO(jkummerow): It would be nice to support non-JSObjects in
- // ComputeLoadHandler, then we wouldn't need to go generic here.
- code = slow_stub();
} else if (!lookup->IsProperty()) {
- code = kind() == Code::LOAD_IC
- ? isolate()->stub_cache()->ComputeLoadNonexistent(
- name, Handle<JSObject>::cast(receiver))
- : slow_stub();
+ if (kind() == Code::LOAD_IC) {
+ code = isolate()->stub_cache()->ComputeLoadNonexistent(name, type);
+ } else {
+ code = slow_stub();
+ }
} else {
- code = ComputeHandler(lookup, Handle<JSObject>::cast(receiver), name);
+ code = ComputeHandler(lookup, object, name);
}
- PatchCache(receiver, name, code);
+ PatchCache(type, name, code);
TRACE_IC("LoadIC", name);
}
-void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
+void IC::UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {
// Cache code holding map should be consistent with
// GenerateMonomorphicCacheProbe.
+ Map* map = *TypeToMap(type, isolate());
isolate()->stub_cache()->Set(name, map, code);
}
Handle<Code> IC::ComputeHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
+ Handle<Object> object,
Handle<String> name,
Handle<Object> value) {
+ InlineCacheHolderFlag cache_holder = GetCodeCacheForObject(*object);
+ Handle<HeapObject> stub_holder(GetCodeCacheHolder(
+ isolate(), *object, cache_holder));
+
Handle<Code> code = isolate()->stub_cache()->FindHandler(
- name, receiver, kind());
+ name, handle(stub_holder->map()), kind(), cache_holder);
if (!code.is_null()) return code;
- code = CompileHandler(lookup, receiver, name, value);
+ code = CompileHandler(lookup, object, name, value, cache_holder);
+ ASSERT(code->is_handler());
- if (code->is_handler() && code->type() != Code::NORMAL) {
- HeapObject::UpdateMapCodeCache(receiver, name, code);
+ if (code->type() != Code::NORMAL) {
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
}
return code;
@@ -1185,29 +854,36 @@ Handle<Code> IC::ComputeHandler(LookupResult* lookup,
Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
+ Handle<Object> object,
Handle<String> name,
- Handle<Object> unused) {
+ Handle<Object> unused,
+ InlineCacheHolderFlag cache_holder) {
+ if (object->IsString() && name->Equals(isolate()->heap()->length_string())) {
+ int length_index = String::kLengthOffset / kPointerSize;
+ return SimpleFieldLoad(length_index);
+ }
+
+ Handle<HeapType> type = CurrentTypeOf(object, isolate());
Handle<JSObject> holder(lookup->holder());
- LoadStubCompiler compiler(isolate(), kind());
+ LoadStubCompiler compiler(isolate(), kNoExtraICState, cache_holder, kind());
switch (lookup->type()) {
case FIELD: {
PropertyIndex field = lookup->GetFieldIndex();
- if (receiver.is_identical_to(holder)) {
+ if (object.is_identical_to(holder)) {
return SimpleFieldLoad(field.translate(holder),
field.is_inobject(holder),
lookup->representation());
}
return compiler.CompileLoadField(
- receiver, holder, name, field, lookup->representation());
+ type, holder, name, field, lookup->representation());
}
case CONSTANT: {
Handle<Object> constant(lookup->GetConstant(), isolate());
// TODO(2803): Don't compute a stub for cons strings because they cannot
// be embedded into code.
if (constant->IsConsString()) break;
- return compiler.CompileLoadConstant(receiver, holder, name, constant);
+ return compiler.CompileLoadConstant(type, holder, name, constant);
}
case NORMAL:
if (kind() != Code::LOAD_IC) break;
@@ -1215,25 +891,31 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
Handle<PropertyCell> cell(
global->GetPropertyCell(lookup), isolate());
- // TODO(verwaest): Turn into a handler.
- return isolate()->stub_cache()->ComputeLoadGlobal(
- name, receiver, global, cell, lookup->IsDontDelete());
+ Handle<Code> code = compiler.CompileLoadGlobal(
+ type, global, cell, name, lookup->IsDontDelete());
+ // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+ Handle<HeapObject> stub_holder(GetCodeCacheHolder(
+ isolate(), *object, cache_holder));
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
+ return code;
}
// There is only one shared stub for loading normalized
// properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
+ // property must be found in the object for the stub to be
// applicable.
- if (!holder.is_identical_to(receiver)) break;
+ if (!object.is_identical_to(holder)) break;
return isolate()->builtins()->LoadIC_Normal();
case CALLBACKS: {
// Use simple field loads for some well-known callback properties.
- int object_offset;
- Handle<Map> map(receiver->map());
- if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) {
- PropertyIndex index =
- PropertyIndex::NewHeaderIndex(object_offset / kPointerSize);
- return compiler.CompileLoadField(
- receiver, receiver, name, index, Representation::Tagged());
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ Handle<HeapType> type = IC::MapToType<HeapType>(
+ handle(receiver->map()), isolate());
+ int object_offset;
+ if (Accessors::IsJSObjectFieldAccessor<HeapType>(
+ type, name, &object_offset)) {
+ return SimpleFieldLoad(object_offset / kPointerSize);
+ }
}
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
@@ -1241,8 +923,8 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
Handle<ExecutableAccessorInfo> info =
Handle<ExecutableAccessorInfo>::cast(callback);
if (v8::ToCData<Address>(info->getter()) == 0) break;
- if (!info->IsCompatibleReceiver(*receiver)) break;
- return compiler.CompileLoadCallback(receiver, holder, name, info);
+ if (!info->IsCompatibleReceiver(*object)) break;
+ return compiler.CompileLoadCallback(type, holder, name, info);
} else if (callback->IsAccessorPair()) {
Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(),
isolate());
@@ -1250,13 +932,20 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
if (holder->IsGlobalObject()) break;
if (!holder->HasFastProperties()) break;
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ if (!object->IsJSObject() &&
+ !function->IsBuiltin() &&
+ function->shared()->is_classic_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ break;
+ }
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver)) {
+ call_optimization.IsCompatibleReceiver(object, holder)) {
return compiler.CompileLoadCallback(
- receiver, holder, name, call_optimization);
+ type, holder, name, call_optimization);
}
- return compiler.CompileLoadViaGetter(receiver, holder, name, function);
+ return compiler.CompileLoadViaGetter(type, holder, name, function);
}
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
@@ -1266,7 +955,7 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
}
case INTERCEPTOR:
ASSERT(HasInterceptorGetter(*holder));
- return compiler.CompileLoadInterceptor(receiver, holder, name);
+ return compiler.CompileLoadInterceptor(type, holder, name);
default:
break;
}
@@ -1315,7 +1004,7 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
if (target().is_identical_to(string_stub())) {
target_receiver_maps.Add(isolate()->factory()->string_map());
} else {
- target()->FindAllMaps(&target_receiver_maps);
+ GetMapsFromTypes(&target_receiver_maps);
if (target_receiver_maps.length() == 0) {
return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
@@ -1358,9 +1047,7 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
}
-MaybeObject* KeyedLoadIC::Load(Handle<Object> object,
- Handle<Object> key,
- ICMissMode miss_mode) {
+MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) {
if (MigrateDeprecated(object)) {
return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
}
@@ -1368,29 +1055,27 @@ MaybeObject* KeyedLoadIC::Load(Handle<Object> object,
MaybeObject* maybe_object = NULL;
Handle<Code> stub = generic_stub();
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
+  // Check for non-string values that can be converted into an
+  // internalized string directly or are representable as a smi.
key = TryConvertKey(key, isolate());
if (key->IsInternalizedString()) {
maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
if (maybe_object->IsFailure()) return maybe_object;
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
- ASSERT(!object->IsJSGlobalProxy());
- if (miss_mode != MISS_FORCE_GENERIC) {
- if (object->IsString() && key->IsNumber()) {
- if (state() == UNINITIALIZED) stub = string_stub();
- } else if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
- } else if (receiver->HasIndexedInterceptor()) {
- stub = indexed_interceptor_stub();
- } else if (!key->ToSmi()->IsFailure() &&
- (!target().is_identical_to(non_strict_arguments_stub()))) {
- stub = LoadElementStub(receiver);
- }
+ ASSERT(!object->IsAccessCheckNeeded());
+ if (object->IsString() && key->IsNumber()) {
+ if (state() == UNINITIALIZED) stub = string_stub();
+ } else if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
+ } else if (receiver->HasIndexedInterceptor()) {
+ stub = indexed_interceptor_stub();
+ } else if (!key->ToSmi()->IsFailure() &&
+ (!target().is_identical_to(non_strict_arguments_stub()))) {
+ stub = LoadElementStub(receiver);
}
}
}
@@ -1417,21 +1102,18 @@ static bool LookupForWrite(Handle<JSObject> receiver,
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
if (lookup->IsFound()) {
- if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
-
- if (lookup->holder() == *receiver) {
- if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) {
- receiver->LocalLookupRealNamedProperty(*name, lookup);
- return lookup->IsFound() &&
- !lookup->IsReadOnly() &&
- lookup->CanHoldValue(value) &&
- lookup->IsCacheable();
- }
- return lookup->CanHoldValue(value);
+ if (lookup->IsInterceptor() && !HasInterceptorSetter(lookup->holder())) {
+ receiver->LocalLookupRealNamedProperty(*name, lookup);
+ if (!lookup->IsFound()) return false;
}
+ if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
+ if (lookup->holder() == *receiver) return lookup->CanHoldValue(value);
if (lookup->IsPropertyCallbacks()) return true;
-
+ // JSGlobalProxy either stores on the global object in the prototype, or
+ // goes into the runtime if access checks are needed, so this is always
+ // safe.
+ if (receiver->IsJSGlobalProxy()) return true;
// Currently normal holders in the prototype chain are not supported. They
// would require a runtime positive lookup and verification that the details
// have not changed.
@@ -1446,8 +1128,7 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// receiver when trying to fetch extra information from the transition.
receiver->map()->LookupTransition(*holder, *name, lookup);
if (!lookup->IsTransition()) return false;
- PropertyDetails target_details =
- lookup->GetTransitionDetails(receiver->map());
+ PropertyDetails target_details = lookup->GetTransitionDetails();
if (target_details.IsReadOnly()) return false;
// If the value that's being stored does not fit in the field that the
@@ -1458,7 +1139,7 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// transition target.
ASSERT(!receiver->map()->is_deprecated());
if (!value->FitsRepresentation(target_details.representation())) {
- Handle<Map> target(lookup->GetTransitionMapFromMap(receiver->map()));
+ Handle<Map> target(lookup->GetTransitionTarget());
Map::GeneralizeRepresentation(
target, target->LastAdded(),
value->OptimalRepresentation(), FORCE_FIELD);
@@ -1538,26 +1219,12 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
return *result;
}
- if (receiver->IsJSGlobalProxy()) {
- if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) {
- // Generate a generic stub that goes to the runtime when we see a global
- // proxy as receiver.
- Handle<Code> stub = global_proxy_stub();
- set_target(*stub);
- TRACE_IC("StoreIC", name);
- }
- Handle<Object> result = JSReceiver::SetProperty(
- receiver, name, value, NONE, strict_mode(), store_mode);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *result;
- }
-
LookupResult lookup(isolate());
bool can_store = LookupForWrite(receiver, name, value, &lookup, this);
if (!can_store &&
strict_mode() == kStrictMode &&
!(lookup.IsProperty() && lookup.IsReadOnly()) &&
- IsUndeclaredGlobal(object)) {
+ object->IsGlobalObject()) {
// Strict mode doesn't allow setting non-existent global property.
return ReferenceError("not_defined", name);
}
@@ -1584,11 +1251,36 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
}
+Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ ExtraICState extra_state = ComputeExtraICState(strict_mode);
+ Handle<Code> ic = isolate->stub_cache()->ComputeStore(
+ UNINITIALIZED, extra_state);
+ return ic;
+}
+
+
+Handle<Code> StoreIC::megamorphic_stub() {
+ return isolate()->stub_cache()->ComputeStore(MEGAMORPHIC, extra_ic_state());
+}
+
+
+Handle<Code> StoreIC::generic_stub() const {
+ return isolate()->stub_cache()->ComputeStore(GENERIC, extra_ic_state());
+}
+
+
+Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ ExtraICState state = ComputeExtraICState(strict_mode);
+ return isolate->stub_cache()->ComputeStore(PREMONOMORPHIC, state);
+}
+
+
void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
- ASSERT(!receiver->IsJSGlobalProxy());
ASSERT(lookup->IsFound());
// These are not cacheable, so we never see such LookupResults here.
@@ -1596,29 +1288,32 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Code> code = ComputeHandler(lookup, receiver, name, value);
- PatchCache(receiver, name, code);
+ PatchCache(CurrentTypeOf(receiver, isolate()), name, code);
TRACE_IC("StoreIC", name);
}
Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
+ Handle<Object> object,
Handle<String> name,
- Handle<Object> value) {
+ Handle<Object> value,
+ InlineCacheHolderFlag cache_holder) {
+ if (object->IsAccessCheckNeeded()) return slow_stub();
+ ASSERT(cache_holder == OWN_MAP);
+ // This is currently guaranteed by checks in StoreIC::Store.
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
Handle<JSObject> holder(lookup->holder());
- StoreStubCompiler compiler(isolate(), strict_mode(), kind());
+ // Handlers do not use strict mode.
+ StoreStubCompiler compiler(isolate(), kNonStrictMode, kind());
switch (lookup->type()) {
case FIELD:
return compiler.CompileStoreField(receiver, lookup, name);
case TRANSITION: {
// Explicitly pass in the receiver map since LookupForWrite may have
// stored something else than the receiver in the holder.
- Handle<Map> transition(
- lookup->GetTransitionTarget(receiver->map()), isolate());
- int descriptor = transition->LastAdded();
-
- DescriptorArray* target_descriptors = transition->instance_descriptors();
- PropertyDetails details = target_descriptors->GetDetails(descriptor);
+ Handle<Map> transition(lookup->GetTransitionTarget());
+ PropertyDetails details = transition->GetLastDescriptorDetails();
if (details.type() == CALLBACKS || details.attributes() != NONE) break;
@@ -1627,21 +1322,25 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
}
case NORMAL:
if (kind() == Code::KEYED_STORE_IC) break;
- if (receiver->IsGlobalObject()) {
+ if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- Handle<PropertyCell> cell(
- global->GetPropertyCell(lookup), isolate());
- // TODO(verwaest): Turn into a handler.
- return isolate()->stub_cache()->ComputeStoreGlobal(
- name, global, cell, value, strict_mode());
+ Handle<GlobalObject> global = receiver->IsJSGlobalProxy()
+ ? handle(GlobalObject::cast(receiver->GetPrototype()))
+ : Handle<GlobalObject>::cast(receiver);
+ Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
+ Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
+ StoreGlobalStub stub(
+ union_type->IsConstant(), receiver->IsJSGlobalProxy());
+ Handle<Code> code = stub.GetCodeCopyFromTemplate(
+ isolate(), *global, *cell);
+ // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
}
ASSERT(holder.is_identical_to(receiver));
- return strict_mode() == kStrictMode
- ? isolate()->builtins()->StoreIC_Normal_Strict()
- : isolate()->builtins()->StoreIC_Normal();
+ return isolate()->builtins()->StoreIC_Normal();
case CALLBACKS: {
if (kind() == Code::KEYED_STORE_IC) break;
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
@@ -1661,7 +1360,7 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver)) {
+ call_optimization.IsCompatibleReceiver(receiver, holder)) {
return compiler.CompileStoreCallback(
receiver, holder, name, call_optimization);
}
@@ -1676,7 +1375,7 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
}
case INTERCEPTOR:
if (kind() == Code::KEYED_STORE_IC) break;
- ASSERT(HasInterceptorSetter(*receiver));
+ ASSERT(HasInterceptorSetter(*holder));
return compiler.CompileStoreInterceptor(receiver, name);
case CONSTANT:
break;
@@ -1709,36 +1408,25 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
monomorphic_map, strict_mode(), store_mode);
}
- MapHandleList target_receiver_maps;
- target()->FindAllMaps(&target_receiver_maps);
- if (target_receiver_maps.length() == 0) {
- // In the case that there is a non-map-specific IC is installed (e.g. keyed
- // stores into properties in dictionary mode), then there will be not
- // receiver maps in the target.
- return generic_stub();
- }
-
// There are several special cases where an IC that is MONOMORPHIC can still
// transition to a different GetNonTransitioningStoreMode IC that handles a
// superset of the original IC. Handle those here if the receiver map hasn't
// changed or it has transitioned to a more general kind.
KeyedAccessStoreMode old_store_mode =
- Code::GetKeyedAccessStoreMode(target()->extra_ic_state());
- Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
+ KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
if (state() == MONOMORPHIC) {
// If the "old" and "new" maps are in the same elements map family, stay
// MONOMORPHIC and use the map for the most generic ElementsKind.
- Handle<Map> transitioned_receiver_map = receiver_map;
+ Handle<Map> transitioned_map = receiver_map;
if (IsTransitionStoreMode(store_mode)) {
- transitioned_receiver_map =
- ComputeTransitionedMap(receiver, store_mode);
+ transitioned_map = ComputeTransitionedMap(receiver, store_mode);
}
- if (IsTransitionedMapOfMonomorphicTarget(*transitioned_receiver_map)) {
+ if (IsTransitionOfMonomorphicTarget(first_map(), *transitioned_map)) {
// Element family is the same, use the "worst" case map.
store_mode = GetNonTransitioningStoreMode(store_mode);
return isolate()->stub_cache()->ComputeKeyedStoreElement(
- transitioned_receiver_map, strict_mode(), store_mode);
- } else if (*previous_receiver_map == receiver->map() &&
+ transitioned_map, strict_mode(), store_mode);
+ } else if (first_map() == receiver->map() &&
old_store_mode == STANDARD_STORE &&
(IsGrowStoreMode(store_mode) ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
@@ -1753,6 +1441,9 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
ASSERT(state() != GENERIC);
+ MapHandleList target_receiver_maps;
+ GetMapsFromTypes(&target_receiver_maps);
+
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
@@ -1795,7 +1486,8 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
if (store_mode != STANDARD_STORE) {
int external_arrays = 0;
for (int i = 0; i < target_receiver_maps.length(); ++i) {
- if (target_receiver_maps[i]->has_external_array_elements()) {
+ if (target_receiver_maps[i]->has_external_array_elements() ||
+ target_receiver_maps[i]->has_fixed_typed_array_elements()) {
external_arrays++;
}
}
@@ -1933,15 +1625,19 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
- Handle<Object> value,
- ICMissMode miss_mode) {
+ Handle<Object> value) {
if (MigrateDeprecated(object)) {
- return Runtime::SetObjectPropertyOrFail(
- isolate(), object , key, value, NONE, strict_mode());
+ Handle<Object> result = Runtime::SetObjectProperty(isolate(), object,
+ key,
+ value,
+ NONE,
+ strict_mode());
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
+  // Check for non-string values that can be converted into an
+  // internalized string directly or are representable as a smi.
key = TryConvertKey(key, isolate());
MaybeObject* maybe_object = NULL;
@@ -1966,17 +1662,21 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
}
if (use_ic) {
- ASSERT(!object->IsJSGlobalProxy());
-
- if (miss_mode != MISS_FORCE_GENERIC) {
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
- } else if (key_is_smi_like &&
- (!target().is_identical_to(non_strict_arguments_stub()))) {
+ ASSERT(!object->IsAccessCheckNeeded());
+
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
+ } else if (key_is_smi_like &&
+ !(target().is_identical_to(non_strict_arguments_stub()))) {
+ // We should go generic if receiver isn't a dictionary, but our
+ // prototype chain does have dictionary elements. This ensures that
+ // other non-dictionary receivers in the polymorphic case benefit
+ // from fast path keyed stores.
+ if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) {
KeyedAccessStoreMode store_mode =
GetStoreMode(receiver, key, value);
stub = StoreElementStub(receiver, store_mode);
@@ -1996,8 +1696,12 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
}
if (maybe_object) return maybe_object;
- return Runtime::SetObjectPropertyOrFail(
- isolate(), object , key, value, NONE, strict_mode());
+ Handle<Object> result = Runtime::SetObjectProperty(isolate(), object, key,
+ value,
+ NONE,
+ strict_mode());
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
@@ -2009,51 +1713,6 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
//
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- CallIC ic(isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<String> key = args.at<String>(1);
- ic.UpdateState(receiver, key);
- MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
- JSFunction* raw_function;
- if (!maybe_result->To(&raw_function)) return maybe_result;
-
- // The first time the inline cache is updated may be the first time the
- // function it references gets called. If the function is lazily compiled
- // then the first call will trigger a compilation. We check for this case
- // and we do the compilation immediately, instead of waiting for the stub
- // currently attached to the JSFunction object to trigger compilation.
- if (raw_function->is_compiled()) return raw_function;
-
- Handle<JSFunction> function(raw_function);
- JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
- return *function;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- KeyedCallIC ic(isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
- MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
- // Result could be a function or a failure.
- JSFunction* raw_function = NULL;
- if (!maybe_result->To(&raw_function)) return maybe_result;
-
- if (raw_function->is_compiled()) return raw_function;
-
- Handle<JSFunction> function(raw_function, isolate);
- JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
- return *function;
-}
-
-
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
HandleScope scope(isolate);
@@ -2074,7 +1733,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
ic.UpdateState(receiver, key);
- return ic.Load(receiver, key, MISS);
+ return ic.Load(receiver, key);
}
@@ -2085,18 +1744,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) {
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
ic.UpdateState(receiver, key);
- return ic.Load(receiver, key, MISS);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
- return ic.Load(receiver, key, MISS_FORCE_GENERIC);
+ return ic.Load(receiver, key);
}
@@ -2205,7 +1853,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
ic.UpdateState(receiver, key);
- return ic.Store(receiver, key, args.at<Object>(2), MISS);
+ return ic.Store(receiver, key, args.at<Object>(2));
}
@@ -2216,7 +1864,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) {
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
ic.UpdateState(receiver, key);
- return ic.Store(receiver, key, args.at<Object>(2), MISS);
+ return ic.Store(receiver, key, args.at<Object>(2));
}
@@ -2228,12 +1876,12 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
StrictModeFlag strict_mode = ic.strict_mode();
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- NONE,
- strict_mode);
+ Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
+ value,
+ NONE,
+ strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -2245,23 +1893,12 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
StrictModeFlag strict_mode = ic.strict_mode();
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- NONE,
- strict_mode);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
- return ic.Store(receiver, key, args.at<Object>(2), MISS_FORCE_GENERIC);
+ Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
+ value,
+ NONE,
+ strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -2270,94 +1907,535 @@ RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
ASSERT(args.length() == 4);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
Handle<Object> value = args.at<Object>(0);
+ Handle<Map> map = args.at<Map>(1);
Handle<Object> key = args.at<Object>(2);
Handle<Object> object = args.at<Object>(3);
StrictModeFlag strict_mode = ic.strict_mode();
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- NONE,
- strict_mode);
+ if (object->IsJSObject()) {
+ JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
+ map->elements_kind());
+ }
+ Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
+ value,
+ NONE,
+ strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
+}
+
+
+BinaryOpIC::State::State(ExtraICState extra_ic_state) {
+  // We don't deserialize the SSE2 Field, since it is only used to include
+  // both SSE2 and non-SSE2 versions in the snapshot. For code generation we
+  // always want it to reflect the current state.
+ op_ = static_cast<Token::Value>(
+ FIRST_TOKEN + OpField::decode(extra_ic_state));
+ mode_ = OverwriteModeField::decode(extra_ic_state);
+ fixed_right_arg_ = Maybe<int>(
+ HasFixedRightArgField::decode(extra_ic_state),
+ 1 << FixedRightArgValueField::decode(extra_ic_state));
+ left_kind_ = LeftKindField::decode(extra_ic_state);
+ if (fixed_right_arg_.has_value) {
+ right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? SMI : INT32;
+ } else {
+ right_kind_ = RightKindField::decode(extra_ic_state);
+ }
+ result_kind_ = ResultKindField::decode(extra_ic_state);
+ ASSERT_LE(FIRST_TOKEN, op_);
+ ASSERT_LE(op_, LAST_TOKEN);
+}
+
+
+ExtraICState BinaryOpIC::State::GetExtraICState() const {
+ bool sse2 = (Max(result_kind_, Max(left_kind_, right_kind_)) > SMI &&
+ CpuFeatures::IsSafeForSnapshot(SSE2));
+ ExtraICState extra_ic_state =
+ SSE2Field::encode(sse2) |
+ OpField::encode(op_ - FIRST_TOKEN) |
+ OverwriteModeField::encode(mode_) |
+ LeftKindField::encode(left_kind_) |
+ ResultKindField::encode(result_kind_) |
+ HasFixedRightArgField::encode(fixed_right_arg_.has_value);
+ if (fixed_right_arg_.has_value) {
+ extra_ic_state = FixedRightArgValueField::update(
+ extra_ic_state, WhichPowerOf2(fixed_right_arg_.value));
+ } else {
+ extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
+ }
+ return extra_ic_state;
+}
+
+
+// static
+void BinaryOpIC::State::GenerateAheadOfTime(
+ Isolate* isolate, void (*Generate)(Isolate*, const State&)) {
+ // TODO(olivf) We should investigate why adding stubs to the snapshot is so
+ // expensive at runtime. When solved we should be able to add most binops to
+ // the snapshot instead of hand-picking them.
+ // Generated list of commonly used stubs
+#define GENERATE(op, left_kind, right_kind, result_kind, mode) \
+ do { \
+ State state(op, mode); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = false; \
+ state.right_kind_ = right_kind; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
+ } while (false)
+ GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT);
+#undef GENERATE
+#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
+ do { \
+ State state(op, mode); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = true; \
+ state.fixed_right_arg_.value = fixed_right_arg_value; \
+ state.right_kind_ = SMI; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
+ } while (false)
+ GENERATE(Token::MOD, SMI, 2, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 4, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, 8, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, 32, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE);
+#undef GENERATE
+}
+
+
+Type* BinaryOpIC::State::GetResultType(Zone* zone) const {
+ Kind result_kind = result_kind_;
+ if (HasSideEffects()) {
+ result_kind = NONE;
+ } else if (result_kind == GENERIC && op_ == Token::ADD) {
+ return Type::Union(Type::Number(zone), Type::String(zone), zone);
+ } else if (result_kind == NUMBER && op_ == Token::SHR) {
+ return Type::Unsigned32(zone);
+ }
+ ASSERT_NE(GENERIC, result_kind);
+ return KindToType(result_kind, zone);
+}
+
+
+void BinaryOpIC::State::Print(StringStream* stream) const {
+ stream->Add("(%s", Token::Name(op_));
+ if (mode_ == OVERWRITE_LEFT) stream->Add("_ReuseLeft");
+ else if (mode_ == OVERWRITE_RIGHT) stream->Add("_ReuseRight");
+ if (CouldCreateAllocationMementos()) stream->Add("_CreateAllocationMementos");
+ stream->Add(":%s*", KindToString(left_kind_));
+ if (fixed_right_arg_.has_value) {
+ stream->Add("%d", fixed_right_arg_.value);
+ } else {
+ stream->Add("%s", KindToString(right_kind_));
+ }
+ stream->Add("->%s)", KindToString(result_kind_));
+}
+
+
+void BinaryOpIC::State::Update(Handle<Object> left,
+ Handle<Object> right,
+ Handle<Object> result) {
+ ExtraICState old_extra_ic_state = GetExtraICState();
+
+ left_kind_ = UpdateKind(left, left_kind_);
+ right_kind_ = UpdateKind(right, right_kind_);
+
+ int32_t fixed_right_arg_value = 0;
+ bool has_fixed_right_arg =
+ op_ == Token::MOD &&
+ right->ToInt32(&fixed_right_arg_value) &&
+ fixed_right_arg_value > 0 &&
+ IsPowerOf2(fixed_right_arg_value) &&
+ FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
+ (left_kind_ == SMI || left_kind_ == INT32) &&
+ (result_kind_ == NONE || !fixed_right_arg_.has_value);
+ fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg,
+ fixed_right_arg_value);
+
+ result_kind_ = UpdateKind(result, result_kind_);
+
+ if (!Token::IsTruncatingBinaryOp(op_)) {
+ Kind input_kind = Max(left_kind_, right_kind_);
+ if (result_kind_ < input_kind && input_kind <= NUMBER) {
+ result_kind_ = input_kind;
+ }
+ }
+
+ // We don't want to distinguish INT32 and NUMBER for string add (because
+ // NumberToString can't make use of this anyway).
+ if (left_kind_ == STRING && right_kind_ == INT32) {
+ ASSERT_EQ(STRING, result_kind_);
+ ASSERT_EQ(Token::ADD, op_);
+ right_kind_ = NUMBER;
+ } else if (right_kind_ == STRING && left_kind_ == INT32) {
+ ASSERT_EQ(STRING, result_kind_);
+ ASSERT_EQ(Token::ADD, op_);
+ left_kind_ = NUMBER;
+ }
+
+ // Reset overwrite mode unless we can actually make use of it, or may be able
+ // to make use of it at some point in the future.
+ if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
+ (mode_ == OVERWRITE_RIGHT && right_kind_ > NUMBER) ||
+ result_kind_ > NUMBER) {
+ mode_ = NO_OVERWRITE;
+ }
+
+ if (old_extra_ic_state == GetExtraICState()) {
+ // Tagged operations can lead to non-truncating HChanges
+ if (left->IsUndefined() || left->IsBoolean()) {
+ left_kind_ = GENERIC;
+ } else if (right->IsUndefined() || right->IsBoolean()) {
+ right_kind_ = GENERIC;
+ } else {
+ // Since the X87 is too precise, we might bail out on numbers which
+ // actually would truncate with 64 bit precision.
+ ASSERT(!CpuFeatures::IsSupported(SSE2));
+ ASSERT(result_kind_ < NUMBER);
+ result_kind_ = NUMBER;
+ }
+ }
}
-const char* BinaryOpIC::GetName(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED: return "Uninitialized";
+BinaryOpIC::State::Kind BinaryOpIC::State::UpdateKind(Handle<Object> object,
+ Kind kind) const {
+ Kind new_kind = GENERIC;
+ bool is_truncating = Token::IsTruncatingBinaryOp(op());
+ if (object->IsBoolean() && is_truncating) {
+ // Booleans will be automatically truncated by HChange.
+ new_kind = INT32;
+ } else if (object->IsUndefined()) {
+ // Undefined will be automatically truncated by HChange.
+ new_kind = is_truncating ? INT32 : NUMBER;
+ } else if (object->IsSmi()) {
+ new_kind = SMI;
+ } else if (object->IsHeapNumber()) {
+ double value = Handle<HeapNumber>::cast(object)->value();
+ new_kind = IsInt32Double(value) ? INT32 : NUMBER;
+ } else if (object->IsString() && op() == Token::ADD) {
+ new_kind = STRING;
+ }
+ if (new_kind == INT32 && SmiValuesAre32Bits()) {
+ new_kind = NUMBER;
+ }
+ if (kind != NONE &&
+ ((new_kind <= NUMBER && kind > NUMBER) ||
+ (new_kind > NUMBER && kind <= NUMBER))) {
+ new_kind = GENERIC;
+ }
+ return Max(kind, new_kind);
+}
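UpdateKind widens an operand's recorded kind monotonically along NONE < SMI < INT32 < NUMBER < STRING < GENERIC, and degrades straight to GENERIC once a numeric kind (SMI/INT32/NUMBER) is mixed with a non-numeric one. A standalone sketch of just that generalization step, mirroring the Kind enum declared in the State class later in this diff (the HChange truncation details are elided):

#include <algorithm>
#include <cassert>

// Mirrors BinaryOpIC::State's private Kind enum.
enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };

// Sketch of the widening rule at the end of State::UpdateKind.
Kind Generalize(Kind old_kind, Kind new_kind) {
  if (old_kind != NONE &&
      ((new_kind <= NUMBER && old_kind > NUMBER) ||
       (new_kind > NUMBER && old_kind <= NUMBER))) {
    new_kind = GENERIC;  // numeric and non-numeric kinds do not mix.
  }
  return std::max(old_kind, new_kind);
}

int main() {
  assert(Generalize(NONE, SMI) == SMI);           // first smi seen.
  assert(Generalize(SMI, NUMBER) == NUMBER);      // widen within numbers.
  assert(Generalize(NUMBER, STRING) == GENERIC);  // number mixed with string.
  assert(Generalize(STRING, STRING) == STRING);   // string add stays STRING.
  return 0;
}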
+
+
+// static
+const char* BinaryOpIC::State::KindToString(Kind kind) {
+ switch (kind) {
+ case NONE: return "None";
case SMI: return "Smi";
case INT32: return "Int32";
case NUMBER: return "Number";
- case ODDBALL: return "Oddball";
case STRING: return "String";
case GENERIC: return "Generic";
- default: return "Invalid";
}
+ UNREACHABLE();
+ return NULL;
+}
+
+
+// static
+Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
+ switch (kind) {
+ case NONE: return Type::None(zone);
+ case SMI: return Type::Smi(zone);
+ case INT32: return Type::Signed32(zone);
+ case NUMBER: return Type::Number(zone);
+ case STRING: return Type::String(zone);
+ case GENERIC: return Type::Any(zone);
+ }
+ UNREACHABLE();
+ return NULL;
}
-MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
- Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
- BinaryOpStub stub(extra_ic_state);
+MaybeObject* BinaryOpIC::Transition(Handle<AllocationSite> allocation_site,
+ Handle<Object> left,
+ Handle<Object> right) {
+ State state(target()->extra_ic_state());
- Handle<Type> left_type = stub.GetLeftType(isolate());
- Handle<Type> right_type = stub.GetRightType(isolate());
- bool smi_was_enabled = left_type->Maybe(Type::Smi()) &&
- right_type->Maybe(Type::Smi());
+ // Compute the actual result using the builtin for the binary operation.
+ Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
+ TokenToJSBuiltin(state.op()));
+ Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
+ bool caught_exception;
+ Handle<Object> result = Execution::Call(
+ isolate(), function, left, 1, &right, &caught_exception);
+ if (caught_exception) return Failure::Exception();
- Maybe<Handle<Object> > result = stub.Result(left, right, isolate());
- if (!result.has_value) return Failure::Exception();
+ // Compute the new state.
+ State old_state = state;
+ state.Update(left, right, result);
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- char buffer[100];
- NoAllocationStringAllocator allocator(buffer,
- static_cast<unsigned>(sizeof(buffer)));
- StringStream stream(&allocator);
- stream.Add("[");
- stub.PrintName(&stream);
+ // Check if we have a string operation here.
+ Handle<Code> target;
+ if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
+    // Set up the allocation site on demand.
+ if (allocation_site.is_null()) {
+ allocation_site = isolate()->factory()->NewAllocationSite();
+ }
+
+ // Install the stub with an allocation site.
+ BinaryOpICWithAllocationSiteStub stub(state);
+ target = stub.GetCodeCopyFromTemplate(isolate(), allocation_site);
- stub.UpdateStatus(left, right, result);
+ // Sanity check the trampoline stub.
+ ASSERT_EQ(*allocation_site, target->FindFirstAllocationSite());
+ } else {
+ // Install the generic stub.
+ BinaryOpICStub stub(state);
+ target = stub.GetCode(isolate());
+ // Sanity check the generic stub.
+ ASSERT_EQ(NULL, target->FindFirstAllocationSite());
+ }
+ set_target(*target);
+
+ if (FLAG_trace_ic) {
+ char buffer[150];
+ NoAllocationStringAllocator allocator(
+ buffer, static_cast<unsigned>(sizeof(buffer)));
+ StringStream stream(&allocator);
+ stream.Add("[BinaryOpIC");
+ old_state.Print(&stream);
stream.Add(" => ");
- stub.PrintState(&stream);
- stream.Add(" ");
+ state.Print(&stream);
+ stream.Add(" @ %p <- ", static_cast<void*>(*target));
stream.OutputToStdOut();
- PrintF(" @ %p <- ", static_cast<void*>(*stub.GetCode(isolate())));
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ if (!allocation_site.is_null()) {
+ PrintF(" using allocation site %p", static_cast<void*>(*allocation_site));
+ }
PrintF("]\n");
- } else {
- stub.UpdateStatus(left, right, result);
}
-#else
- stub.UpdateStatus(left, right, result);
-#endif
- Handle<Code> code = stub.GetCode(isolate());
- set_target(*code);
-
- left_type = stub.GetLeftType(isolate());
- right_type = stub.GetRightType(isolate());
- bool enable_smi = left_type->Maybe(Type::Smi()) &&
- right_type->Maybe(Type::Smi());
-
- if (!smi_was_enabled && enable_smi) {
+ // Patch the inlined smi code as necessary.
+ if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- } else if (smi_was_enabled && !enable_smi) {
+ } else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
}
- ASSERT(result.has_value);
- return static_cast<MaybeObject*>(*result.value);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) {
HandleScope scope(isolate);
- Handle<Object> left = args.at<Object>(0);
- Handle<Object> right = args.at<Object>(1);
+ ASSERT_EQ(2, args.length());
+ Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
+ Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight);
+ BinaryOpIC ic(isolate);
+ return ic.Transition(Handle<AllocationSite>::null(), left, right);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_MissWithAllocationSite) {
+ HandleScope scope(isolate);
+ ASSERT_EQ(3, args.length());
+ Handle<AllocationSite> allocation_site = args.at<AllocationSite>(
+ BinaryOpWithAllocationSiteStub::kAllocationSite);
+ Handle<Object> left = args.at<Object>(
+ BinaryOpWithAllocationSiteStub::kLeft);
+ Handle<Object> right = args.at<Object>(
+ BinaryOpWithAllocationSiteStub::kRight);
BinaryOpIC ic(isolate);
- return ic.Transition(left, right);
+ return ic.Transition(allocation_site, left, right);
}
@@ -2392,48 +2470,39 @@ const char* CompareIC::GetStateName(State state) {
}
-Handle<Type> CompareIC::StateToType(
- Isolate* isolate,
+Type* CompareIC::StateToType(
+ Zone* zone,
CompareIC::State state,
Handle<Map> map) {
switch (state) {
- case CompareIC::UNINITIALIZED:
- return handle(Type::None(), isolate);
- case CompareIC::SMI:
- return handle(Type::Smi(), isolate);
- case CompareIC::NUMBER:
- return handle(Type::Number(), isolate);
- case CompareIC::STRING:
- return handle(Type::String(), isolate);
- case CompareIC::INTERNALIZED_STRING:
- return handle(Type::InternalizedString(), isolate);
- case CompareIC::UNIQUE_NAME:
- return handle(Type::UniqueName(), isolate);
- case CompareIC::OBJECT:
- return handle(Type::Receiver(), isolate);
+ case CompareIC::UNINITIALIZED: return Type::None(zone);
+ case CompareIC::SMI: return Type::Smi(zone);
+ case CompareIC::NUMBER: return Type::Number(zone);
+ case CompareIC::STRING: return Type::String(zone);
+ case CompareIC::INTERNALIZED_STRING: return Type::InternalizedString(zone);
+ case CompareIC::UNIQUE_NAME: return Type::UniqueName(zone);
+ case CompareIC::OBJECT: return Type::Receiver(zone);
case CompareIC::KNOWN_OBJECT:
- return handle(
- map.is_null() ? Type::Receiver() : Type::Class(map), isolate);
- case CompareIC::GENERIC:
- return handle(Type::Any(), isolate);
+ return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
+ case CompareIC::GENERIC: return Type::Any(zone);
}
UNREACHABLE();
- return Handle<Type>();
+ return NULL;
}
void CompareIC::StubInfoToType(int stub_minor_key,
- Handle<Type>* left_type,
- Handle<Type>* right_type,
- Handle<Type>* overall_type,
+ Type** left_type,
+ Type** right_type,
+ Type** overall_type,
Handle<Map> map,
- Isolate* isolate) {
+ Zone* zone) {
State left_state, right_state, handler_state;
ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
&handler_state, NULL);
- *left_type = StateToType(isolate, left_state);
- *right_type = StateToType(isolate, right_state);
- *overall_type = StateToType(isolate, handler_state, map);
+ *left_type = StateToType(zone, left_state);
+ *right_type = StateToType(zone, right_state);
+ *overall_type = StateToType(zone, handler_state, map);
}
@@ -2543,7 +2612,7 @@ CompareIC::State CompareIC::TargetState(State old_state,
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope(isolate());
State previous_left, previous_right, previous_state;
ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
@@ -2557,9 +2626,9 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
stub.set_known_map(
Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
}
- set_target(*stub.GetCode(isolate()));
+ Handle<Code> new_target = stub.GetCode(isolate());
+ set_target(*new_target);
-#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[CompareIC in ");
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
@@ -2573,12 +2642,13 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
Token::Name(op_),
static_cast<void*>(*stub.GetCode(isolate())));
}
-#endif
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
}
+
+ return *new_target;
}
@@ -2587,14 +2657,13 @@ RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
- ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
- return ic.raw_target();
+ return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
}
void CompareNilIC::Clear(Address address, Code* target) {
if (IsCleared(target)) return;
- Code::ExtraICState state = target->extended_extra_ic_state();
+ ExtraICState state = target->extra_ic_state();
CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
stub.ClearState();
@@ -2616,7 +2685,7 @@ MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil,
MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
- Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+ ExtraICState extra_ic_state = target()->extra_ic_state();
CompareNilICStub stub(extra_ic_state);
@@ -2631,8 +2700,8 @@ MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
// Find or create the specialized stub to support the new set of types.
Handle<Code> code;
if (stub.IsMonomorphic()) {
- Handle<Map> monomorphic_map(already_monomorphic
- ? target()->FindFirstMap()
+ Handle<Map> monomorphic_map(already_monomorphic && (first_map() != NULL)
+ ? first_map()
: HeapObject::cast(*object)->map());
code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map, stub);
} else {
@@ -2699,9 +2768,8 @@ Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
}
-MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object,
- Code::ExtraICState extra_ic_state) {
- ToBooleanStub stub(extra_ic_state);
+MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object) {
+ ToBooleanStub stub(target()->extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode(isolate());
set_target(*code);
@@ -2714,8 +2782,7 @@ RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss) {
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
ToBooleanIC ic(isolate);
- Code::ExtraICState extra_ic_state = ic.target()->extended_extra_ic_state();
- return ic.ToBoolean(object, extra_ic_state);
+ return ic.ToBoolean(object);
}
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index fde4bc77a5..fce585f6d7 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -29,26 +29,24 @@
#define V8_IC_H_
#include "macro-assembler.h"
-#include "type-info.h"
namespace v8 {
namespace internal {
+const int kMaxKeyedPolymorphism = 4;
+
+
// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
#define IC_UTIL_LIST(ICU) \
ICU(LoadIC_Miss) \
ICU(KeyedLoadIC_Miss) \
- ICU(KeyedLoadIC_MissForceGeneric) \
- ICU(CallIC_Miss) \
- ICU(KeyedCallIC_Miss) \
ICU(StoreIC_Miss) \
ICU(StoreIC_ArrayLength) \
ICU(StoreIC_Slow) \
ICU(SharedStoreIC_ExtendStorage) \
ICU(KeyedStoreIC_Miss) \
- ICU(KeyedStoreIC_MissForceGeneric) \
ICU(KeyedStoreIC_Slow) \
/* Utilities for IC stubs. */ \
ICU(StoreCallbackProperty) \
@@ -63,8 +61,7 @@ namespace internal {
ICU(Unreachable) \
ICU(ToBooleanIC_Miss)
//
-// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
-// and KeyedStoreIC.
+// IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC.
//
class IC {
public:
@@ -94,10 +91,6 @@ class IC {
IC(FrameDepth depth, Isolate* isolate);
virtual ~IC() {}
- // Get the call-site target; used for determining the state.
- Handle<Code> target() const { return target_; }
- Code* raw_target() const { return GetTargetAtAddress(address()); }
-
State state() const { return state_; }
inline Address address() const;
@@ -110,40 +103,66 @@ class IC {
// Clear the inline cache to initial state.
static void Clear(Isolate* isolate, Address address);
- // Computes the reloc info for this IC. This is a fairly expensive
- // operation as it has to search through the heap to find the code
- // object that contains this IC site.
- RelocInfo::Mode ComputeMode();
-
- // Returns if this IC is for contextual (no explicit receiver)
- // access to properties.
- bool IsUndeclaredGlobal(Handle<Object> receiver) {
- if (receiver->IsGlobalObject()) {
- return SlowIsUndeclaredGlobal();
- } else {
- ASSERT(!SlowIsUndeclaredGlobal());
- return false;
- }
+#ifdef DEBUG
+ bool IsLoadStub() const {
+ return target()->is_load_stub() || target()->is_keyed_load_stub();
}
- bool SlowIsUndeclaredGlobal() {
- return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
+ bool IsStoreStub() const {
+ return target()->is_store_stub() || target()->is_keyed_store_stub();
}
+#endif
// Determines which map must be used for keeping the code stub.
// These methods should not be called with undefined or null.
- static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
- JSObject* holder);
- static inline JSObject* GetCodeCacheHolder(Isolate* isolate,
- Object* object,
- InlineCacheHolderFlag holder);
+ static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object);
+  // TODO(verwaest): This currently returns a HeapObject rather than JSObject*
+  // since the ICs for loading the length from strings are stored on the
+  // string map directly, rather than on the JSObject-typed prototype.
+ static inline HeapObject* GetCodeCacheHolder(Isolate* isolate,
+ Object* object,
+ InlineCacheHolderFlag holder);
+
+ static inline InlineCacheHolderFlag GetCodeCacheFlag(HeapType* type);
+ static inline Handle<Map> GetCodeCacheHolder(InlineCacheHolderFlag flag,
+ HeapType* type,
+ Isolate* isolate);
static bool IsCleared(Code* code) {
InlineCacheState state = code->ic_state();
return state == UNINITIALIZED || state == PREMONOMORPHIC;
}
+ // Utility functions to convert maps to types and back. There are two special
+ // cases:
+ // - The heap_number_map is used as a marker which includes heap numbers as
+ // well as smis.
+ // - The oddball map is only used for booleans.
+ static Handle<Map> TypeToMap(HeapType* type, Isolate* isolate);
+ template <class T>
+ static typename T::TypeHandle MapToType(Handle<Map> map,
+ typename T::Region* region);
+
+ static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
+ Isolate* isolate);
+
protected:
+ // Get the call-site target; used for determining the state.
+ Handle<Code> target() const { return target_; }
+
+ TypeHandleList* types() { return &types_; }
+ CodeHandleList* handlers() { return &handlers_; }
+ Map* first_map() {
+ return types_.length() == 0 ? NULL : *TypeToMap(*types_.at(0), isolate_);
+ }
+ Code* first_handler() {
+ return handlers_.length() == 0 ? NULL : *handlers_.at(0);
+ }
+ void GetMapsFromTypes(MapHandleList* maps) {
+ for (int i = 0; i < types_.length(); ++i) {
+ maps->Add(TypeToMap(*types_.at(i), isolate_));
+ }
+ }
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
Isolate* isolate() const { return isolate_; }
@@ -180,30 +199,33 @@ class IC {
// Compute the handler either by compiling or by retrieving a cached version.
Handle<Code> ComputeHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
+ Handle<Object> object,
Handle<String> name,
Handle<Object> value = Handle<Code>::null());
virtual Handle<Code> CompileHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
+ Handle<Object> object,
Handle<String> name,
- Handle<Object> value) {
+ Handle<Object> value,
+ InlineCacheHolderFlag cache_holder) {
UNREACHABLE();
return Handle<Code>::null();
}
- void UpdateMonomorphicIC(Handle<HeapObject> receiver,
+
+ void UpdateMonomorphicIC(Handle<HeapType> type,
Handle<Code> handler,
Handle<String> name);
- bool UpdatePolymorphicIC(Handle<HeapObject> receiver,
+ bool UpdatePolymorphicIC(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code);
+ virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
+
void CopyICToMegamorphicCache(Handle<String> name);
- bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map);
- void PatchCache(Handle<HeapObject> receiver,
+ bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
+ void PatchCache(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code);
- virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
virtual Code::Kind kind() const {
UNREACHABLE();
return Code::STUB;
@@ -220,12 +242,19 @@ class IC {
UNREACHABLE();
return Handle<Code>::null();
}
- virtual StrictModeFlag strict_mode() const { return kNonStrictMode; }
+
bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
Handle<String> name);
void TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name);
+ ExtraICState extra_ic_state() const { return extra_ic_state_; }
+ void set_extra_ic_state(ExtraICState state) {
+ extra_ic_state_ = state;
+ }
+
private:
+ Code* raw_target() const { return GetTargetAtAddress(address()); }
+
// Frame pointer for the frame that uses (calls) the IC.
Address fp_;
@@ -242,6 +271,11 @@ class IC {
State state_;
bool target_set_;
+ ExtraICState extra_ic_state_;
+
+ TypeHandleList types_;
+ CodeHandleList handlers_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
@@ -262,133 +296,38 @@ class IC_Utility {
};
-class CallICBase: public IC {
- public:
- class Contextual: public BitField<bool, 0, 1> {};
- class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
-
- // Returns a JSFunction or a Failure.
- MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
- Handle<String> name);
-
- protected:
- CallICBase(Code::Kind kind, Isolate* isolate)
- : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
-
- virtual Code::ExtraICState extra_ic_state() { return Code::kNoExtraICState; }
-
- // Compute a monomorphic stub if possible, otherwise return a null handle.
- Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
- Handle<Object> object,
- Handle<String> name);
-
- // Update the inline cache and the global stub cache based on the lookup
- // result.
- void UpdateCaches(LookupResult* lookup,
- Handle<Object> object,
- Handle<String> name);
-
- // Returns a JSFunction if the object can be called as a function, and
- // patches the stack to be ready for the call. Otherwise, it returns the
- // undefined value.
- Handle<Object> TryCallAsFunction(Handle<Object> object);
-
- void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
-
- static void Clear(Address address, Code* target);
-
- // Platform-specific code generation functions used by both call and
- // keyed call.
- static void GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state);
-
- static void GenerateNormal(MacroAssembler* masm, int argc);
-
- static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state);
-
- virtual Handle<Code> megamorphic_stub();
- virtual Handle<Code> pre_monomorphic_stub();
-
- Code::Kind kind_;
-
- friend class IC;
-};
-
-
-class CallIC: public CallICBase {
+class LoadIC: public IC {
public:
- explicit CallIC(Isolate* isolate)
- : CallICBase(Code::CALL_IC, isolate),
- extra_ic_state_(target()->extra_ic_state()) {
- ASSERT(target()->is_call_stub());
- }
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_state) {
- GenerateMiss(masm, argc, extra_state);
- }
+ // ExtraICState bits
+ class ContextualModeBits: public BitField<ContextualMode, 0, 1> {};
+ STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
- static void GenerateMiss(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_state) {
- CallICBase::GenerateMiss(masm, argc, IC::kCallIC_Miss, extra_state);
+ static ExtraICState ComputeExtraICState(ContextualMode contextual_mode) {
+ return ContextualModeBits::encode(contextual_mode);
}
- static void GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state);
-
- static void GenerateNormal(MacroAssembler* masm, int argc) {
- CallICBase::GenerateNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
+ static ContextualMode GetContextualMode(ExtraICState state) {
+ return ContextualModeBits::decode(state);
}
- bool TryUpdateExtraICState(LookupResult* lookup, Handle<Object> object);
-
- protected:
- virtual Code::ExtraICState extra_ic_state() { return extra_ic_state_; }
-
- private:
- Code::ExtraICState extra_ic_state_;
-};
-
-class KeyedCallIC: public CallICBase {
- public:
- explicit KeyedCallIC(Isolate* isolate)
- : CallICBase(Code::KEYED_CALL_IC, isolate) {
- ASSERT(target()->is_keyed_call_stub());
- }
-
- MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
- Handle<Object> key);
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm, int argc) {
- GenerateMiss(masm, argc);
+ ContextualMode contextual_mode() const {
+ return ContextualModeBits::decode(extra_ic_state());
}
- static void GenerateMiss(MacroAssembler* masm, int argc) {
- CallICBase::GenerateMiss(masm, argc, IC::kKeyedCallIC_Miss,
- Code::kNoExtraICState);
+ explicit LoadIC(FrameDepth depth, Isolate* isolate)
+ : IC(depth, isolate) {
+ ASSERT(IsLoadStub());
}
- static void GenerateMegamorphic(MacroAssembler* masm, int argc);
- static void GenerateNormal(MacroAssembler* masm, int argc);
- static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
-};
-
-
-class LoadIC: public IC {
- public:
- explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
- ASSERT(target()->is_load_stub() || target()->is_keyed_load_stub());
+ // Returns if this IC is for contextual (no explicit receiver)
+ // access to properties.
+ bool IsUndeclaredGlobal(Handle<Object> receiver) {
+ if (receiver->IsGlobalObject()) {
+ return contextual_mode() == CONTEXTUAL;
+ } else {
+ ASSERT(contextual_mode() != CONTEXTUAL);
+ return false;
+ }
}
// Code generator routines.
@@ -401,19 +340,28 @@ class LoadIC: public IC {
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ ExtraICState extra_state);
+
MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
Handle<String> name);
protected:
virtual Code::Kind kind() const { return Code::LOAD_IC; }
+ void set_target(Code* code) {
+ // The contextual mode must be preserved across IC patching.
+ ASSERT(GetContextualMode(code->extra_ic_state()) ==
+ GetContextualMode(target()->extra_ic_state()));
+
+ IC::set_target(code);
+ }
+
virtual Handle<Code> slow_stub() const {
return isolate()->builtins()->LoadIC_Slow();
}
- virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->LoadIC_Megamorphic();
- }
+ virtual Handle<Code> megamorphic_stub();
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -422,22 +370,18 @@ class LoadIC: public IC {
Handle<String> name);
virtual Handle<Code> CompileHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
+ Handle<Object> object,
Handle<String> name,
- Handle<Object> unused);
+ Handle<Object> unused,
+ InlineCacheHolderFlag cache_holder);
private:
// Stub accessors.
- static Handle<Code> initialize_stub(Isolate* isolate) {
- return isolate->builtins()->LoadIC_Initialize();
- }
-
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
- return isolate->builtins()->LoadIC_PreMonomorphic();
- }
+  static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+                                           ExtraICState extra_state);
virtual Handle<Code> pre_monomorphic_stub() {
- return pre_monomorphic_stub(isolate());
+ return pre_monomorphic_stub(isolate(), extra_ic_state());
}
Handle<Code> SimpleFieldLoad(int offset,
@@ -451,12 +395,6 @@ class LoadIC: public IC {
};
-enum ICMissMode {
- MISS_FORCE_GENERIC,
- MISS
-};
-
-
class KeyedLoadIC: public LoadIC {
public:
explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
@@ -465,17 +403,14 @@ class KeyedLoadIC: public LoadIC {
}
MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
- Handle<Object> key,
- ICMissMode force_generic);
+ Handle<Object> key);
// Code generator routines.
- static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
+ static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
- }
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
@@ -504,13 +439,10 @@ class KeyedLoadIC: public LoadIC {
return isolate()->builtins()->KeyedLoadIC_Slow();
}
- virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
+ virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {}
private:
// Stub accessors.
- static Handle<Code> initialize_stub(Isolate* isolate) {
- return isolate->builtins()->KeyedLoadIC_Initialize();
- }
static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
}
@@ -535,13 +467,28 @@ class KeyedLoadIC: public LoadIC {
class StoreIC: public IC {
public:
+ class StrictModeState: public BitField<StrictModeFlag, 1, 1> {};
+ static ExtraICState ComputeExtraICState(StrictModeFlag flag) {
+ return StrictModeState::encode(flag);
+ }
+
+ static StrictModeFlag GetStrictMode(ExtraICState state) {
+ return StrictModeState::decode(state);
+ }
+
+ // For convenience, a statically declared encoding of strict mode extra
+ // IC state.
+ static const ExtraICState kStrictModeState =
+ 1 << StrictModeState::kShift;
+
StoreIC(FrameDepth depth, Isolate* isolate)
- : IC(depth, isolate),
- strict_mode_(Code::GetStrictMode(target()->extra_ic_state())) {
- ASSERT(target()->is_store_stub() || target()->is_keyed_store_stub());
+ : IC(depth, isolate) {
+ ASSERT(IsStoreStub());
}
- virtual StrictModeFlag strict_mode() const { return strict_mode_; }
+ StrictModeFlag strict_mode() const {
+ return StrictModeState::decode(extra_ic_state());
+ }
// Code generators for stub routines. Only called once at startup.
static void GenerateSlow(MacroAssembler* masm);
@@ -550,12 +497,14 @@ class StoreIC: public IC {
GenerateMiss(masm);
}
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode);
+ static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ StrictModeFlag strict_mode);
+
MUST_USE_RESULT MaybeObject* Store(
Handle<Object> object,
Handle<String> name,
@@ -565,28 +514,13 @@ class StoreIC: public IC {
protected:
virtual Code::Kind kind() const { return Code::STORE_IC; }
- virtual Handle<Code> megamorphic_stub() {
- if (strict_mode() == kStrictMode) {
- return isolate()->builtins()->StoreIC_Megamorphic_Strict();
- } else {
- return isolate()->builtins()->StoreIC_Megamorphic();
- }
- }
+ virtual Handle<Code> megamorphic_stub();
+
// Stub accessors.
- virtual Handle<Code> generic_stub() const {
- if (strict_mode() == kStrictMode) {
- return isolate()->builtins()->StoreIC_Generic_Strict();
- } else {
- return isolate()->builtins()->StoreIC_Generic();
- }
- }
+ virtual Handle<Code> generic_stub() const;
virtual Handle<Code> slow_stub() const {
- if (strict_mode() == kStrictMode) {
- return isolate()->builtins()->StoreIC_Slow_Strict();
- } else {
- return isolate()->builtins()->StoreIC_Slow();
- }
+ return isolate()->builtins()->StoreIC_Slow();
}
virtual Handle<Code> pre_monomorphic_stub() {
@@ -594,21 +528,7 @@ class StoreIC: public IC {
}
static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
- if (strict_mode == kStrictMode) {
- return isolate->builtins()->StoreIC_PreMonomorphic_Strict();
- } else {
- return isolate->builtins()->StoreIC_PreMonomorphic();
- }
- }
-
- virtual Handle<Code> global_proxy_stub() {
- if (strict_mode() == kStrictMode) {
- return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
- } else {
- return isolate()->builtins()->StoreIC_GlobalProxy();
- }
- }
+ StrictModeFlag strict_mode);
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -617,31 +537,21 @@ class StoreIC: public IC {
Handle<String> name,
Handle<Object> value);
virtual Handle<Code> CompileHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
+ Handle<Object> object,
Handle<String> name,
- Handle<Object> value);
+ Handle<Object> value,
+ InlineCacheHolderFlag cache_holder);
private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
- ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
- Code::GetStrictMode(target()->extra_ic_state()));
+ ASSERT(GetStrictMode(code->extra_ic_state()) ==
+ GetStrictMode(target()->extra_ic_state()));
IC::set_target(code);
}
- static Handle<Code> initialize_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
- if (strict_mode == kStrictMode) {
- return isolate->builtins()->StoreIC_Initialize_Strict();
- } else {
- return isolate->builtins()->StoreIC_Initialize();
- }
- }
-
static void Clear(Isolate* isolate, Address address, Code* target);
- StrictModeFlag strict_mode_;
-
friend class IC;
};
@@ -660,6 +570,22 @@ enum KeyedStoreIncrementLength {
class KeyedStoreIC: public StoreIC {
public:
+  // ExtraICState bits (building on IC)
+ class ExtraICStateKeyedAccessStoreMode:
+ public BitField<KeyedAccessStoreMode, 2, 4> {}; // NOLINT
+
+ static ExtraICState ComputeExtraICState(StrictModeFlag flag,
+ KeyedAccessStoreMode mode) {
+ return StrictModeState::encode(flag) |
+ ExtraICStateKeyedAccessStoreMode::encode(mode);
+ }
+
+ static KeyedAccessStoreMode GetKeyedAccessStoreMode(
+ ExtraICState extra_state) {
+ return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
+ }
+
KeyedStoreIC(FrameDepth depth, Isolate* isolate)
: StoreIC(depth, isolate) {
ASSERT(target()->is_keyed_store_stub());
@@ -667,17 +593,14 @@ class KeyedStoreIC: public StoreIC {
MUST_USE_RESULT MaybeObject* Store(Handle<Object> object,
Handle<Object> name,
- Handle<Object> value,
- ICMissMode force_generic);
+ Handle<Object> value);
// Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
- }
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
- static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
+ static void GenerateMiss(MacroAssembler* masm);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
@@ -687,7 +610,7 @@ class KeyedStoreIC: public StoreIC {
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
+ virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {}
virtual Handle<Code> pre_monomorphic_stub() {
return pre_monomorphic_stub(isolate(), strict_mode());
@@ -701,11 +624,7 @@ class KeyedStoreIC: public StoreIC {
}
}
virtual Handle<Code> slow_stub() const {
- if (strict_mode() == kStrictMode) {
- return isolate()->builtins()->KeyedStoreIC_Slow_Strict();
- } else {
- return isolate()->builtins()->KeyedStoreIC_Slow();
- }
+ return isolate()->builtins()->KeyedStoreIC_Slow();
}
virtual Handle<Code> megamorphic_stub() {
if (strict_mode() == kStrictMode) {
@@ -721,20 +640,11 @@ class KeyedStoreIC: public StoreIC {
private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
- ASSERT(Code::GetStrictMode(code->extra_ic_state()) == strict_mode());
+ ASSERT(GetStrictMode(code->extra_ic_state()) == strict_mode());
IC::set_target(code);
}
// Stub accessors.
- static Handle<Code> initialize_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
- if (strict_mode == kStrictMode) {
- return isolate->builtins()->KeyedStoreIC_Initialize_Strict();
- } else {
- return isolate->builtins()->KeyedStoreIC_Initialize();
- }
- }
-
virtual Handle<Code> generic_stub() const {
if (strict_mode() == kStrictMode) {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
@@ -760,27 +670,134 @@ class KeyedStoreIC: public StoreIC {
};
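In the new layout StoreIC and KeyedStoreIC pack their state into one ExtraICState word: StrictModeState occupies bit 1 and ExtraICStateKeyedAccessStoreMode occupies bits 2..5. A hedged standalone sketch of that packing with plain shifts in place of the BitField templates (the store-mode value below is an arbitrary stand-in, not a real KeyedAccessStoreMode constant):

#include <cassert>
#include <cstdint>

typedef uint32_t ExtraICState;

enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };
enum StoreMode { STANDARD_STORE = 0, GROW_STORE = 5 };  // stand-in values

// StrictModeState: BitField<StrictModeFlag, 1, 1>        -> bit 1.
// ExtraICStateKeyedAccessStoreMode: BitField<..., 2, 4>  -> bits 2..5.
ExtraICState ComputeExtraICState(StrictModeFlag flag, StoreMode mode) {
  return (static_cast<ExtraICState>(flag) << 1) |
         (static_cast<ExtraICState>(mode) << 2);
}

StrictModeFlag GetStrictMode(ExtraICState state) {
  return static_cast<StrictModeFlag>((state >> 1) & 0x1);
}

StoreMode GetKeyedAccessStoreMode(ExtraICState state) {
  return static_cast<StoreMode>((state >> 2) & 0xF);
}

int main() {
  ExtraICState s = ComputeExtraICState(kStrictMode, GROW_STORE);
  assert(GetStrictMode(s) == kStrictMode);
  assert(GetKeyedAccessStoreMode(s) == GROW_STORE);
  assert((s & (1u << 1)) != 0);  // the statically declared kStrictModeState bit.
  return 0;
}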
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+
// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
class BinaryOpIC: public IC {
public:
- enum TypeInfo {
- UNINITIALIZED,
- SMI,
- INT32,
- NUMBER,
- ODDBALL,
- STRING, // Only used for addition operation.
- GENERIC
+ class State V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit State(ExtraICState extra_ic_state);
+
+ State(Token::Value op, OverwriteMode mode)
+ : op_(op), mode_(mode), left_kind_(NONE), right_kind_(NONE),
+ result_kind_(NONE) {
+ ASSERT_LE(FIRST_TOKEN, op);
+ ASSERT_LE(op, LAST_TOKEN);
+ }
+
+ InlineCacheState GetICState() const {
+ if (Max(left_kind_, right_kind_) == NONE) {
+ return ::v8::internal::UNINITIALIZED;
+ }
+ if (Max(left_kind_, right_kind_) == GENERIC) {
+ return ::v8::internal::MEGAMORPHIC;
+ }
+ if (Min(left_kind_, right_kind_) == GENERIC) {
+ return ::v8::internal::GENERIC;
+ }
+ return ::v8::internal::MONOMORPHIC;
+ }
+
+ ExtraICState GetExtraICState() const;
+
+ static void GenerateAheadOfTime(
+ Isolate*, void (*Generate)(Isolate*, const State&));
+
+ bool CanReuseDoubleBox() const {
+ return (result_kind_ > SMI && result_kind_ <= NUMBER) &&
+ ((mode_ == OVERWRITE_LEFT &&
+ left_kind_ > SMI && left_kind_ <= NUMBER) ||
+ (mode_ == OVERWRITE_RIGHT &&
+ right_kind_ > SMI && right_kind_ <= NUMBER));
+ }
+
+ // Returns true if the IC _could_ create allocation mementos.
+ bool CouldCreateAllocationMementos() const {
+ if (left_kind_ == STRING || right_kind_ == STRING) {
+ ASSERT_EQ(Token::ADD, op_);
+ return true;
+ }
+ return false;
+ }
+
+ // Returns true if the IC _should_ create allocation mementos.
+ bool ShouldCreateAllocationMementos() const {
+ return FLAG_allocation_site_pretenuring &&
+ CouldCreateAllocationMementos();
+ }
+
+ bool HasSideEffects() const {
+ return Max(left_kind_, right_kind_) == GENERIC;
+ }
+
+ // Returns true if the IC should enable the inline smi code (i.e. if either
+ // parameter may be a smi).
+ bool UseInlinedSmiCode() const {
+ return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
+ }
+
+ static const int FIRST_TOKEN = Token::BIT_OR;
+ static const int LAST_TOKEN = Token::MOD;
+
+ Token::Value op() const { return op_; }
+ OverwriteMode mode() const { return mode_; }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+
+ Type* GetLeftType(Zone* zone) const {
+ return KindToType(left_kind_, zone);
+ }
+ Type* GetRightType(Zone* zone) const {
+ return KindToType(right_kind_, zone);
+ }
+ Type* GetResultType(Zone* zone) const;
+
+ void Print(StringStream* stream) const;
+
+ void Update(Handle<Object> left,
+ Handle<Object> right,
+ Handle<Object> result);
+
+ private:
+ enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
+
+ Kind UpdateKind(Handle<Object> object, Kind kind) const;
+
+ static const char* KindToString(Kind kind);
+ static Type* KindToType(Kind kind, Zone* zone);
+ static bool KindMaybeSmi(Kind kind) {
+ return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
+ }
+
+ // We truncate the last bit of the token.
+ STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
+ class OpField: public BitField<int, 0, 4> {};
+ class OverwriteModeField: public BitField<OverwriteMode, 4, 2> {};
+ class SSE2Field: public BitField<bool, 6, 1> {};
+ class ResultKindField: public BitField<Kind, 7, 3> {};
+ class LeftKindField: public BitField<Kind, 10, 3> {};
+ // When fixed right arg is set, we don't need to store the right kind.
+ // Thus the two fields can overlap.
+ class HasFixedRightArgField: public BitField<bool, 13, 1> {};
+ class FixedRightArgValueField: public BitField<int, 14, 4> {};
+ class RightKindField: public BitField<Kind, 14, 3> {};
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ Kind left_kind_;
+ Kind right_kind_;
+ Kind result_kind_;
+ Maybe<int> fixed_right_arg_;
};
explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
- static const char* GetName(TypeInfo type_info);
-
- MUST_USE_RESULT MaybeObject* Transition(Handle<Object> left,
- Handle<Object> right);
+ MaybeObject* Transition(Handle<AllocationSite> allocation_site,
+ Handle<Object> left,
+ Handle<Object> right) V8_WARN_UNUSED_RESULT;
};
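One detail of the State bit layout above is worth spelling out: HasFixedRightArgField (bit 13) decides how the bits above it are interpreted, either as FixedRightArgValueField (bits 14..17, the power-of-two exponent) or as RightKindField (bits 14..16), so the two fields can safely overlap. A standalone sketch of that overlapping encoding with plain shifts (the BitField templates do the same work in the real code):

#include <cassert>
#include <cstdint>

typedef uint32_t ExtraICState;

enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };

// Bit 13 is the has_fixed_right_arg flag; bits 14+ hold either the exponent
// of the fixed right argument or the right operand's Kind, never both.
ExtraICState EncodeRight(bool has_fixed_right_arg, int exponent, Kind right_kind) {
  ExtraICState state = static_cast<ExtraICState>(has_fixed_right_arg) << 13;
  if (has_fixed_right_arg) {
    state |= static_cast<ExtraICState>(exponent) << 14;
  } else {
    state |= static_cast<ExtraICState>(right_kind) << 14;
  }
  return state;
}

int main() {
  // "x % 8" with a known power-of-two right argument: store exponent 3.
  ExtraICState fixed = EncodeRight(true, 3, NONE);
  assert(((fixed >> 13) & 1) == 1);
  assert(((fixed >> 14) & 0xF) == 3);

  // Ordinary case: store the right operand's kind instead.
  ExtraICState plain = EncodeRight(false, 0, INT32);
  assert(((plain >> 13) & 1) == 0);
  assert(static_cast<Kind>((plain >> 14) & 0x7) == INT32);
  return 0;
}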
@@ -806,22 +823,22 @@ class CompareIC: public IC {
static State NewInputState(State old_state, Handle<Object> value);
- static Handle<Type> StateToType(Isolate* isolate,
- State state,
- Handle<Map> map = Handle<Map>());
+ static Type* StateToType(Zone* zone,
+ State state,
+ Handle<Map> map = Handle<Map>());
static void StubInfoToType(int stub_minor_key,
- Handle<Type>* left_type,
- Handle<Type>* right_type,
- Handle<Type>* overall_type,
+ Type** left_type,
+ Type** right_type,
+ Type** overall_type,
Handle<Map> map,
- Isolate* isolate);
+ Zone* zone);
CompareIC(Isolate* isolate, Token::Value op)
: IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
// Update the inline cache for the given operands.
- void UpdateCaches(Handle<Object> x, Handle<Object> y);
+ Code* UpdateCaches(Handle<Object> x, Handle<Object> y);
// Factory method for getting an uninitialized compare stub.
@@ -874,7 +891,7 @@ class ToBooleanIC: public IC {
public:
explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
- MaybeObject* ToBoolean(Handle<Object> object, Code::ExtraICState state);
+ MaybeObject* ToBoolean(Handle<Object> object);
};
@@ -888,6 +905,7 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_MissWithAllocationSite);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index 4223dde211..1b9a28a5b7 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -498,12 +498,10 @@ bool IncrementalMarking::WorthActivating() {
// debug tests run with incremental marking and some without.
static const intptr_t kActivationThreshold = 0;
#endif
- // Only start incremental marking in a safe state: 1) when expose GC is
- // deactivated, 2) when incremental marking is turned on, 3) when we are
- // currently not in a GC, and 4) when we are currently not serializing
- // or deserializing the heap.
- return !FLAG_expose_gc &&
- FLAG_incremental_marking &&
+ // Only start incremental marking in a safe state: 1) when incremental
+ // marking is turned on, 2) when we are currently not in a GC, and
+ // 3) when we are currently not serializing or deserializing the heap.
+ return FLAG_incremental_marking &&
FLAG_incremental_marking_steps &&
heap_->gc_state() == Heap::NOT_IN_GC &&
!Serializer::enabled() &&
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index d47c300ef3..a4dd5f3314 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -100,7 +100,7 @@ class IncrementalMarking {
// Do some marking every time this much memory has been allocated or that many
// heavy (color-checking) write barriers have been invoked.
static const intptr_t kAllocatedThreshold = 65536;
- static const intptr_t kWriteBarriersInvokedThreshold = 65536;
+ static const intptr_t kWriteBarriersInvokedThreshold = 32768;
// Start off by marking this many times more memory than has been allocated.
static const intptr_t kInitialMarkingSpeed = 1;
// But if we are promoting a lot of data we need to mark faster to keep up
diff --git a/deps/v8/src/interface.cc b/deps/v8/src/interface.cc
index 603dfe9b86..c39d50e358 100644
--- a/deps/v8/src/interface.cc
+++ b/deps/v8/src/interface.cc
@@ -89,9 +89,10 @@ void Interface::DoAdd(
ZoneHashMap** map = &Chase()->exports_;
ZoneAllocationPolicy allocator(zone);
- if (*map == NULL)
- *map = new ZoneHashMap(Match, ZoneHashMap::kDefaultHashMapCapacity,
- allocator);
+ if (*map == NULL) {
+ *map = new(zone->New(sizeof(ZoneHashMap)))
+ ZoneHashMap(Match, ZoneHashMap::kDefaultHashMapCapacity, allocator);
+ }
ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen(), allocator);
if (p == NULL) {
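The Interface::DoAdd change above swaps a plain heap allocation for placement new into zone memory, so the export map lives exactly as long as its zone and never needs an explicit delete. A minimal sketch of the pattern with a hypothetical bump allocator standing in for v8::internal::Zone:

#include <cassert>
#include <cstddef>
#include <new>

// Hypothetical bump allocator standing in for v8::internal::Zone.
class Zone {
 public:
  void* New(size_t size) {
    assert(used_ + size <= sizeof(buffer_));
    void* result = buffer_ + used_;
    used_ += size;
    return result;
  }
 private:
  alignas(std::max_align_t) char buffer_[4096];
  size_t used_ = 0;
};

struct ExportMap {  // stand-in for ZoneHashMap
  int entries;
  ExportMap() : entries(0) {}
};

int main() {
  Zone zone;
  // Same shape as the DoAdd change: construct the map in zone-owned storage.
  ExportMap* map = new (zone.New(sizeof(ExportMap))) ExportMap();
  map->entries = 1;
  assert(map->entries == 1);
  // No delete: the storage is reclaimed when the zone itself is torn down.
  return 0;
}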
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 71cd301581..ca324603f7 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -29,7 +29,6 @@
#include "v8.h"
-#include "allocation-inl.h"
#include "ast.h"
#include "bootstrapper.h"
#include "codegen.h"
@@ -131,205 +130,6 @@ v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
}
-int SystemThreadManager::NumberOfParallelSystemThreads(
- ParallelSystemComponent type) {
- int number_of_threads = Min(CPU::NumberOfProcessorsOnline(), kMaxThreads);
- ASSERT(number_of_threads > 0);
- if (number_of_threads == 1) {
- return 0;
- }
- if (type == PARALLEL_SWEEPING) {
- return number_of_threads;
- } else if (type == CONCURRENT_SWEEPING) {
- return number_of_threads - 1;
- }
- return 1;
-}
-
-
-// Create a dummy thread that will wait forever on a semaphore. The only
-// purpose for this thread is to have some stack area to save essential data
-// into for use by a stacks only core dump (aka minidump).
-class PreallocatedMemoryThread: public Thread {
- public:
- char* data() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return data_;
- }
-
- unsigned length() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return length_;
- }
-
- // Stop the PreallocatedMemoryThread and release its resources.
- void StopThread() {
- keep_running_ = false;
- wait_for_ever_semaphore_->Signal();
-
- // Wait for the thread to terminate.
- Join();
-
- if (data_ready_semaphore_ != NULL) {
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
-
- delete wait_for_ever_semaphore_;
- wait_for_ever_semaphore_ = NULL;
- }
-
- protected:
- // When the thread starts running it will allocate a fixed number of bytes
- // on the stack and publish the location of this memory for others to use.
- void Run() {
- EmbeddedVector<char, 15 * 1024> local_buffer;
-
- // Initialize the buffer with a known good value.
- OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
- local_buffer.length());
-
- // Publish the local buffer and signal its availability.
- data_ = local_buffer.start();
- length_ = local_buffer.length();
- data_ready_semaphore_->Signal();
-
- while (keep_running_) {
- // This thread will wait here until the end of time.
- wait_for_ever_semaphore_->Wait();
- }
-
- // Make sure we access the buffer after the wait to remove all possibility
- // of it being optimized away.
- OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
- local_buffer.length());
- }
-
-
- private:
- PreallocatedMemoryThread()
- : Thread("v8:PreallocMem"),
- keep_running_(true),
- wait_for_ever_semaphore_(new Semaphore(0)),
- data_ready_semaphore_(new Semaphore(0)),
- data_(NULL),
- length_(0) {
- }
-
- // Used to make sure that the thread keeps looping even for spurious wakeups.
- bool keep_running_;
-
- // This semaphore is used by the PreallocatedMemoryThread to wait for ever.
- Semaphore* wait_for_ever_semaphore_;
- // Semaphore to signal that the data has been initialized.
- Semaphore* data_ready_semaphore_;
-
- // Location and size of the preallocated memory block.
- char* data_;
- unsigned length_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
-};
-
-
-void Isolate::PreallocatedMemoryThreadStart() {
- if (preallocated_memory_thread_ != NULL) return;
- preallocated_memory_thread_ = new PreallocatedMemoryThread();
- preallocated_memory_thread_->Start();
-}
-
-
-void Isolate::PreallocatedMemoryThreadStop() {
- if (preallocated_memory_thread_ == NULL) return;
- preallocated_memory_thread_->StopThread();
- // Done with the thread entirely.
- delete preallocated_memory_thread_;
- preallocated_memory_thread_ = NULL;
-}
-
-
-void Isolate::PreallocatedStorageInit(size_t size) {
- ASSERT(free_list_.next_ == &free_list_);
- ASSERT(free_list_.previous_ == &free_list_);
- PreallocatedStorage* free_chunk =
- reinterpret_cast<PreallocatedStorage*>(new char[size]);
- free_list_.next_ = free_list_.previous_ = free_chunk;
- free_chunk->next_ = free_chunk->previous_ = &free_list_;
- free_chunk->size_ = size - sizeof(PreallocatedStorage);
- preallocated_storage_preallocated_ = true;
-}
-
-
-void* Isolate::PreallocatedStorageNew(size_t size) {
- if (!preallocated_storage_preallocated_) {
- return FreeStoreAllocationPolicy().New(size);
- }
- ASSERT(free_list_.next_ != &free_list_);
- ASSERT(free_list_.previous_ != &free_list_);
-
- size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
- // Search for exact fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ == size) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Search for first fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- PreallocatedStorage* left_over =
- reinterpret_cast<PreallocatedStorage*>(
- reinterpret_cast<char*>(storage + 1) + size);
- left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
- ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
- storage->size_);
- storage->size_ = size;
- left_over->LinkTo(&free_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Allocation failure.
- ASSERT(false);
- return NULL;
-}
-
-
-// We don't attempt to coalesce.
-void Isolate::PreallocatedStorageDelete(void* p) {
- if (p == NULL) {
- return;
- }
- if (!preallocated_storage_preallocated_) {
- FreeStoreAllocationPolicy::Delete(p);
- return;
- }
- PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
- ASSERT(storage->next_->previous_ == storage);
- ASSERT(storage->previous_->next_ == storage);
- storage->Unlink();
- storage->LinkTo(&free_list_);
-}
-
Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
@@ -853,24 +653,12 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
}
-void Isolate::PrintStack() {
- PrintStack(stdout);
-}
-
-
void Isolate::PrintStack(FILE* out) {
if (stack_trace_nesting_level_ == 0) {
stack_trace_nesting_level_++;
-
- StringAllocator* allocator;
- if (preallocated_message_space_ == NULL) {
- allocator = new HeapStringAllocator();
- } else {
- allocator = preallocated_message_space_;
- }
-
StringStream::ClearMentionedObjectCache(this);
- StringStream accumulator(allocator);
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
accumulator.OutputToFile(out);
@@ -878,10 +666,6 @@ void Isolate::PrintStack(FILE* out) {
accumulator.Log(this);
incomplete_message_ = NULL;
stack_trace_nesting_level_ = 0;
- if (preallocated_message_space_ == NULL) {
- // Remove the HeapStringAllocator created above.
- delete allocator;
- }
} else if (stack_trace_nesting_level_ == 1) {
stack_trace_nesting_level_++;
OS::PrintError(
@@ -994,7 +778,7 @@ static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
+ ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// The callers of this method are not expecting a GC.
DisallowHeapAllocation no_gc;
@@ -1045,7 +829,7 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
bool Isolate::MayIndexedAccess(JSObject* receiver,
uint32_t index,
v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
+ ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
ASSERT(context());
@@ -1162,6 +946,7 @@ Failure* Isolate::ReThrow(MaybeObject* exception) {
Failure* Isolate::ThrowIllegalOperation() {
+ if (FLAG_stack_trace_on_illegal) PrintStack(stdout);
return Throw(heap_.illegal_access_string());
}
@@ -1338,8 +1123,6 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// while the bootstrapper is active since the infrastructure may not have
// been properly initialized.
if (!bootstrapping) {
- Handle<String> stack_trace;
- if (FLAG_trace_exception) stack_trace = StackTraceString();
Handle<JSArray> stack_trace_object;
if (capture_stack_trace_for_uncaught_exceptions_) {
if (IsErrorObject(exception_handle)) {
@@ -1379,7 +1162,6 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
"uncaught_exception",
location,
HandleVector<Object>(&exception_arg, 1),
- stack_trace,
stack_trace_object);
thread_local_top()->pending_message_obj_ = *message_obj;
if (location != NULL) {
@@ -1398,7 +1180,7 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
fatal_exception_depth++;
PrintF(stderr,
"%s\n\nFROM\n",
- *MessageHandler::GetLocalizedMessage(this, message_obj));
+ MessageHandler::GetLocalizedMessage(this, message_obj).get());
PrintCurrentStackTrace(stderr);
OS::Abort();
}
@@ -1413,13 +1195,13 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
if (exception->IsString() && location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error: %s in %s at line %d.\n",
- *String::cast(exception)->ToCString(),
- *String::cast(location->script()->name())->ToCString(),
+ String::cast(exception)->ToCString().get(),
+ String::cast(location->script()->name())->ToCString().get(),
line_number + 1);
} else if (location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error in %s at line %d.\n",
- *String::cast(location->script()->name())->ToCString(),
+ String::cast(location->script()->name())->ToCString().get(),
line_number + 1);
} else {
OS::PrintError("Extension or internal compilation error.\n");
@@ -1539,11 +1321,6 @@ MessageLocation Isolate::GetMessageLocation() {
}
-void Isolate::TraceException(bool flag) {
- FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
-}
-
-
bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
ASSERT(has_pending_exception());
PropagatePendingExceptionToExternalTryCatch();
@@ -1735,13 +1512,11 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
Isolate::Isolate()
- : state_(UNINITIALIZED),
- embedder_data_(NULL),
+ : embedder_data_(),
+ state_(UNINITIALIZED),
entry_stack_(NULL),
stack_trace_nesting_level_(0),
incomplete_message_(NULL),
- preallocated_memory_thread_(NULL),
- preallocated_message_space_(NULL),
bootstrapper_(NULL),
runtime_profiler_(NULL),
compilation_cache_(NULL),
@@ -1752,10 +1527,10 @@ Isolate::Isolate()
stats_table_(NULL),
stub_cache_(NULL),
deoptimizer_data_(NULL),
+ materialized_object_store_(NULL),
capture_stack_trace_for_uncaught_exceptions_(false),
stack_trace_for_uncaught_exceptions_frame_limit_(0),
stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
- transcendental_cache_(NULL),
memory_allocator_(NULL),
keyed_lookup_cache_(NULL),
context_slot_cache_(NULL),
@@ -1763,14 +1538,10 @@ Isolate::Isolate()
handle_scope_implementer_(NULL),
unicode_cache_(NULL),
runtime_zone_(this),
- in_use_list_(0),
- free_list_(0),
- preallocated_storage_preallocated_(false),
inner_pointer_to_code_cache_(NULL),
write_iterator_(NULL),
global_handles_(NULL),
eternal_handles_(NULL),
- context_switcher_(NULL),
thread_manager_(NULL),
fp_stubs_generated_(false),
has_installed_extensions_(false),
@@ -1778,6 +1549,7 @@ Isolate::Isolate()
regexp_stack_(NULL),
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
+ call_descriptors_(NULL),
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
@@ -1790,7 +1562,10 @@ Isolate::Isolate()
deferred_handles_head_(NULL),
optimizing_compiler_thread_(NULL),
sweeper_thread_(NULL),
- stress_deopt_count_(0) {
+ num_sweeper_threads_(0),
+ max_available_threads_(0),
+ stress_deopt_count_(0),
+ next_optimization_id_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1806,6 +1581,7 @@ Isolate::Isolate()
thread_manager_->isolate_ = this;
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
simulator_initialized_ = false;
simulator_i_cache_ = NULL;
@@ -1882,17 +1658,23 @@ void Isolate::Deinit() {
debugger()->UnloadDebugger();
#endif
- if (FLAG_concurrent_recompilation) {
+ if (concurrent_recompilation_enabled()) {
optimizing_compiler_thread_->Stop();
delete optimizing_compiler_thread_;
+ optimizing_compiler_thread_ = NULL;
}
- if (FLAG_sweeper_threads > 0) {
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i]->Stop();
- delete sweeper_thread_[i];
- }
- delete[] sweeper_thread_;
+ for (int i = 0; i < num_sweeper_threads_; i++) {
+ sweeper_thread_[i]->Stop();
+ delete sweeper_thread_[i];
+ sweeper_thread_[i] = NULL;
+ }
+ delete[] sweeper_thread_;
+ sweeper_thread_ = NULL;
+
+ if (FLAG_job_based_sweeping &&
+ heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
}
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
@@ -1907,20 +1689,10 @@ void Isolate::Deinit() {
delete deoptimizer_data_;
deoptimizer_data_ = NULL;
- if (FLAG_preemption) {
- v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
- v8::Locker::StopPreemption(reinterpret_cast<v8::Isolate*>(this));
- }
builtins_.TearDown();
bootstrapper_->TearDown();
- // Remove the external reference to the preallocated stack memory.
- delete preallocated_message_space_;
- preallocated_message_space_ = NULL;
- PreallocatedMemoryThreadStop();
-
if (runtime_profiler_ != NULL) {
- runtime_profiler_->TearDown();
delete runtime_profiler_;
runtime_profiler_ = NULL;
}
@@ -1992,6 +1764,9 @@ Isolate::~Isolate() {
delete[] code_stub_interface_descriptors_;
code_stub_interface_descriptors_ = NULL;
+ delete[] call_descriptors_;
+ call_descriptors_ = NULL;
+
delete regexp_stack_;
regexp_stack_ = NULL;
@@ -2002,13 +1777,14 @@ Isolate::~Isolate() {
delete keyed_lookup_cache_;
keyed_lookup_cache_ = NULL;
- delete transcendental_cache_;
- transcendental_cache_ = NULL;
delete stub_cache_;
stub_cache_ = NULL;
delete stats_table_;
stats_table_ = NULL;
+ delete materialized_object_store_;
+ materialized_object_store_ = NULL;
+
delete logger_;
logger_ = NULL;
@@ -2027,8 +1803,6 @@ Isolate::~Isolate() {
delete write_iterator_;
write_iterator_ = NULL;
- delete context_switcher_;
- context_switcher_ = NULL;
delete thread_manager_;
thread_manager_ = NULL;
@@ -2170,7 +1944,6 @@ bool Isolate::Init(Deserializer* des) {
string_tracker_ = new StringTracker();
string_tracker_->isolate_ = this;
compilation_cache_ = new CompilationCache(this);
- transcendental_cache_ = new TranscendentalCache(this);
keyed_lookup_cache_ = new KeyedLookupCache();
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
@@ -2182,11 +1955,14 @@ bool Isolate::Init(Deserializer* des) {
bootstrapper_ = new Bootstrapper(this);
handle_scope_implementer_ = new HandleScopeImplementer(this);
stub_cache_ = new StubCache(this);
+ materialized_object_store_ = new MaterializedObjectStore(this);
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
code_stub_interface_descriptors_ =
new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
+ call_descriptors_ =
+ new CallInterfaceDescriptor[NUMBER_OF_CALL_DESCRIPTORS];
cpu_profiler_ = new CpuProfiler(this);
heap_profiler_ = new HeapProfiler(heap());
@@ -2195,7 +1971,7 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_A64 || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif
@@ -2217,11 +1993,6 @@ bool Isolate::Init(Deserializer* des) {
deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
- if (FLAG_concurrent_recompilation) {
- optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
- optimizing_compiler_thread_->Start();
- }
-
const bool create_heap_objects = (des == NULL);
if (create_heap_objects && !heap_.CreateHeapObjects()) {
V8::FatalProcessOutOfMemory("heap object creation");
@@ -2238,20 +2009,32 @@ bool Isolate::Init(Deserializer* des) {
bootstrapper_->Initialize(create_heap_objects);
builtins_.SetUp(this, create_heap_objects);
- // Only preallocate on the first initialization.
- if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
- // Start the thread which will set aside some memory.
- PreallocatedMemoryThreadStart();
- preallocated_message_space_ =
- new NoAllocationStringAllocator(
- preallocated_memory_thread_->data(),
- preallocated_memory_thread_->length());
- PreallocatedStorageInit(preallocated_memory_thread_->length() / 4);
+ // Set default value if not yet set.
+ // TODO(yangguo): move this to ResourceConstraints::ConfigureDefaults
+ // once ResourceConstraints becomes an argument to the Isolate constructor.
+ if (max_available_threads_ < 1) {
+ // Choose the default between 1 and 4.
+ max_available_threads_ = Max(Min(CPU::NumberOfProcessorsOnline(), 4), 1);
+ }
+
+ if (!FLAG_job_based_sweeping) {
+ num_sweeper_threads_ =
+ SweeperThread::NumberOfThreads(max_available_threads_);
}
- if (FLAG_preemption) {
- v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
- v8::Locker::StartPreemption(reinterpret_cast<v8::Isolate*>(this), 100);
+ if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
+ PrintF("Concurrent recompilation has been disabled for tracing.\n");
+ } else if (OptimizingCompilerThread::Enabled(max_available_threads_)) {
+ optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
+ optimizing_compiler_thread_->Start();
+ }
+
+ if (num_sweeper_threads_ > 0) {
+ sweeper_thread_ = new SweeperThread*[num_sweeper_threads_];
+ for (int i = 0; i < num_sweeper_threads_; i++) {
+ sweeper_thread_[i] = new SweeperThread(this);
+ sweeper_thread_[i]->Start();
+ }
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2277,17 +2060,26 @@ bool Isolate::Init(Deserializer* des) {
if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
runtime_profiler_ = new RuntimeProfiler(this);
- runtime_profiler_->SetUp();
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.
if (!create_heap_objects &&
- (FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) {
+ (FLAG_log_code ||
+ FLAG_ll_prof ||
+ FLAG_perf_jit_prof ||
+ FLAG_perf_basic_prof ||
+ logger_->is_logging_code_events())) {
HandleScope scope(this);
LOG(this, LogCodeObjects());
LOG(this, LogCompiledFunctions());
}
+ // If we are profiling with the Linux perf tool, we need to disable
+ // code relocation.
+ if (FLAG_perf_jit_prof || FLAG_perf_basic_prof) {
+ FLAG_compact_code_space = false;
+ }
+
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
Internals::kIsolateEmbedderDataOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
@@ -2321,22 +2113,20 @@ bool Isolate::Init(Deserializer* des) {
DONT_TRACK_ALLOCATION_SITE, 0);
stub.InitializeInterfaceDescriptor(
this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
- BinaryOpStub::InitializeForIsolate(this);
+ BinaryOpICStub::InstallDescriptors(this);
+ BinaryOpWithAllocationSiteStub::InstallDescriptors(this);
CompareNilICStub::InitializeForIsolate(this);
ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);
InternalArrayConstructorStubBase::InstallDescriptors(this);
FastNewClosureStub::InstallDescriptors(this);
+ FastNewContextStub::InstallDescriptors(this);
NumberToStringStub::InstallDescriptors(this);
+ StringAddStub::InstallDescriptors(this);
+ RegExpConstructResultStub::InstallDescriptors(this);
}
- if (FLAG_sweeper_threads > 0) {
- sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i] = new SweeperThread(this);
- sweeper_thread_[i]->Start();
- }
- }
+ CallDescriptors::InitializeForIsolate(this);
initialized_from_snapshot_ = (des != NULL);
@@ -2465,6 +2255,12 @@ HTracer* Isolate::GetHTracer() {
}
+CodeTracer* Isolate::GetCodeTracer() {
+ if (code_tracer() == NULL) set_code_tracer(new CodeTracer(id()));
+ return code_tracer();
+}
+
+
Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
Context* native_context = context()->native_context();
Object* maybe_map_array = native_context->js_array_maps();
@@ -2509,6 +2305,13 @@ CodeStubInterfaceDescriptor*
}
+CallInterfaceDescriptor*
+ Isolate::call_descriptor(CallDescriptorKey index) {
+ ASSERT(0 <= index && index < NUMBER_OF_CALL_DESCRIPTORS);
+ return &call_descriptors_[index];
+}
+
+
Object* Isolate::FindCodeObject(Address a) {
return inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(a);
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 9aa14ee025..ef1dd30b22 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -30,7 +30,6 @@
#include "../include/v8-debug.h"
#include "allocation.h"
-#include "apiutils.h"
#include "assert-scope.h"
#include "atomicops.h"
#include "builtins.h"
@@ -52,12 +51,14 @@ namespace v8 {
namespace internal {
class Bootstrapper;
+struct CallInterfaceDescriptor;
class CodeGenerator;
class CodeRange;
struct CodeStubInterfaceDescriptor;
+class CodeTracer;
class CompilationCache;
+class ConsStringIteratorOp;
class ContextSlotCache;
-class ContextSwitcher;
class Counters;
class CpuFeatures;
class CpuProfiler;
@@ -73,20 +74,19 @@ class HeapProfiler;
class HStatistics;
class HTracer;
class InlineRuntimeFunctionsTable;
-class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
-class PreallocatedMemoryThread;
+class MaterializedObjectStore;
+class NoAllocationStringAllocator;
class RandomNumberGenerator;
class RegExpStack;
class SaveContext;
-class UnicodeCache;
-class ConsStringIteratorOp;
class StringTracker;
class StubCache;
class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
+class UnicodeCache;
template <StateTag Tag> class VMState;
// 'void function pointer', used to roundtrip the
@@ -102,6 +102,7 @@ class DebuggerAgent;
#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
@@ -207,6 +208,11 @@ class ThreadId {
};
+#define FIELD_ACCESSOR(type, name) \
+ inline void set_##name(type v) { name##_ = v; } \
+ inline type name() const { return name##_; }
+
+
class ThreadLocalTop BASE_EMBEDDED {
public:
// Does early low-level initialization that does not depend on the
@@ -233,14 +239,7 @@ class ThreadLocalTop BASE_EMBEDDED {
// stack, try_catch_handler_address returns a JS stack address that
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
- inline Address try_catch_handler_address() {
- return try_catch_handler_address_;
- }
-
- // Set the address of the top C++ try catch handler.
- inline void set_try_catch_handler_address(Address address) {
- try_catch_handler_address_ = address;
- }
+ FIELD_ACCESSOR(Address, try_catch_handler_address)
void Free() {
ASSERT(!has_pending_message_);
@@ -300,20 +299,6 @@ class ThreadLocalTop BASE_EMBEDDED {
};
-class SystemThreadManager {
- public:
- enum ParallelSystemComponent {
- PARALLEL_SWEEPING,
- CONCURRENT_SWEEPING,
- PARALLEL_RECOMPILATION
- };
-
- static int NumberOfParallelSystemThreads(ParallelSystemComponent type);
-
- static const int kMaxThreads = 4;
-};
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
@@ -344,7 +329,7 @@ class SystemThreadManager {
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
+typedef List<HeapObject*> DebugObjectCache;
#define ISOLATE_INIT_LIST(V) \
/* SerializerDeserializer state. */ \
@@ -374,11 +359,18 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
- V(bool, observer_delivery_pending, false) \
+ V(bool, microtask_pending, false) \
+ V(bool, autorun_microtasks, true) \
V(HStatistics*, hstatistics, NULL) \
V(HTracer*, htracer, NULL) \
+ V(CodeTracer*, code_tracer, NULL) \
ISOLATE_DEBUGGER_INIT_LIST(V)
+#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
+ inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
+ inline type name() const { return thread_local_top_.name##_; }
+
+
class Isolate {
// These forward declarations are required to make the friend declarations in
// PerIsolateThreadData work on some older versions of gcc.
@@ -398,6 +390,7 @@ class Isolate {
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
@@ -405,17 +398,14 @@ class Isolate {
prev_(NULL) { }
Isolate* isolate() const { return isolate_; }
ThreadId thread_id() const { return thread_id_; }
- void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
- uintptr_t stack_limit() const { return stack_limit_; }
- ThreadState* thread_state() const { return thread_state_; }
- void set_thread_state(ThreadState* value) { thread_state_ = value; }
+
+ FIELD_ACCESSOR(uintptr_t, stack_limit)
+ FIELD_ACCESSOR(ThreadState*, thread_state)
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
- Simulator* simulator() const { return simulator_; }
- void set_simulator(Simulator* simulator) {
- simulator_ = simulator;
- }
+ FIELD_ACCESSOR(Simulator*, simulator)
#endif
bool Matches(Isolate* isolate, ThreadId thread_id) const {
@@ -429,6 +419,7 @@ class Isolate {
ThreadState* thread_state_;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
@@ -554,38 +545,35 @@ class Isolate {
}
Context** context_address() { return &thread_local_top_.context_; }
- SaveContext* save_context() { return thread_local_top_.save_context_; }
- void set_save_context(SaveContext* save) {
- thread_local_top_.save_context_ = save;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
// Access to current thread id.
- ThreadId thread_id() { return thread_local_top_.thread_id_; }
- void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
+ THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
// Interface to pending exception.
MaybeObject* pending_exception() {
ASSERT(has_pending_exception());
return thread_local_top_.pending_exception_;
}
- bool external_caught_exception() {
- return thread_local_top_.external_caught_exception_;
- }
- void set_external_caught_exception(bool value) {
- thread_local_top_.external_caught_exception_ = value;
- }
+
void set_pending_exception(MaybeObject* exception) {
thread_local_top_.pending_exception_ = exception;
}
+
void clear_pending_exception() {
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
+
MaybeObject** pending_exception_address() {
return &thread_local_top_.pending_exception_;
}
+
bool has_pending_exception() {
return !thread_local_top_.pending_exception_->IsTheHole();
}
+
+ THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
+
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
@@ -600,12 +588,8 @@ class Isolate {
bool* external_caught_exception_address() {
return &thread_local_top_.external_caught_exception_;
}
- v8::TryCatch* catcher() {
- return thread_local_top_.catcher_;
- }
- void set_catcher(v8::TryCatch* catcher) {
- thread_local_top_.catcher_ = catcher;
- }
+
+ THREAD_LOCAL_TOP_ACCESSOR(v8::TryCatch*, catcher)
MaybeObject** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
@@ -721,18 +705,12 @@ class Isolate {
// Tells whether the current context has experienced an out of memory
// exception.
bool is_out_of_memory();
- bool ignore_out_of_memory() {
- return thread_local_top_.ignore_out_of_memory_;
- }
- void set_ignore_out_of_memory(bool value) {
- thread_local_top_.ignore_out_of_memory_ = value;
- }
+
+ THREAD_LOCAL_TOP_ACCESSOR(bool, ignore_out_of_memory)
void PrintCurrentStackTrace(FILE* out);
- void PrintStackTrace(FILE* out, char* thread_data);
void PrintStack(StringStream* accumulator);
void PrintStack(FILE* out);
- void PrintStack();
Handle<String> StackTraceString();
NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
Object* object,
@@ -801,9 +779,6 @@ class Isolate {
// result in the target out parameter.
void ComputeLocation(MessageLocation* target);
- // Override command line flag.
- void TraceException(bool flag);
-
// Out of resource exception helpers.
Failure* StackOverflow();
Failure* TerminateExecution();
@@ -888,9 +863,8 @@ class Isolate {
StubCache* stub_cache() { return stub_cache_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
-
- TranscendentalCache* transcendental_cache() const {
- return transcendental_cache_;
+ MaterializedObjectStore* materialized_object_store() {
+ return materialized_object_store_;
}
MemoryAllocator* memory_allocator() {
@@ -909,9 +883,8 @@ class Isolate {
return descriptor_lookup_cache_;
}
- v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
- return &handle_scope_data_;
- }
+ HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
+
HandleScopeImplementer* handle_scope_implementer() {
ASSERT(handle_scope_implementer_);
return handle_scope_implementer_;
@@ -934,12 +907,6 @@ class Isolate {
ThreadManager* thread_manager() { return thread_manager_; }
- ContextSwitcher* context_switcher() { return context_switcher_; }
-
- void set_context_switcher(ContextSwitcher* switcher) {
- context_switcher_ = switcher;
- }
-
StringTracker* string_tracker() { return string_tracker_; }
unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
@@ -964,11 +931,7 @@ class Isolate {
RuntimeState* runtime_state() { return &runtime_state_; }
- void set_fp_stubs_generated(bool value) {
- fp_stubs_generated_ = value;
- }
-
- bool fp_stubs_generated() { return fp_stubs_generated_; }
+ FIELD_ACCESSOR(bool, fp_stubs_generated);
Builtins* builtins() { return &builtins_; }
@@ -990,10 +953,6 @@ class Isolate {
return &interp_canonicalize_mapping_;
}
- void* PreallocatedStorageNew(size_t size);
- void PreallocatedStorageDelete(void* p);
- void PreallocatedStorageInit(size_t size);
-
inline bool IsCodePreAgingActive();
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1024,54 +983,32 @@ class Isolate {
#endif
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
- bool simulator_initialized() { return simulator_initialized_; }
- void set_simulator_initialized(bool initialized) {
- simulator_initialized_ = initialized;
- }
-
- HashMap* simulator_i_cache() { return simulator_i_cache_; }
- void set_simulator_i_cache(HashMap* hash_map) {
- simulator_i_cache_ = hash_map;
- }
-
- Redirection* simulator_redirection() {
- return simulator_redirection_;
- }
- void set_simulator_redirection(Redirection* redirection) {
- simulator_redirection_ = redirection;
- }
+ FIELD_ACCESSOR(bool, simulator_initialized)
+ FIELD_ACCESSOR(HashMap*, simulator_i_cache)
+ FIELD_ACCESSOR(Redirection*, simulator_redirection)
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
static const int kJSRegexpStaticOffsetsVectorSize = 128;
- ExternalCallbackScope* external_callback_scope() {
- return thread_local_top_.external_callback_scope_;
- }
- void set_external_callback_scope(ExternalCallbackScope* scope) {
- thread_local_top_.external_callback_scope_ = scope;
- }
-
- StateTag current_vm_state() {
- return thread_local_top_.current_vm_state_;
- }
-
- void set_current_vm_state(StateTag state) {
- thread_local_top_.current_vm_state_ = state;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
- void SetData(void* data) { embedder_data_ = data; }
- void* GetData() { return embedder_data_; }
+ THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
- LookupResult* top_lookup_result() {
- return thread_local_top_.top_lookup_result_;
+ void SetData(uint32_t slot, void* data) {
+ ASSERT(slot < Internals::kNumIsolateDataSlots);
+ embedder_data_[slot] = data;
}
- void SetTopLookupResult(LookupResult* top) {
- thread_local_top_.top_lookup_result_ = top;
+ void* GetData(uint32_t slot) {
+ ASSERT(slot < Internals::kNumIsolateDataSlots);
+ return embedder_data_[slot];
}
+ THREAD_LOCAL_TOP_ACCESSOR(LookupResult*, top_lookup_result)
+
bool IsDead() { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
@@ -1101,6 +1038,17 @@ class Isolate {
CodeStubInterfaceDescriptor*
code_stub_interface_descriptor(int index);
+ enum CallDescriptorKey {
+ KeyedCall,
+ NamedCall,
+ CallHandler,
+ ArgumentAdaptorCall,
+ ApiFunctionCall,
+ NUMBER_OF_CALL_DESCRIPTORS
+ };
+
+ CallInterfaceDescriptor* call_descriptor(CallDescriptorKey index);
+
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1109,23 +1057,44 @@ class Isolate {
bool IsDeferredHandle(Object** location);
#endif // DEBUG
+ FIELD_ACCESSOR(int, max_available_threads);
+
+ bool concurrent_recompilation_enabled() {
+ // Thread is only available with flag enabled.
+ ASSERT(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL;
+ }
+
+ bool concurrent_osr_enabled() const {
+ // Thread is only available with flag enabled.
+ ASSERT(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL && FLAG_concurrent_osr;
+ }
+
OptimizingCompilerThread* optimizing_compiler_thread() {
return optimizing_compiler_thread_;
}
- // PreInits and returns a default isolate. Needed when a new thread tries
- // to create a Locker for the first time (the lock itself is in the isolate).
- // TODO(svenpanne) This method is on death row...
- static v8::Isolate* GetDefaultIsolateForLocking();
+ int num_sweeper_threads() const {
+ return num_sweeper_threads_;
+ }
SweeperThread** sweeper_threads() {
return sweeper_thread_;
}
+ // PreInits and returns a default isolate. Needed when a new thread tries
+ // to create a Locker for the first time (the lock itself is in the isolate).
+ // TODO(svenpanne) This method is on death row...
+ static v8::Isolate* GetDefaultIsolateForLocking();
+
int id() const { return static_cast<int>(id_); }
HStatistics* GetHStatistics();
HTracer* GetHTracer();
+ CodeTracer* GetCodeTracer();
FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
@@ -1139,6 +1108,14 @@ class Isolate {
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
+ int NextOptimizationId() {
+ int id = next_optimization_id_++;
+ if (!Smi::IsValid(next_optimization_id_)) {
+ next_optimization_id_ = 0;
+ }
+ return id;
+ }
+
private:
Isolate();
@@ -1153,9 +1130,9 @@ class Isolate {
// These fields are accessed through the API, offsets must be kept in sync
// with v8::internal::Internals (in include/v8.h) constants. This is also
// verified in Isolate::Init() using runtime checks.
- State state_; // Will be padded to kApiPointerSize.
- void* embedder_data_;
+ void* embedder_data_[Internals::kNumIsolateDataSlots];
Heap heap_;
+ State state_; // Will be padded to kApiPointerSize.
// The per-process lock should be acquired before the ThreadDataTable is
// modified.
@@ -1231,11 +1208,8 @@ class Isolate {
// at the same time, this should be prevented using external locking.
void Exit();
- void PreallocatedMemoryThreadStart();
- void PreallocatedMemoryThreadStop();
void InitializeThreadLocal();
- void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
void MarkCompactPrologue(bool is_compacting,
ThreadLocalTop* archived_thread_data);
void MarkCompactEpilogue(bool is_compacting,
@@ -1255,10 +1229,7 @@ class Isolate {
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
- // The preallocated memory thread singleton.
- PreallocatedMemoryThread* preallocated_memory_thread_;
Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
- NoAllocationStringAllocator* preallocated_message_space_;
Bootstrapper* bootstrapper_;
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
@@ -1272,27 +1243,23 @@ class Isolate {
StatsTable* stats_table_;
StubCache* stub_cache_;
DeoptimizerData* deoptimizer_data_;
+ MaterializedObjectStore* materialized_object_store_;
ThreadLocalTop thread_local_top_;
bool capture_stack_trace_for_uncaught_exceptions_;
int stack_trace_for_uncaught_exceptions_frame_limit_;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
- TranscendentalCache* transcendental_cache_;
MemoryAllocator* memory_allocator_;
KeyedLookupCache* keyed_lookup_cache_;
ContextSlotCache* context_slot_cache_;
DescriptorLookupCache* descriptor_lookup_cache_;
- v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
+ HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
Zone runtime_zone_;
- PreallocatedStorage in_use_list_;
- PreallocatedStorage free_list_;
- bool preallocated_storage_preallocated_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
ConsStringIteratorOp* write_iterator_;
GlobalHandles* global_handles_;
EternalHandles* eternal_handles_;
- ContextSwitcher* context_switcher_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
bool fp_stubs_generated_;
@@ -1310,6 +1277,7 @@ class Isolate {
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
+ CallInterfaceDescriptor* call_descriptors_;
RandomNumberGenerator* random_number_generator_;
// True if fatal error has been signaled for this isolate.
@@ -1325,6 +1293,7 @@ class Isolate {
double time_millis_at_init_;
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
@@ -1370,10 +1339,17 @@ class Isolate {
DeferredHandles* deferred_handles_head_;
OptimizingCompilerThread* optimizing_compiler_thread_;
SweeperThread** sweeper_thread_;
+ int num_sweeper_threads_;
+
+ // TODO(yangguo): This will become obsolete once ResourceConstraints
+ // becomes an argument to Isolate constructor.
+ int max_available_threads_;
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
+ int next_optimization_id_;
+
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
@@ -1393,6 +1369,10 @@ class Isolate {
};
+#undef FIELD_ACCESSOR
+#undef THREAD_LOCAL_TOP_ACCESSOR
+
+
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
class as a workaround for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
@@ -1481,18 +1461,21 @@ class StackLimitCheck BASE_EMBEDDED {
class PostponeInterruptsScope BASE_EMBEDDED {
public:
explicit PostponeInterruptsScope(Isolate* isolate)
- : stack_guard_(isolate->stack_guard()) {
+ : stack_guard_(isolate->stack_guard()), isolate_(isolate) {
+ ExecutionAccess access(isolate_);
stack_guard_->thread_local_.postpone_interrupts_nesting_++;
stack_guard_->DisableInterrupts();
}
~PostponeInterruptsScope() {
+ ExecutionAccess access(isolate_);
if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
stack_guard_->EnableInterrupts();
}
}
private:
StackGuard* stack_guard_;
+ Isolate* isolate_;
};
@@ -1507,6 +1490,73 @@ inline void Context::mark_out_of_memory() {
native_context()->set_out_of_memory(GetIsolate()->heap()->true_value());
}
+class CodeTracer V8_FINAL : public Malloced {
+ public:
+ explicit CodeTracer(int isolate_id)
+ : file_(NULL),
+ scope_depth_(0) {
+ if (!ShouldRedirect()) {
+ file_ = stdout;
+ return;
+ }
+
+ if (FLAG_redirect_code_traces_to == NULL) {
+ OS::SNPrintF(filename_,
+ "code-%d-%d.asm",
+ OS::GetCurrentProcessId(),
+ isolate_id);
+ } else {
+ OS::StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
+ }
+
+ WriteChars(filename_.start(), "", 0, false);
+ }
+
+ class Scope {
+ public:
+ explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
+ ~Scope() { tracer_->CloseFile(); }
+
+ FILE* file() const { return tracer_->file(); }
+
+ private:
+ CodeTracer* tracer_;
+ };
+
+ void OpenFile() {
+ if (!ShouldRedirect()) {
+ return;
+ }
+
+ if (file_ == NULL) {
+ file_ = OS::FOpen(filename_.start(), "a");
+ }
+
+ scope_depth_++;
+ }
+
+ void CloseFile() {
+ if (!ShouldRedirect()) {
+ return;
+ }
+
+ if (--scope_depth_ == 0) {
+ fclose(file_);
+ file_ = NULL;
+ }
+ }
+
+ FILE* file() const { return file_; }
+
+ private:
+ static bool ShouldRedirect() {
+ return FLAG_redirect_code_traces;
+ }
+
+ EmbeddedVector<char, 128> filename_;
+ FILE* file_;
+ int scope_depth_;
+};
} } // namespace v8::internal
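The FIELD_ACCESSOR and THREAD_LOCAL_TOP_ACCESSOR macros introduced above replace many hand-written getter/setter pairs in isolate.h. As a rough illustration of what one expansion looks like (a sketch, not part of the patch; ToyIsolate is a hypothetical stand-in for the real Isolate class):

#include <cstdio>

// Copied from the isolate.h hunk above.
#define FIELD_ACCESSOR(type, name)                  \
  inline void set_##name(type v) { name##_ = v; }   \
  inline type name() const { return name##_; }

class ToyIsolate {
 public:
  // Expands to set_fp_stubs_generated(bool) and fp_stubs_generated().
  FIELD_ACCESSOR(bool, fp_stubs_generated)
 private:
  bool fp_stubs_generated_;
};

int main() {
  ToyIsolate isolate;
  isolate.set_fp_stubs_generated(true);
  std::printf("fp_stubs_generated = %d\n", isolate.fp_stubs_generated());
  return 0;
}

THREAD_LOCAL_TOP_ACCESSOR works the same way, except that the generated accessors forward to the corresponding field of thread_local_top_.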
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index c21e6351d4..0799deadfe 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -210,6 +210,21 @@ function JSONStringify(value, replacer, space) {
} else {
gap = "";
}
+ if (IS_ARRAY(replacer)) {
+ // Deduplicate replacer array items.
+ var property_list = new InternalArray();
+ var seen_properties = {};
+ var length = replacer.length;
+ for (var i = 0; i < length; i++) {
+ var item = replacer[i];
+ if (IS_NUMBER(item)) item = %_NumberToString(item);
+ if (IS_STRING(item) && !(item in seen_properties)) {
+ property_list.push(item);
+ seen_properties[item] = true;
+ }
+ }
+ replacer = property_list;
+ }
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 3a3d91599c..d9057dbf56 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -49,6 +49,8 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/regexp-macro-assembler-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -645,8 +647,8 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
#if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
if (FLAG_trace_regexp_bytecodes) {
String* pattern = regexp->Pattern();
- PrintF("\n\nRegexp match: /%s/\n\n", *(pattern->ToCString()));
- PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
+ PrintF("\n\nRegexp match: /%s/\n\n", pattern->ToCString().get());
+ PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
}
#endif
int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
@@ -1150,7 +1152,9 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
work_list_ = NULL;
#ifdef DEBUG
if (FLAG_print_code) {
- Handle<Code>::cast(code)->Disassemble(*pattern->ToCString());
+ CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer());
+ Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(),
+ trace_scope.file());
}
if (FLAG_trace_regexp_assembler) {
delete macro_assembler_;
@@ -3595,9 +3599,12 @@ class AlternativeGenerationList {
// The '2' variant has inclusive from and exclusive to.
-static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0,
- 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B, 0x2028, 0x202A,
- 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, 0x10000 };
+// This covers \s as defined in ECMA-262 5.1, 15.10.2.12,
+// which includes WhiteSpace (7.2) or LineTerminator (7.3) values.
+static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1,
+ 0x00A0, 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B,
+ 0x2028, 0x202A, 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001,
+ 0xFEFF, 0xFF00, 0x10000 };
static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
static const int kWordRanges[] = {
@@ -4372,7 +4379,7 @@ void DotPrinter::PrintNode(const char* label, RegExpNode* node) {
stream()->Add("\"];\n");
Visit(node);
stream()->Add("}\n");
- printf("%s", *(stream()->ToCString()));
+ printf("%s", stream()->ToCString().get());
}
@@ -4667,7 +4674,7 @@ void DispatchTable::Dump() {
StringStream stream(&alloc);
DispatchTableDumper dumper(&stream);
tree()->ForEach(&dumper);
- OS::PrintError("%s", *stream.ToCString());
+ OS::PrintError("%s", stream.ToCString().get());
}
@@ -6083,9 +6090,14 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#elif V8_TARGET_ARCH_A64
+ RegExpMacroAssemblerA64 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#else
+#error "Unsupported architecture"
#endif
#else // V8_INTERPRETED_REGEXP
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
new file mode 100644
index 0000000000..1e21ca4e82
--- /dev/null
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -0,0 +1,89 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "default-platform.h"
+
+#include <queue>
+
+// TODO(jochen): We should have our own version of checks.h.
+#include "../checks.h"
+// TODO(jochen): Why is cpu.h not in platform/?
+#include "../cpu.h"
+#include "worker-thread.h"
+
+namespace v8 {
+namespace internal {
+
+
+DefaultPlatform::DefaultPlatform()
+ : initialized_(false), thread_pool_size_(0) {}
+
+
+DefaultPlatform::~DefaultPlatform() {
+ LockGuard<Mutex> guard(&lock_);
+ queue_.Terminate();
+ if (initialized_) {
+ for (std::vector<WorkerThread*>::iterator i = thread_pool_.begin();
+ i != thread_pool_.end(); ++i) {
+ delete *i;
+ }
+ }
+}
+
+
+void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
+ LockGuard<Mutex> guard(&lock_);
+ ASSERT(thread_pool_size >= 0);
+ if (thread_pool_size < 1)
+ thread_pool_size = CPU::NumberOfProcessorsOnline();
+ thread_pool_size_ = Max(Min(thread_pool_size, kMaxThreadPoolSize), 1);
+}
+
+
+void DefaultPlatform::EnsureInitialized() {
+ LockGuard<Mutex> guard(&lock_);
+ if (initialized_) return;
+ initialized_ = true;
+
+ for (int i = 0; i < thread_pool_size_; ++i)
+ thread_pool_.push_back(new WorkerThread(&queue_));
+}
+
+void DefaultPlatform::CallOnBackgroundThread(Task *task,
+ ExpectedRuntime expected_runtime) {
+ EnsureInitialized();
+ queue_.Append(task);
+}
+
+
+void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
+ // TODO(jochen): implement.
+ task->Run();
+ delete task;
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/defaults.cc b/deps/v8/src/libplatform/default-platform.h
index a03cf69b08..877b3a63e7 100644
--- a/deps/v8/src/defaults.cc
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -25,46 +25,53 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// The GYP based build ends up defining USING_V8_SHARED when compiling this
-// file.
-#undef USING_V8_SHARED
-#include "../include/v8-defaults.h"
+#ifndef V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
+#define V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
-#include "platform.h"
-#include "globals.h"
-#include "v8.h"
+#include <vector>
+
+#include "../../include/v8-platform.h"
+// TODO(jochen): We should have our own version of globals.h.
+#include "../globals.h"
+#include "../platform/mutex.h"
+#include "task-queue.h"
namespace v8 {
+namespace internal {
+
+class TaskQueue;
+class Thread;
+class WorkerThread;
+
+class DefaultPlatform : public Platform {
+ public:
+ DefaultPlatform();
+ virtual ~DefaultPlatform();
+
+ void SetThreadPoolSize(int thread_pool_size);
+
+ // v8::Platform implementation.
+ virtual void CallOnBackgroundThread(
+ Task *task, ExpectedRuntime expected_runtime) V8_OVERRIDE;
+ virtual void CallOnForegroundThread(v8::Isolate *isolate,
+ Task *task) V8_OVERRIDE;
+
+ private:
+ static const int kMaxThreadPoolSize = 4;
+
+ void EnsureInitialized();
+
+ Mutex lock_;
+ bool initialized_;
+ int thread_pool_size_;
+ std::vector<WorkerThread*> thread_pool_;
+ TaskQueue queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
+};
+
+
+} } // namespace v8::internal
-bool ConfigureResourceConstraintsForCurrentPlatform(
- ResourceConstraints* constraints) {
- if (constraints == NULL) {
- return false;
- }
-
- int lump_of_memory = (i::kPointerSize / 4) * i::MB;
-
- // The young_space_size should be a power of 2 and old_generation_size should
- // be a multiple of Page::kPageSize.
-#if V8_OS_ANDROID
- constraints->set_max_young_space_size(8 * lump_of_memory);
- constraints->set_max_old_space_size(256 * lump_of_memory);
- constraints->set_max_executable_size(192 * lump_of_memory);
-#else
- constraints->set_max_young_space_size(16 * lump_of_memory);
- constraints->set_max_old_space_size(700 * lump_of_memory);
- constraints->set_max_executable_size(256 * lump_of_memory);
-#endif
- return true;
-}
-
-
-bool SetDefaultResourceConstraintsForCurrentPlatform() {
- ResourceConstraints constraints;
- if (!ConfigureResourceConstraintsForCurrentPlatform(&constraints))
- return false;
- return SetResourceConstraints(&constraints);
-}
-
-} // namespace v8
+#endif // V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
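A sketch of how the new DefaultPlatform might be driven (illustration only, not part of the patch). It assumes the v8::Task interface with a virtual Run() and the Platform::kShortRunningTask enumerator from include/v8-platform.h, and that the file is compiled inside the V8 source tree so the relative includes resolve.

#include <cstdio>

#include "libplatform/default-platform.h"
#include "../include/v8-platform.h"

class HelloTask : public v8::Task {
 public:
  virtual ~HelloTask() {}
  virtual void Run() { std::printf("hello from the worker pool\n"); }
};

int main() {
  v8::internal::DefaultPlatform platform;
  // 0 lets the platform pick the online-CPU count, clamped to [1, 4].
  platform.SetThreadPoolSize(0);
  // The task queue takes ownership of the task; a WorkerThread runs it.
  platform.CallOnBackgroundThread(new HelloTask(),
                                  v8::Platform::kShortRunningTask);
  // ~DefaultPlatform() terminates the queue and joins the worker threads,
  // so the task has run before the process exits.
  return 0;
}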
diff --git a/deps/v8/src/libplatform/task-queue.cc b/deps/v8/src/libplatform/task-queue.cc
new file mode 100644
index 0000000000..1ea31eb26e
--- /dev/null
+++ b/deps/v8/src/libplatform/task-queue.cc
@@ -0,0 +1,80 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "task-queue.h"
+
+// TODO(jochen): We should have our own version of checks.h.
+#include "../checks.h"
+
+namespace v8 {
+namespace internal {
+
+TaskQueue::TaskQueue() : process_queue_semaphore_(0), terminated_(false) {}
+
+
+TaskQueue::~TaskQueue() {
+ LockGuard<Mutex> guard(&lock_);
+ ASSERT(terminated_);
+ ASSERT(task_queue_.empty());
+}
+
+
+void TaskQueue::Append(Task* task) {
+ LockGuard<Mutex> guard(&lock_);
+ ASSERT(!terminated_);
+ task_queue_.push(task);
+ process_queue_semaphore_.Signal();
+}
+
+
+Task* TaskQueue::GetNext() {
+ for (;;) {
+ {
+ LockGuard<Mutex> guard(&lock_);
+ if (!task_queue_.empty()) {
+ Task* result = task_queue_.front();
+ task_queue_.pop();
+ return result;
+ }
+ if (terminated_) {
+ process_queue_semaphore_.Signal();
+ return NULL;
+ }
+ }
+ process_queue_semaphore_.Wait();
+ }
+}
+
+
+void TaskQueue::Terminate() {
+ LockGuard<Mutex> guard(&lock_);
+ ASSERT(!terminated_);
+ terminated_ = true;
+ process_queue_semaphore_.Signal();
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/libplatform/task-queue.h b/deps/v8/src/libplatform/task-queue.h
new file mode 100644
index 0000000000..a3182d3531
--- /dev/null
+++ b/deps/v8/src/libplatform/task-queue.h
@@ -0,0 +1,71 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIBPLATFORM_TASK_QUEUE_H_
+#define V8_LIBPLATFORM_TASK_QUEUE_H_
+
+#include <queue>
+
+// TODO(jochen): We should have our own version of globals.h.
+#include "../globals.h"
+#include "../platform/mutex.h"
+#include "../platform/semaphore.h"
+
+namespace v8 {
+
+class Task;
+
+namespace internal {
+
+class TaskQueue {
+ public:
+ TaskQueue();
+ ~TaskQueue();
+
+ // Appends a task to the queue. The queue takes ownership of |task|.
+ void Append(Task* task);
+
+ // Returns the next task to process. Blocks if no task is available. Returns
+ // NULL if the queue is terminated.
+ Task* GetNext();
+
+ // Terminate the queue.
+ void Terminate();
+
+ private:
+ Mutex lock_;
+ Semaphore process_queue_semaphore_;
+ std::queue<Task*> task_queue_;
+ bool terminated_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskQueue);
+};
+
+} } // namespace v8::internal
+
+
+#endif // V8_LIBPLATFORM_TASK_QUEUE_H_
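The comments above pin down the TaskQueue contract: Append() hands ownership of the task to the queue, GetNext() blocks until a task or termination arrives, and NULL is the shutdown sentinel; tasks still queued when Terminate() is called are drained before NULL is returned. A single-threaded sketch of that contract (illustration only, assuming it is built inside the V8 tree so the platform headers resolve):

#include <cstdio>

#include "libplatform/task-queue.h"
#include "../include/v8-platform.h"

class CountTask : public v8::Task {
 public:
  explicit CountTask(int n) : n_(n) {}
  virtual ~CountTask() {}
  virtual void Run() { std::printf("running task %d\n", n_); }
 private:
  int n_;
};

int main() {
  v8::internal::TaskQueue queue;
  queue.Append(new CountTask(1));  // the queue now owns the task
  queue.Append(new CountTask(2));
  queue.Terminate();               // no further Append() calls are allowed
  // Drains task 1 and task 2, then GetNext() returns NULL.
  while (v8::Task* task = queue.GetNext()) {
    task->Run();
    delete task;
  }
  return 0;
}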
diff --git a/deps/v8/src/libplatform/worker-thread.cc b/deps/v8/src/libplatform/worker-thread.cc
new file mode 100644
index 0000000000..cca8a9719a
--- /dev/null
+++ b/deps/v8/src/libplatform/worker-thread.cc
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "worker-thread.h"
+
+// TODO(jochen): We should have our own version of checks.h.
+#include "../checks.h"
+#include "../../include/v8-platform.h"
+#include "task-queue.h"
+
+namespace v8 {
+namespace internal {
+
+WorkerThread::WorkerThread(TaskQueue* queue)
+ : Thread("V8 WorkerThread"), queue_(queue) {
+ Start();
+}
+
+
+WorkerThread::~WorkerThread() {
+ Join();
+}
+
+
+void WorkerThread::Run() {
+ while (Task* task = queue_->GetNext()) {
+ task->Run();
+ delete task;
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/libplatform/worker-thread.h b/deps/v8/src/libplatform/worker-thread.h
new file mode 100644
index 0000000000..f0b9019f59
--- /dev/null
+++ b/deps/v8/src/libplatform/worker-thread.h
@@ -0,0 +1,62 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIBPLATFORM_WORKER_THREAD_H_
+#define V8_LIBPLATFORM_WORKER_THREAD_H_
+
+#include <queue>
+
+// TODO(jochen): We should have our own version of globals.h.
+#include "../globals.h"
+#include "../platform.h"
+
+namespace v8 {
+
+namespace internal {
+
+class TaskQueue;
+
+class WorkerThread : public Thread {
+ public:
+ explicit WorkerThread(TaskQueue* queue);
+ virtual ~WorkerThread();
+
+ // Thread implementation.
+ virtual void Run() V8_OVERRIDE;
+
+ private:
+ friend class QuitTask;
+
+ TaskQueue* queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(WorkerThread);
+};
+
+} } // namespace v8::internal
+
+
+#endif // V8_LIBPLATFORM_WORKER_THREAD_H_
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 143c830ee9..a80aa67434 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -166,6 +166,7 @@ void List<T, P>::Clear() {
template<typename T, class P>
void List<T, P>::Rewind(int pos) {
+ ASSERT(0 <= pos && pos <= length_);
length_ = pos;
}
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 41666deb26..71aa821959 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -91,6 +91,10 @@ class List {
inline T& last() const { return at(length_ - 1); }
inline T& first() const { return at(0); }
+ typedef T* iterator;
+ inline iterator begin() const { return &data_[0]; }
+ inline iterator end() const { return &data_[length_]; }
+
INLINE(bool is_empty() const) { return length_ == 0; }
INLINE(int length() const) { return length_; }
INLINE(int capacity() const) { return capacity_; }
@@ -196,12 +200,23 @@ class List {
DISALLOW_COPY_AND_ASSIGN(List);
};
+
+template<typename T, class P>
+size_t GetMemoryUsedByList(const List<T, P>& list) {
+ return list.length() * sizeof(T) + sizeof(list);
+}
+
+
class Map;
+template<class> class TypeImpl;
+struct HeapTypeConfig;
+typedef TypeImpl<HeapTypeConfig> HeapType;
class Code;
template<typename T> class Handle;
typedef List<Map*> MapList;
typedef List<Code*> CodeList;
typedef List<Handle<Map> > MapHandleList;
+typedef List<Handle<HeapType> > TypeHandleList;
typedef List<Handle<Code> > CodeHandleList;
// Perform binary search for an element in an already sorted
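list.h now exposes begin()/end() as plain T* iterators and a GetMemoryUsedByList() helper. A small sketch of the pattern this enables (illustration only, built against the V8 tree; the codebase predates C++11, so the iterator type is spelled out):

#include <cstdio>

#include "list.h"
#include "list-inl.h"

int main() {
  v8::internal::List<int> numbers;
  numbers.Add(1);
  numbers.Add(2);
  numbers.Add(3);
  // begin()/end() are raw-pointer iterators over the backing store.
  for (v8::internal::List<int>::iterator it = numbers.begin();
       it != numbers.end(); ++it) {
    std::printf("%d\n", *it);
  }
  // Rough footprint: element storage plus the List header itself.
  std::printf("bytes ~ %u\n",
              static_cast<unsigned>(v8::internal::GetMemoryUsedByList(numbers)));
  return 0;
}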
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
index deee98877d..1d43b269f9 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -34,6 +34,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 29c31942e4..eae2995695 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -35,6 +35,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -1369,7 +1371,7 @@ void LAllocator::BuildLiveRanges() {
ASSERT(chunk_->info()->IsOptimizing());
AllowHandleDereference allow_deref;
PrintF("Function: %s\n",
- *chunk_->info()->function()->debug_name()->ToCString());
+ chunk_->info()->function()->debug_name()->ToCString().get());
}
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
index 19ebe7e516..9eecedc2f0 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/lithium-codegen.cc
@@ -38,6 +38,9 @@
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
+#include "a64/lithium-codegen-a64.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
@@ -104,11 +107,9 @@ bool LCodeGenBase::GenerateBody() {
GenerateBodyInstructionPre(instr);
HValue* value = instr->hydrogen_value();
- if (value->position() != RelocInfo::kNoPosition) {
- ASSERT(!graph()->info()->IsOptimizing() ||
- !FLAG_emit_opt_code_positions ||
- value->position() != RelocInfo::kNoPosition);
- RecordAndWritePosition(value->position());
+ if (!value->position().IsUnknown()) {
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
}
instr->CompileToNative(codegen);
@@ -147,4 +148,48 @@ int LCodeGenBase::GetNextEmittedBlock() const {
}
+void LCodeGenBase::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
+ ZoneList<Handle<Cell> > cells(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::CELL &&
+ Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_cell())) {
+ Handle<Cell> cell(it.rinfo()->target_cell());
+ cells.Add(cell, zone());
+ } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
+ } else if (it.rinfo()->target_object()->IsCell()) {
+ Handle<Cell> cell(Cell::cast(it.rinfo()->target_object()));
+ cells.Add(cell, zone());
+ }
+ }
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded objects after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
+ }
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
+ for (int i = 0; i < cells.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), cells.at(i), code);
+ }
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
index 9caab8127d..f6806781de 100644
--- a/deps/v8/src/lithium-codegen.h
+++ b/deps/v8/src/lithium-codegen.h
@@ -66,6 +66,8 @@ class LCodeGenBase BASE_EMBEDDED {
int GetNextEmittedBlock() const;
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
+
protected:
enum Status {
UNUSED,
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index b66a64f90d..ab1e630c70 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -41,6 +41,9 @@
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
+#include "a64/lithium-codegen-a64.h"
#else
#error "Unknown architecture."
#endif
@@ -237,7 +240,8 @@ int StackSlotOffset(int index) {
if (index >= 0) {
// Local or spill slot. Skip the frame pointer, function, and
// context in the fixed part of the frame.
- return -(index + 3) * kPointerSize;
+ return -(index + 1) * kPointerSize -
+ StandardFrameConstants::kFixedFrameSizeFromFp;
} else {
// Incoming parameter. Skip the return address.
return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
@@ -342,7 +346,8 @@ int LChunk::GetParameterStackSlot(int index) const {
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
+ int result = index - info()->num_parameters() - 1;
+
ASSERT(result < 0);
return result;
}
@@ -461,6 +466,139 @@ void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
}
+LEnvironment* LChunkBuilderBase::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
+ if (hydrogen_env == NULL) return NULL;
+
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
+ hydrogen_env->frame_type() != JS_FUNCTION);
+ int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
+ LEnvironment* result =
+ new(zone()) LEnvironment(hydrogen_env->closure(),
+ hydrogen_env->frame_type(),
+ ast_id,
+ hydrogen_env->parameter_count(),
+ argument_count_,
+ value_count,
+ outer,
+ hydrogen_env->entry(),
+ zone());
+ int argument_index = *argument_index_accumulator;
+
+ // Store the environment description into the environment
+ // (with holes for nested objects)
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
+ LOperand* op;
+ HValue* value = hydrogen_env->values()->at(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ op = LEnvironment::materialization_marker();
+ } else if (value->IsPushArgument()) {
+ op = new(zone()) LArgument(argument_index++);
+ } else {
+ op = UseAny(value);
+ }
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
+ }
+
+ // Recursively store the nested objects into the environment
+ for (int i = 0; i < hydrogen_env->length(); ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
+ HValue* value = hydrogen_env->values()->at(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ AddObjectToMaterialize(value, objects_to_materialize, result);
+ }
+ }
+
+ if (hydrogen_env->frame_type() == JS_FUNCTION) {
+ *argument_index_accumulator = argument_index;
+ }
+
+ return result;
+}
+
+
+// Add an object to the supplied environment and object materialization list.
+//
+// Notes:
+//
+// We are building three lists here:
+//
+// 1. In the result->object_mapping_ list (added to by the
+// LEnvironment::Add*Object methods), we store the lengths (number
+// of fields) of the captured objects in depth-first traversal order, or
+// in case of duplicated objects, we store the index to the duplicate object
+// (with a tag to differentiate between captured and duplicated objects).
+//
+// 2. The object fields are stored in the result->values_ list
+// (added to by the LEnvironment.AddValue method) sequentially as lists
+// of fields with holes for nested objects (the holes will be expanded
+// later by LCodegen::AddToTranslation according to the
+// LEnvironment.object_mapping_ list).
+//
+// 3. The auxiliary objects_to_materialize array stores the hydrogen values
+// in the same order as result->object_mapping_ list. This is used
+// to detect duplicate values and calculate the corresponding object index.
+void LChunkBuilderBase::AddObjectToMaterialize(HValue* value,
+ ZoneList<HValue*>* objects_to_materialize, LEnvironment* result) {
+ int object_index = objects_to_materialize->length();
+ // Store the hydrogen value into the de-duplication array
+ objects_to_materialize->Add(value, zone());
+ // Find out whether we are storing a duplicated value
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < object_index; ++prev) {
+ if (objects_to_materialize->at(prev) == value) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ // Store the captured object length (or duplicated object index)
+ // into the environment. For duplicated objects, we stop here.
+ int length = value->OperandCount();
+ bool is_arguments = value->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ return;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ // Store the captured object's fields into the environment
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* arg_value = value->OperandAt(i);
+ if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
+ // Insert a hole for nested objects
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!arg_value->IsPushArgument());
+ // For ordinary values, tell the register allocator we need the value
+ // to be alive here
+ op = UseAny(arg_value);
+ }
+ result->AddValue(op,
+ arg_value->representation(),
+ arg_value->CheckFlag(HInstruction::kUint32));
+ }
+ // Recursively store all the nested captured objects into the environment
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ HValue* arg_value = value->OperandAt(i);
+ if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
+ AddObjectToMaterialize(arg_value, objects_to_materialize, result);
+ }
+ }
+}
+
+
LInstruction* LChunkBuilder::CheckElideControlInstruction(
HControlInstruction* instr) {
HBasicBlock* successor;
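
The notes above describe a two-pass encoding: per-object lengths (or duplicate indices) in one list, and a flat value list with holes where nested objects sit. A rough standalone sketch of that scheme, using simplified stand-in types rather than V8's HValue/LEnvironment machinery:

#include <cstdio>
#include <vector>

// Simplified stand-ins; the real code works on HValue/LEnvironment.
struct Node {
  int id;                     // -1 never used here; plain values have no fields
  std::vector<Node> fields;   // non-empty only for captured objects
};

struct Encoding {
  std::vector<int> object_mapping;  // length per object, or ~index of a duplicate
  std::vector<int> values;          // flat values; -1 is a hole for a nested object
};

void AddObject(const Node& obj, std::vector<const Node*>* seen, Encoding* out) {
  for (size_t prev = 0; prev < seen->size(); ++prev) {
    if ((*seen)[prev]->id == obj.id) {            // duplicate: store its index only
      out->object_mapping.push_back(~static_cast<int>(prev));
      return;
    }
  }
  seen->push_back(&obj);
  out->object_mapping.push_back(static_cast<int>(obj.fields.size()));
  for (const Node& f : obj.fields)                // first pass: fields, holes for objects
    out->values.push_back(f.fields.empty() ? f.id : -1);
  for (const Node& f : obj.fields)                // second pass: recurse into nested objects
    if (!f.fields.empty()) AddObject(f, seen, out);
}

int main() {
  Node inner{7, {{1, {}}, {2, {}}}};
  Node outer{9, {{0, {}}, inner, inner}};         // inner is captured twice
  std::vector<const Node*> seen;
  Encoding enc;
  AddObject(outer, &seen, &enc);
  for (int m : enc.object_mapping) std::printf("map %d\n", m);
  for (int v : enc.values) std::printf("val %d\n", v);
}
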
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index d4395f2d7e..754f88da82 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -791,6 +791,35 @@ class LChunk : public ZoneObject {
};
+class LChunkBuilderBase BASE_EMBEDDED {
+ public:
+ explicit LChunkBuilderBase(Zone* zone)
+ : argument_count_(0),
+ zone_(zone) { }
+
+ virtual ~LChunkBuilderBase() { }
+
+ protected:
+ // An input operand in a register, a stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
+
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
+ void AddObjectToMaterialize(HValue* value,
+ ZoneList<HValue*>* objects_to_materialize,
+ LEnvironment* result);
+
+ Zone* zone() const { return zone_; }
+
+ int argument_count_;
+
+ private:
+ Zone* zone_;
+};
+
+
int StackSlotOffset(int index);
enum NumberUntagDMode {
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 3d459d4ffb..002e062436 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -602,27 +602,6 @@ Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
}
-static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
- // TODO(635): support extensions.
- PostponeInterruptsScope postpone(isolate);
-
- // Build AST.
- CompilationInfoWithZone info(script);
- info.MarkAsGlobal();
- // Parse and don't allow skipping lazy functions.
- if (Parser::Parse(&info)) {
- // Compile the code.
- LiveEditFunctionTracker tracker(info.isolate(), info.function());
- if (Compiler::MakeCodeForLiveEdit(&info)) {
- ASSERT(!info.code().is_null());
- tracker.RecordRootFunctionInfo(info.code());
- } else {
- info.isolate()->StackOverflow();
- }
- }
-}
-
-
// Unwraps JSValue object, returning its field "value"
static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
return Handle<Object>(jsValue->value(), jsValue->GetIsolate());
@@ -951,7 +930,7 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
try_catch.SetVerbose(true);
// A logical 'try' section.
- CompileScriptForTracker(isolate, script);
+ Compiler::CompileForLiveEdit(script);
}
// A logical 'catch' section.
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index ec8415e4b6..f1a21e2cc1 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -47,7 +47,8 @@ class Log {
static bool InitLogAtStart() {
return FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_ll_prof || FLAG_log_internal_timer_events;
+ || FLAG_log_regexp || FLAG_ll_prof || FLAG_perf_basic_prof
+ || FLAG_perf_jit_prof || FLAG_log_internal_timer_events;
}
// Frees all resources acquired in Initialize and Open... functions.
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index b353f548fb..1c332d1736 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -246,6 +246,231 @@ void CodeEventLogger::RegExpCodeCreateEvent(Code* code, String* source) {
}
+// Linux perf tool logging support
+class PerfBasicLogger : public CodeEventLogger {
+ public:
+ PerfBasicLogger();
+ virtual ~PerfBasicLogger();
+
+ virtual void CodeMoveEvent(Address from, Address to) { }
+ virtual void CodeDeleteEvent(Address from) { }
+
+ private:
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ const char* name,
+ int length);
+
+ // Extension added to V8 log file name to get the low-level log name.
+ static const char kFilenameFormatString[];
+ static const int kFilenameBufferPadding;
+
+ // File buffer size of the low-level log. We don't use the default to
+ // minimize the associated overhead.
+ static const int kLogBufferSize = 2 * MB;
+
+ FILE* perf_output_handle_;
+};
+
+const char PerfBasicLogger::kFilenameFormatString[] = "/tmp/perf-%d.map";
+// Extra space for the PID in the filename
+const int PerfBasicLogger::kFilenameBufferPadding = 16;
+
+PerfBasicLogger::PerfBasicLogger()
+ : perf_output_handle_(NULL) {
+ // Open the perf JIT dump file.
+ int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
+ ScopedVector<char> perf_dump_name(bufferSize);
+ int size = OS::SNPrintF(
+ perf_dump_name,
+ kFilenameFormatString,
+ OS::GetCurrentProcessId());
+ CHECK_NE(size, -1);
+ perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode);
+ CHECK_NE(perf_output_handle_, NULL);
+ setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
+}
+
+
+PerfBasicLogger::~PerfBasicLogger() {
+ fclose(perf_output_handle_);
+ perf_output_handle_ = NULL;
+}
+
+
+void PerfBasicLogger::LogRecordedBuffer(Code* code,
+ SharedFunctionInfo*,
+ const char* name,
+ int length) {
+ ASSERT(code->instruction_start() == code->address() + Code::kHeaderSize);
+
+ OS::FPrint(perf_output_handle_, "%llx %x %.*s\n",
+ reinterpret_cast<uint64_t>(code->instruction_start()),
+ code->instruction_size(),
+ length, name);
+}
+
+
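
The file written above is the plain-text map that Linux perf reads to symbolize JIT code: one line per code object of the form "<start address in hex> <size in hex> <name>". A minimal writer in the same spirit, using ordinary C++ I/O instead of the OS:: wrappers; the sample entry below is invented for illustration:

#include <cinttypes>
#include <cstdio>
#include <unistd.h>   // getpid(); assumes a POSIX system

// Writes one perf-map entry: hex start address, hex size, human-readable name.
void WriteMapEntry(std::FILE* f, uint64_t start, uint32_t size, const char* name) {
  std::fprintf(f, "%" PRIx64 " %x %s\n", start, size, name);
}

int main() {
  char path[64];
  std::snprintf(path, sizeof(path), "/tmp/perf-%d.map", getpid());
  std::FILE* f = std::fopen(path, "w");
  if (f == nullptr) return 1;
  WriteMapEntry(f, 0x7f0000001000ull, 0x120, "LazyCompile:foo script.js:1");
  std::fclose(f);
  return 0;
}
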
+// Linux perf tool logging support
+class PerfJitLogger : public CodeEventLogger {
+ public:
+ PerfJitLogger();
+ virtual ~PerfJitLogger();
+
+ virtual void CodeMoveEvent(Address from, Address to) { }
+ virtual void CodeDeleteEvent(Address from) { }
+
+ private:
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ const char* name,
+ int length);
+
+ // Extension added to V8 log file name to get the low-level log name.
+ static const char kFilenameFormatString[];
+ static const int kFilenameBufferPadding;
+
+ // File buffer size of the low-level log. We don't use the default to
+ // minimize the associated overhead.
+ static const int kLogBufferSize = 2 * MB;
+
+ void LogWriteBytes(const char* bytes, int size);
+ void LogWriteHeader();
+
+ static const uint32_t kJitHeaderMagic = 0x4F74496A;
+ static const uint32_t kJitHeaderVersion = 0x2;
+ static const uint32_t kElfMachIA32 = 3;
+ static const uint32_t kElfMachX64 = 62;
+ static const uint32_t kElfMachARM = 40;
+ static const uint32_t kElfMachMIPS = 10;
+
+ struct jitheader {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t total_size;
+ uint32_t elf_mach;
+ uint32_t pad1;
+ uint32_t pid;
+ uint64_t timestamp;
+ };
+
+ enum jit_record_type {
+ JIT_CODE_LOAD = 0
+ // JIT_CODE_UNLOAD = 1,
+ // JIT_CODE_CLOSE = 2,
+ // JIT_CODE_DEBUG_INFO = 3,
+ // JIT_CODE_PAGE_MAP = 4,
+ // JIT_CODE_MAX = 5
+ };
+
+ struct jr_code_load {
+ uint32_t id;
+ uint32_t total_size;
+ uint64_t timestamp;
+ uint64_t vma;
+ uint64_t code_addr;
+ uint32_t code_size;
+ uint32_t align;
+ };
+
+ uint32_t GetElfMach() {
+#if V8_TARGET_ARCH_IA32
+ return kElfMachIA32;
+#elif V8_TARGET_ARCH_X64
+ return kElfMachX64;
+#elif V8_TARGET_ARCH_ARM
+ return kElfMachARM;
+#elif V8_TARGET_ARCH_MIPS
+ return kElfMachMIPS;
+#else
+ UNIMPLEMENTED();
+ return 0;
+#endif
+ }
+
+ FILE* perf_output_handle_;
+};
+
+const char PerfJitLogger::kFilenameFormatString[] = "/tmp/jit-%d.dump";
+
+// Extra padding for the PID in the filename
+const int PerfJitLogger::kFilenameBufferPadding = 16;
+
+PerfJitLogger::PerfJitLogger()
+ : perf_output_handle_(NULL) {
+ // Open the perf JIT dump file.
+ int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
+ ScopedVector<char> perf_dump_name(bufferSize);
+ int size = OS::SNPrintF(
+ perf_dump_name,
+ kFilenameFormatString,
+ OS::GetCurrentProcessId());
+ CHECK_NE(size, -1);
+ perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode);
+ CHECK_NE(perf_output_handle_, NULL);
+ setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
+
+ LogWriteHeader();
+}
+
+
+PerfJitLogger::~PerfJitLogger() {
+ fclose(perf_output_handle_);
+ perf_output_handle_ = NULL;
+}
+
+
+void PerfJitLogger::LogRecordedBuffer(Code* code,
+ SharedFunctionInfo*,
+ const char* name,
+ int length) {
+ ASSERT(code->instruction_start() == code->address() + Code::kHeaderSize);
+ ASSERT(perf_output_handle_ != NULL);
+
+ const char* code_name = name;
+ uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start());
+ uint32_t code_size = code->instruction_size();
+
+ static const char string_terminator[] = "\0";
+
+ jr_code_load code_load;
+ code_load.id = JIT_CODE_LOAD;
+ code_load.total_size = sizeof(code_load) + length + 1 + code_size;
+ code_load.timestamp =
+ static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0);
+ code_load.vma = 0x0; // Our addresses are absolute.
+ code_load.code_addr = reinterpret_cast<uint64_t>(code->instruction_start());
+ code_load.code_size = code_size;
+ code_load.align = 0;
+
+ LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
+ LogWriteBytes(code_name, length);
+ LogWriteBytes(string_terminator, 1);
+ LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
+}
+
+
+void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
+ size_t rv = fwrite(bytes, 1, size, perf_output_handle_);
+ ASSERT(static_cast<size_t>(size) == rv);
+ USE(rv);
+}
+
+
+void PerfJitLogger::LogWriteHeader() {
+ ASSERT(perf_output_handle_ != NULL);
+ jitheader header;
+ header.magic = kJitHeaderMagic;
+ header.version = kJitHeaderVersion;
+ header.total_size = sizeof(jitheader);
+ header.pad1 = 0xdeadbeef;
+ header.elf_mach = GetElfMach();
+ header.pid = OS::GetCurrentProcessId();
+ header.timestamp = static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0);
+ LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
+}
+
+
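
Unlike the map file, the jit-<pid>.dump written here is binary: one jitheader, then one record per code object consisting of a jr_code_load struct, the NUL-terminated name, and the raw machine code, with total_size covering all three. A reader sketch under those assumptions; struct packing and byte order are assumed to match the writer:

#include <cstdint>
#include <cstdio>
#include <vector>

// Field layout mirrors the structs in the patch; packing is assumed to match.
struct JitHeader {
  uint32_t magic, version, total_size, elf_mach, pad1, pid;
  uint64_t timestamp;
};
struct JrCodeLoad {
  uint32_t id, total_size;
  uint64_t timestamp, vma, code_addr;
  uint32_t code_size, align;
};

int main(int argc, char** argv) {
  if (argc < 2) return 1;
  std::FILE* f = std::fopen(argv[1], "rb");
  if (f == nullptr) return 1;
  JitHeader header;
  if (std::fread(&header, sizeof(header), 1, f) != 1) return 1;
  std::printf("magic=%x version=%u pid=%u\n", header.magic, header.version, header.pid);
  JrCodeLoad rec;
  while (std::fread(&rec, sizeof(rec), 1, f) == 1) {
    if (rec.total_size <= sizeof(rec)) break;
    // The NUL-terminated name and the code bytes follow each record.
    std::vector<char> rest(rec.total_size - sizeof(rec));
    if (std::fread(rest.data(), 1, rest.size(), f) != rest.size()) break;
    std::printf("code at %llx, %u bytes, name=%s\n",
                static_cast<unsigned long long>(rec.code_addr),
                rec.code_size, rest.data());
  }
  std::fclose(f);
  return 0;
}
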
// Low-level logging support.
#define LL_LOG(Call) if (ll_logger_) ll_logger_->Call;
@@ -711,6 +936,8 @@ Logger::Logger(Isolate* isolate)
log_events_(NULL),
is_logging_(false),
log_(new Log(this)),
+ perf_basic_logger_(NULL),
+ perf_jit_logger_(NULL),
ll_logger_(NULL),
jit_logger_(NULL),
listeners_(5),
@@ -814,7 +1041,7 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
if (key->IsString()) {
SmartArrayPointer<char> str =
String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,check-security,\"%s\"\n", *str);
+ ApiEvent("api,check-security,\"%s\"\n", str.get());
} else if (key->IsSymbol()) {
Symbol* symbol = Symbol::cast(key);
if (symbol->name()->IsUndefined()) {
@@ -824,7 +1051,7 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,check-security,symbol(\"%s\" hash %x)\n",
- *str,
+ str.get(),
Symbol::cast(key)->Hash());
}
} else if (key->IsUndefined()) {
@@ -1026,17 +1253,18 @@ void Logger::ApiNamedPropertyAccess(const char* tag,
if (name->IsString()) {
SmartArrayPointer<char> property_name =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
+ ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, class_name.get(),
+ property_name.get());
} else {
Symbol* symbol = Symbol::cast(name);
uint32_t hash = symbol->Hash();
if (symbol->name()->IsUndefined()) {
- ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, *class_name, hash);
+ ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, class_name.get(), hash);
} else {
SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)\n",
- tag, *class_name, *str, hash);
+ tag, class_name.get(), str.get(), hash);
}
}
}
@@ -1048,7 +1276,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
String* class_name_obj = holder->class_name();
SmartArrayPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
+ ApiEvent("api,%s,\"%s\",%u\n", tag, class_name.get(), index);
}
@@ -1057,7 +1285,7 @@ void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
String* class_name_obj = object->class_name();
SmartArrayPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
+ ApiEvent("api,%s,\"%s\"\n", tag, class_name.get());
}
@@ -1105,7 +1333,7 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
if (name->IsString()) {
SmartArrayPointer<char> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append(",1,\"%s%s\"", prefix, *str);
+ msg.Append(",1,\"%s%s\"", prefix, str.get());
} else {
Symbol* symbol = Symbol::cast(name);
if (symbol->name()->IsUndefined()) {
@@ -1113,7 +1341,8 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
} else {
SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append(",1,symbol(\"%s\" hash %x)", prefix, *str, symbol->Hash());
+ msg.Append(",1,symbol(\"%s\" hash %x)", prefix, str.get(),
+ symbol->Hash());
}
}
msg.Append('\n');
@@ -1203,8 +1432,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
if (!FLAG_log_code || !log_->IsEnabled()) return;
- if (code == isolate_->builtins()->builtin(
- Builtins::kLazyCompile))
+ if (code == isolate_->builtins()->builtin(Builtins::kCompileUnoptimized))
return;
Log::MessageBuilder msg(log_);
@@ -1212,7 +1440,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
if (name->IsString()) {
SmartArrayPointer<char> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("\"%s\"", *str);
+ msg.Append("\"%s\"", str.get());
} else {
msg.AppendSymbolName(Symbol::cast(name));
}
@@ -1243,11 +1471,11 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
AppendCodeCreateHeader(&msg, tag, code);
SmartArrayPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("\"%s ", *name);
+ msg.Append("\"%s ", name.get());
if (source->IsString()) {
SmartArrayPointer<char> sourcestr =
String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s", *sourcestr);
+ msg.Append("%s", sourcestr.get());
} else {
msg.AppendSymbolName(Symbol::cast(source));
}
@@ -1611,12 +1839,7 @@ void Logger::LogCodeObject(Object* object) {
case Code::FUNCTION:
case Code::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
- case Code::BINARY_OP_IC: {
- BinaryOpStub stub(code_object->extended_extra_ic_state());
- description = stub.GetName().Detach();
- tag = Logger::STUB_TAG;
- break;
- }
+ case Code::BINARY_OP_IC:
case Code::COMPARE_IC: // fall through
case Code::COMPARE_NIL_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
@@ -1655,14 +1878,6 @@ void Logger::LogCodeObject(Object* object) {
description = "A keyed store IC from the snapshot";
tag = Logger::KEYED_STORE_IC_TAG;
break;
- case Code::CALL_IC:
- description = "A call IC from the snapshot";
- tag = Logger::CALL_IC_TAG;
- break;
- case Code::KEYED_CALL_IC:
- description = "A keyed call IC from the snapshot";
- tag = Logger::KEYED_CALL_IC_TAG;
- break;
case Code::NUMBER_OF_KINDS:
break;
}
@@ -1743,8 +1958,8 @@ void Logger::LogCompiledFunctions() {
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
- if (*code_objects[i] == isolate_->builtins()->builtin(
- Builtins::kLazyCompile))
+ if (code_objects[i].is_identical_to(
+ isolate_->builtins()->CompileUnoptimized()))
continue;
LogExistingFunction(sfis[i], code_objects[i]);
}
@@ -1842,10 +2057,21 @@ bool Logger::SetUp(Isolate* isolate) {
SmartArrayPointer<const char> log_file_name =
PrepareLogFileName(isolate, FLAG_logfile);
- log_->Initialize(*log_file_name);
+ log_->Initialize(log_file_name.get());
+
+
+ if (FLAG_perf_basic_prof) {
+ perf_basic_logger_ = new PerfBasicLogger();
+ addCodeEventListener(perf_basic_logger_);
+ }
+
+ if (FLAG_perf_jit_prof) {
+ perf_jit_logger_ = new PerfJitLogger();
+ addCodeEventListener(perf_jit_logger_);
+ }
if (FLAG_ll_prof) {
- ll_logger_ = new LowLevelLogger(*log_file_name);
+ ll_logger_ = new LowLevelLogger(log_file_name.get());
addCodeEventListener(ll_logger_);
}
@@ -1906,6 +2132,18 @@ FILE* Logger::TearDown() {
delete ticker_;
ticker_ = NULL;
+ if (perf_basic_logger_) {
+ removeCodeEventListener(perf_basic_logger_);
+ delete perf_basic_logger_;
+ perf_basic_logger_ = NULL;
+ }
+
+ if (perf_jit_logger_) {
+ removeCodeEventListener(perf_jit_logger_);
+ delete perf_jit_logger_;
+ perf_jit_logger_ = NULL;
+ }
+
if (ll_logger_) {
removeCodeEventListener(ll_logger_);
delete ll_logger_;
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index c0efd6504d..d4dc76a21c 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -113,16 +113,21 @@ struct TickSample;
V(BUILTIN_TAG, "Builtin") \
V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
- V(CALL_IC_TAG, "CallIC") \
V(CALL_INITIALIZE_TAG, "CallInitialize") \
V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
V(CALL_MISS_TAG, "CallMiss") \
V(CALL_NORMAL_TAG, "CallNormal") \
V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
+ V(LOAD_INITIALIZE_TAG, "LoadInitialize") \
+ V(LOAD_PREMONOMORPHIC_TAG, "LoadPreMonomorphic") \
+ V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic") \
+ V(STORE_INITIALIZE_TAG, "StoreInitialize") \
+ V(STORE_PREMONOMORPHIC_TAG, "StorePreMonomorphic") \
+ V(STORE_GENERIC_TAG, "StoreGeneric") \
+ V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic") \
V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
"KeyedCallDebugPrepareStepIn") \
- V(KEYED_CALL_IC_TAG, "KeyedCallIC") \
V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
@@ -154,7 +159,9 @@ struct TickSample;
class JitLogger;
+class PerfBasicLogger;
class LowLevelLogger;
+class PerfJitLogger;
class Sampler;
class Logger {
@@ -437,6 +444,8 @@ class Logger {
bool is_logging_;
Log* log_;
+ PerfBasicLogger* perf_basic_logger_;
+ PerfJitLogger* perf_jit_logger_;
LowLevelLogger* ll_logger_;
JitLogger* jit_logger_;
List<CodeEventListener*> listeners_;
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 9fdf2ee7d8..230c68ab0e 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -72,6 +72,14 @@ const int kInvalidProtoDepth = -1;
#include "x64/assembler-x64-inl.h"
#include "code.h" // must be after assembler_*.h
#include "x64/macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/constants-a64.h"
+#include "assembler.h"
+#include "a64/assembler-a64.h"
+#include "a64/assembler-a64-inl.h"
+#include "code.h" // must be after assembler_*.h
+#include "a64/macro-assembler-a64.h"
+#include "a64/macro-assembler-a64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 1785d44a8c..1722c6c7de 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -139,6 +139,10 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# we cannot handle those anyway.
macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
+# Macro for ES6 CheckObjectCoercible
+# Will throw a TypeError of the form "[functionName] called on null or undefined".
+macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL_OR_UNDEFINED(arg) && !IS_UNDETECTABLE(arg)) throw MakeTypeError('called_on_null_or_undefined', [functionName]);
+
# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
const kBoundFunctionIndex = 0;
const kBoundThisIndex = 1;
@@ -157,6 +161,14 @@ macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber
macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
+# Private names.
+macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name));
+macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
+macro HAS_PRIVATE(obj, sym) = (sym in obj);
+macro GET_PRIVATE(obj, sym) = (obj[sym]);
+macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
+macro DELETE_PRIVATE(obj, sym) = (delete obj[sym]);
+
# Constants. The compiler constant folds them.
const NAN = $NaN;
const INFINITY = (1/0);
@@ -253,3 +265,9 @@ const COMPILATION_TYPE_JSON = 2;
# Matches Messages::kNoLineNumberInfo from v8.h
const kNoLineNumberInfo = 0;
+
+# Matches PropertyAttributes from property-details.h
+const PROPERTY_ATTRIBUTES_NONE = 0;
+const PROPERTY_ATTRIBUTES_STRING = 8;
+const PROPERTY_ATTRIBUTES_SYMBOLIC = 16;
+const PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL = 32;
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 4494b75332..26a1a960c8 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -56,7 +56,7 @@ const char* Marking::kImpossibleBitPattern = "01";
// -------------------------------------------------------------------------
// MarkCompactCollector
-MarkCompactCollector::MarkCompactCollector() : // NOLINT
+MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
#ifdef DEBUG
state_(IDLE),
#endif
@@ -67,10 +67,11 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
compacting_(false),
was_marked_incrementally_(false),
sweeping_pending_(false),
+ pending_sweeper_jobs_semaphore_(0),
sequential_sweeping_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
- heap_(NULL),
+ heap_(heap),
code_flusher_(NULL),
encountered_weak_collections_(NULL),
have_code_to_deoptimize_(false) { }
@@ -93,7 +94,16 @@ class VerifyMarkingVisitor: public ObjectVisitor {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
rinfo->target_object())) {
- VisitPointer(rinfo->target_object_address());
+ Object* p = rinfo->target_object();
+ VisitPointer(&p);
+ }
+ }
+
+ void VisitCell(RelocInfo* rinfo) {
+ Code* code = rinfo->host();
+ ASSERT(rinfo->rmode() == RelocInfo::CELL);
+ if (!Code::IsWeakEmbeddedObject(code->kind(), rinfo->target_cell())) {
+ ObjectVisitor::VisitCell(rinfo);
}
}
@@ -339,6 +349,12 @@ static void VerifyNativeContextSeparation(Heap* heap) {
#endif
+void MarkCompactCollector::SetUp() {
+ free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
+ free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
+}
+
+
void MarkCompactCollector::TearDown() {
AbortCompaction();
}
@@ -405,8 +421,6 @@ void MarkCompactCollector::CollectGarbage() {
ASSERT(state_ == PREPARE_GC);
ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
- heap()->allocation_mementos_found_ = 0;
-
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
@@ -448,11 +462,6 @@ void MarkCompactCollector::CollectGarbage() {
marking_parity_ = EVEN_MARKING_PARITY;
}
- if (FLAG_trace_track_allocation_sites &&
- heap()->allocation_mementos_found_ > 0) {
- PrintF("AllocationMementos found during mark-sweep = %d\n",
- heap()->allocation_mementos_found_);
- }
tracer_ = NULL;
}
@@ -561,33 +570,81 @@ void MarkCompactCollector::ClearMarkbits() {
}
+class MarkCompactCollector::SweeperTask : public v8::Task {
+ public:
+ SweeperTask(Heap* heap, PagedSpace* space)
+ : heap_(heap), space_(space) {}
+
+ virtual ~SweeperTask() {}
+
+ private:
+ // v8::Task overrides.
+ virtual void Run() V8_OVERRIDE {
+ heap_->mark_compact_collector()->SweepInParallel(space_);
+ heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+ }
+
+ Heap* heap_;
+ PagedSpace* space_;
+
+ DISALLOW_COPY_AND_ASSIGN(SweeperTask);
+};
+
+
void MarkCompactCollector::StartSweeperThreads() {
+ // TODO(hpayer): This check is just used for debugging purposes and
+ // should be removed or turned into an assert after investigating the
+ // crash in concurrent sweeping.
+ CHECK(free_list_old_pointer_space_.get()->IsEmpty());
+ CHECK(free_list_old_data_space_.get()->IsEmpty());
sweeping_pending_ = true;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
+ if (FLAG_job_based_sweeping) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->old_data_space()),
+ v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->old_pointer_space()),
+ v8::Platform::kShortRunningTask);
+ }
}
void MarkCompactCollector::WaitUntilSweepingCompleted() {
ASSERT(sweeping_pending_ == true);
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
+ if (FLAG_job_based_sweeping) {
+ // Wait twice for both jobs.
+ pending_sweeper_jobs_semaphore_.Wait();
+ pending_sweeper_jobs_semaphore_.Wait();
+ }
+ ParallelSweepSpacesComplete();
sweeping_pending_ = false;
- StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
- StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+ RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
+ RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
}
-intptr_t MarkCompactCollector::
- StealMemoryFromSweeperThreads(PagedSpace* space) {
- intptr_t freed_bytes = 0;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
+intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
+ FreeList* free_list;
+
+ if (space == heap()->old_pointer_space()) {
+ free_list = free_list_old_pointer_space_.get();
+ } else if (space == heap()->old_data_space()) {
+ free_list = free_list_old_data_space_.get();
+ } else {
+ // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
+ // to only refill them for old data and pointer spaces.
+ return 0;
}
+
+ intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
space->AddToAccountingStats(freed_bytes);
space->DecrementUnsweptFreeBytes(freed_bytes);
return freed_bytes;
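
With job-based sweeping, StartSweeperThreads posts one background task per old space and WaitUntilSweepingCompleted waits on the counting semaphore once per posted task, hence the two Wait() calls. A stripped-down sketch of that hand-off, with std::thread and a small home-made semaphore standing in for the v8::Platform task machinery:

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

// Minimal counting semaphore; C++20 offers std::counting_semaphore instead.
class Semaphore {
 public:
  void Signal() { std::lock_guard<std::mutex> l(m_); ++count_; cv_.notify_one(); }
  void Wait() {
    std::unique_lock<std::mutex> l(m_);
    cv_.wait(l, [&] { return count_ > 0; });
    --count_;
  }
 private:
  std::mutex m_;
  std::condition_variable cv_;
  int count_ = 0;
};

void SweepSpace(const char* name, Semaphore* done) {
  std::printf("sweeping %s in the background\n", name);
  done->Signal();  // mirrors pending_sweeper_jobs_semaphore_.Signal()
}

int main() {
  Semaphore done;
  std::thread t1(SweepSpace, "old data space", &done);
  std::thread t2(SweepSpace, "old pointer space", &done);
  done.Wait();  // wait twice: one per posted job
  done.Wait();
  t1.join();
  t2.join();
  std::printf("both sweeper jobs finished\n");
}
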
@@ -595,7 +652,7 @@ intptr_t MarkCompactCollector::
bool MarkCompactCollector::AreSweeperThreadsActivated() {
- return isolate()->sweeper_threads() != NULL;
+ return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
}
@@ -732,7 +789,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
static const int kMaxMaxEvacuationCandidates = 1000;
int number_of_pages = space->CountTotalPages();
int max_evacuation_candidates =
- static_cast<int>(sqrt(number_of_pages / 2.0) + 1);
+ static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
if (FLAG_stress_compaction || FLAG_always_compact) {
max_evacuation_candidates = kMaxMaxEvacuationCandidates;
@@ -992,7 +1049,8 @@ void MarkCompactCollector::Finish() {
// objects have been marked.
void CodeFlusher::ProcessJSFunctionCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+ Code* lazy_compile =
+ isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
Object* undefined = isolate_->heap()->undefined_value();
JSFunction* candidate = jsfunction_candidates_head_;
@@ -1037,7 +1095,8 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+ Code* lazy_compile =
+ isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
SharedFunctionInfo* next_candidate;
@@ -1069,55 +1128,40 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
void CodeFlusher::ProcessOptimizedCodeMaps() {
- static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
- static const int kEntryLength = SharedFunctionInfo::kEntryLength;
- static const int kContextOffset = 0;
- static const int kCodeOffset = 1;
- static const int kLiteralsOffset = 2;
- STATIC_ASSERT(kEntryLength == 3);
+ STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
SharedFunctionInfo* holder = optimized_code_map_holder_head_;
SharedFunctionInfo* next_holder;
+
while (holder != NULL) {
next_holder = GetNextCodeMap(holder);
ClearNextCodeMap(holder);
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- int new_length = kEntriesStart;
+ int new_length = SharedFunctionInfo::kEntriesStart;
int old_length = code_map->length();
- for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
- Code* code = Code::cast(code_map->get(i + kCodeOffset));
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
- continue;
+ for (int i = SharedFunctionInfo::kEntriesStart;
+ i < old_length;
+ i += SharedFunctionInfo::kEntryLength) {
+ Code* code =
+ Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+ if (!Marking::MarkBitFrom(code).Get()) continue;
+
+ // Move every slot in the entry.
+ for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
+ int dst_index = new_length++;
+ Object** slot = code_map->RawFieldOfElementAt(dst_index);
+ Object* object = code_map->get(i + j);
+ code_map->set(dst_index, object);
+ if (j == SharedFunctionInfo::kOsrAstIdOffset) {
+ ASSERT(object->IsSmi());
+ } else {
+ ASSERT(Marking::IsBlack(
+ Marking::MarkBitFrom(HeapObject::cast(*slot))));
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(slot, slot, *slot);
+ }
}
-
- // Update and record the context slot in the optimized code map.
- Object** context_slot = HeapObject::RawField(code_map,
- FixedArray::OffsetOfElementAt(new_length));
- code_map->set(new_length++, code_map->get(i + kContextOffset));
- ASSERT(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(context_slot, context_slot, *context_slot);
-
- // Update and record the code slot in the optimized code map.
- Object** code_slot = HeapObject::RawField(code_map,
- FixedArray::OffsetOfElementAt(new_length));
- code_map->set(new_length++, code_map->get(i + kCodeOffset));
- ASSERT(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(code_slot, code_slot, *code_slot);
-
- // Update and record the literals slot in the optimized code map.
- Object** literals_slot = HeapObject::RawField(code_map,
- FixedArray::OffsetOfElementAt(new_length));
- code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
- ASSERT(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(literals_slot, literals_slot, *literals_slot);
}
// Trim the optimized code map if entries have been removed.
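
The rewritten loop compacts the optimized code map in place: an entry (now four slots long) survives only if its cached code is still marked, and every slot of a surviving entry is copied forward before the array is trimmed. A generic sketch of that in-place compaction, with a plain predicate standing in for the mark-bit check and made-up entry contents in an assumed slot order:

#include <cstdio>
#include <vector>

// Compacts a flat array of fixed-length entries in place, keeping only the
// entries for which `alive` returns true, and returns the new length.
template <typename Pred>
size_t CompactEntries(std::vector<int>* map, size_t start, size_t entry_length,
                      Pred alive) {
  size_t new_length = start;
  for (size_t i = start; i < map->size(); i += entry_length) {
    if (!alive(&(*map)[i])) continue;
    for (size_t j = 0; j < entry_length; ++j)      // move every slot of the entry
      (*map)[new_length++] = (*map)[i + j];
  }
  return new_length;
}

int main() {
  // Assumed per-entry layout [context, code, literals, osr ast id], one header slot.
  std::vector<int> code_map = {99, 1, 10, 100, -1, 2, 0, 200, -1, 3, 30, 300, -1};
  size_t new_len = CompactEntries(&code_map, 1, 4,
                                  [](const int* e) { return e[1] != 0; });  // "code is marked"
  code_map.resize(new_len);                         // trim the dead tail
  for (int v : code_map) std::printf("%d ", v);
  std::printf("\n");
}
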
@@ -1649,7 +1693,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
int object_size = obj->Size();
ASSERT(map->instance_type() == CODE_TYPE);
Code* code_obj = Code::cast(obj);
- heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetAge(),
+ heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
object_size);
ObjectStatsVisitBase(kVisitCode, map, obj);
}
@@ -1845,6 +1889,7 @@ class RootMarkingVisitor : public ObjectVisitor {
// Helper class for pruning the string table.
+template<bool finalize_external_strings>
class StringTableCleaner : public ObjectVisitor {
public:
explicit StringTableCleaner(Heap* heap)
@@ -1856,22 +1901,20 @@ class StringTableCleaner : public ObjectVisitor {
Object* o = *p;
if (o->IsHeapObject() &&
!Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
- // Check if the internalized string being pruned is external. We need to
- // delete the associated external data as this string is going away.
-
- // Since no objects have yet been moved we can safely access the map of
- // the object.
- if (o->IsExternalString()) {
+ if (finalize_external_strings) {
+ ASSERT(o->IsExternalString());
heap_->FinalizeExternalString(String::cast(*p));
+ } else {
+ pointers_removed_++;
}
// Set the entry to the_hole_value (as deleted).
*p = heap_->the_hole_value();
- pointers_removed_++;
}
}
}
int PointersRemoved() {
+ ASSERT(!finalize_external_strings);
return pointers_removed_;
}
@@ -1881,6 +1924,10 @@ class StringTableCleaner : public ObjectVisitor {
};
+typedef StringTableCleaner<false> InternalizedStringTableCleaner;
+typedef StringTableCleaner<true> ExternalStringTableCleaner;
+
+
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
@@ -1888,6 +1935,14 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
virtual Object* RetainAs(Object* object) {
if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
return object;
+ } else if (object->IsAllocationSite() &&
+ !(AllocationSite::cast(object)->IsZombie())) {
+ // "dead" AllocationSites need to live long enough for a traversal of new
+ // space. These sites get a one-time reprieve.
+ AllocationSite* site = AllocationSite::cast(object);
+ site->MarkZombie();
+ site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
+ return object;
} else {
return NULL;
}
@@ -1999,12 +2054,7 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
int size = object->Size();
survivors_size += size;
- if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
- if (AllocationMemento::FindForJSObject(JSObject::cast(object), true)
- != NULL) {
- heap()->allocation_mementos_found_++;
- }
- }
+ Heap::UpdateAllocationSiteFeedback(object);
offset++;
current_cell >>= 1;
@@ -2097,6 +2147,12 @@ void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
}
+void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
+ MarkBit mark_bit = Marking::MarkBitFrom(site);
+ SetMark(site, mark_bit);
+}
+
+
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
@@ -2395,10 +2451,12 @@ void MarkCompactCollector::AfterMarking() {
// string table. Cannot use string_table() here because the string
// table is marked.
StringTable* string_table = heap()->string_table();
- StringTableCleaner v(heap());
- string_table->IterateElements(&v);
- string_table->ElementsRemoved(v.PointersRemoved());
- heap()->external_string_table_.Iterate(&v);
+ InternalizedStringTableCleaner internalized_visitor(heap());
+ string_table->IterateElements(&internalized_visitor);
+ string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
+
+ ExternalStringTableCleaner external_visitor(heap());
+ heap()->external_string_table_.Iterate(&external_visitor);
heap()->external_string_table_.CleanUp();
// Process the weak references.
@@ -2419,11 +2477,6 @@ void MarkCompactCollector::AfterMarking() {
}
}
- if (!FLAG_watch_ic_patching) {
- // Clean up dead objects from the runtime profiler.
- heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
- }
-
if (FLAG_track_gc_object_stats) {
heap()->CheckpointObjectStats();
}
@@ -2540,6 +2593,17 @@ void MarkCompactCollector::ClearNonLiveReferences() {
}
}
+ // Iterate over allocation sites, removing dependent code that is not
+ // otherwise kept alive by strong references.
+ Object* undefined = heap()->undefined_value();
+ for (Object* site = heap()->allocation_sites_list();
+ site != undefined;
+ site = AllocationSite::cast(site)->weak_next()) {
+ if (IsMarked(site)) {
+ ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
+ }
+ }
+
if (heap_->weak_object_to_code_table()->IsHashTable()) {
WeakHashTable* table =
WeakHashTable::cast(heap_->weak_object_to_code_table());
@@ -2550,6 +2614,16 @@ void MarkCompactCollector::ClearNonLiveReferences() {
if (!table->IsKey(key)) continue;
uint32_t value_index = table->EntryToValueIndex(i);
Object* value = table->get(value_index);
+ if (key->IsCell() && !IsMarked(key)) {
+ Cell* cell = Cell::cast(key);
+ Object* object = cell->value();
+ if (IsMarked(object)) {
+ MarkBit mark = Marking::MarkBitFrom(cell);
+ SetMark(cell, mark);
+ Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
+ RecordSlot(value_slot, value_slot, *value_slot);
+ }
+ }
if (IsMarked(key)) {
if (!IsMarked(value)) {
HeapObject* obj = HeapObject::cast(value);
@@ -2592,9 +2666,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
cached_map,
SKIP_WRITE_BARRIER);
}
- Object** slot =
- HeapObject::RawField(prototype_transitions,
- FixedArray::OffsetOfElementAt(proto_index));
+ Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
RecordSlot(slot, slot, prototype);
new_number_of_transitions++;
}
@@ -2699,12 +2771,10 @@ void MarkCompactCollector::ProcessWeakCollections() {
for (int i = 0; i < table->Capacity(); i++) {
if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
Object** key_slot =
- HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
- ObjectHashTable::EntryToIndex(i)));
+ table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
RecordSlot(anchor, key_slot, *key_slot);
Object** value_slot =
- HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
- ObjectHashTable::EntryToValueIndex(i)));
+ table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
MarkCompactMarkingVisitor::MarkObjectByPointer(
this, anchor, value_slot);
}
@@ -2754,11 +2824,11 @@ void MarkCompactCollector::MigrateObject(Address dst,
int size,
AllocationSpace dest) {
HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
- if (heap_profiler->is_profiling()) {
+ if (heap_profiler->is_tracking_object_moves()) {
heap_profiler->ObjectMoveEvent(src, dst, size);
}
ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
- ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
Address src_slot = src;
Address dst_slot = dst;
@@ -2932,16 +3002,14 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
int object_size) {
// TODO(hpayer): Replace that check with an assert.
- CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ CHECK(object_size <= Page::kMaxRegularHeapObjectSize);
OldSpace* target_space = heap()->TargetSpace(object);
ASSERT(target_space == heap()->old_pointer_space() ||
target_space == heap()->old_data_space());
Object* result;
- MaybeObject* maybe_result = target_space->AllocateRaw(
- object_size,
- PagedSpace::MOVE_OBJECT);
+ MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(),
@@ -3014,7 +3082,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
int size = object->Size();
- MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);
+ MaybeObject* target = space->AllocateRaw(size);
if (target->IsFailure()) {
// OS refused to give us memory.
V8::FatalProcessOutOfMemory("Evacuation");
@@ -3041,8 +3109,12 @@ void MarkCompactCollector::EvacuatePages() {
int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
- ASSERT(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ // TODO(hpayer): This check is just used for debugging purposes and
+ // should be removed or turned into an assert after investigating the
+ // crash in concurrent sweeping.
+ CHECK(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ CHECK_EQ(static_cast<int>(p->parallel_sweeping()), 0);
if (p->IsEvacuationCandidate()) {
// During compaction we might have to request a new page.
// Check that space still have room for that.
@@ -3502,12 +3574,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
heap_->UpdateReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
- if (!FLAG_watch_ic_patching) {
- // Update JSFunction pointers from the runtime profiler.
- heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
- &updating_visitor);
- }
-
EvacuationWeakObjectRetainer evacuation_object_retainer;
heap()->ProcessWeakReferences(&evacuation_object_retainer);
@@ -3872,13 +3938,20 @@ template<MarkCompactCollector::SweepingParallelism mode>
intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
FreeList* free_list,
Page* p) {
- ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+ // TODO(hpayer): This check is just used for debugging purposes and
+ // should be removed or turned into an assert after investigating the
+ // crash in concurrent sweeping.
+ CHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
free_list != NULL) ||
(mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
free_list == NULL));
- p->MarkSweptConservatively();
+ // When parallel sweeping is active, the page will be marked after
+ // sweeping by the main thread.
+ if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
+ p->MarkSweptConservatively();
+ }
intptr_t freed_bytes = 0;
size_t size = 0;
@@ -3956,16 +4029,18 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
}
-void MarkCompactCollector::SweepInParallel(PagedSpace* space,
- FreeList* private_free_list,
- FreeList* free_list) {
+void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
PageIterator it(space);
+ FreeList* free_list = space == heap()->old_pointer_space()
+ ? free_list_old_pointer_space_.get()
+ : free_list_old_data_space_.get();
+ FreeList private_free_list(space);
while (it.has_next()) {
Page* p = it.next();
if (p->TryParallelSweeping()) {
- SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
- free_list->Concatenate(private_free_list);
+ SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
+ free_list->Concatenate(&private_free_list);
}
}
}
@@ -3988,7 +4063,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
while (it.has_next()) {
Page* p = it.next();
- ASSERT(p->parallel_sweeping() == 0);
+ ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
ASSERT(!p->IsEvacuationCandidate());
// Clear sweeping flags indicating that marking bits are still intact.
@@ -4061,7 +4136,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
reinterpret_cast<intptr_t>(p));
}
- p->set_parallel_sweeping(1);
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
space->IncreaseUnsweptFreeBytes(p);
}
break;
@@ -4103,9 +4178,10 @@ void MarkCompactCollector::SweepSpaces() {
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
- if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
- if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
+ if (AreSweeperThreadsActivated()) {
+ if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+ if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+ }
if (sweep_precisely_) how_to_sweep = PRECISE;
// Unlink evacuation candidates before sweeper threads access the list of
@@ -4152,6 +4228,24 @@ void MarkCompactCollector::SweepSpaces() {
}
+void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_IN_PROGRESS) {
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
+ p->MarkSweptConservatively();
+ }
+ }
+}
+
+
+void MarkCompactCollector::ParallelSweepSpacesComplete() {
+ ParallelSweepSpaceComplete(heap()->old_pointer_space());
+ ParallelSweepSpaceComplete(heap()->old_data_space());
+}
+
+
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (isolate()->debug()->IsLoaded() ||
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index aea5e1cf66..c966e2018e 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -571,6 +571,8 @@ class MarkCompactCollector {
static void Initialize();
+ void SetUp();
+
void TearDown();
void CollectEvacuationCandidates(PagedSpace* space);
@@ -715,13 +717,11 @@ class MarkCompactCollector {
MarkingParity marking_parity() { return marking_parity_; }
// Concurrent and parallel sweeping support.
- void SweepInParallel(PagedSpace* space,
- FreeList* private_free_list,
- FreeList* free_list);
+ void SweepInParallel(PagedSpace* space);
void WaitUntilSweepingCompleted();
- intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
+ intptr_t RefillFreeLists(PagedSpace* space);
bool AreSweeperThreadsActivated();
@@ -739,8 +739,14 @@ class MarkCompactCollector {
// marking its contents.
void MarkWeakObjectToCodeTable();
+ // Special case for processing weak references in a full collection. We need
+ // to artificially keep AllocationSites alive for a time.
+ void MarkAllocationSite(AllocationSite* site);
+
private:
- MarkCompactCollector();
+ class SweeperTask;
+
+ explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector();
bool MarkInvalidatedCode();
@@ -787,6 +793,8 @@ class MarkCompactCollector {
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_pending_;
+ Semaphore pending_sweeper_jobs_semaphore_;
+
bool sequential_sweeping_;
// A pointer to the current stack-allocated GC tracer object during a full
@@ -936,6 +944,12 @@ class MarkCompactCollector {
void SweepSpace(PagedSpace* space, SweeperType sweeper);
+ // Finalizes the parallel sweeping phase. Marks all the pages that were
+ // swept in parallel.
+ void ParallelSweepSpacesComplete();
+
+ void ParallelSweepSpaceComplete(PagedSpace* space);
+
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
@@ -953,6 +967,9 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
+ SmartPointer<FreeList> free_list_old_data_space_;
+ SmartPointer<FreeList> free_list_old_pointer_space_;
+
friend class Heap;
};
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index efab63a186..da96d967e1 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -74,12 +74,13 @@ function MathAtan2(y, x) {
// ECMA 262 - 15.8.2.6
function MathCeil(x) {
- return %Math_ceil(TO_NUMBER_INLINE(x));
+ return -MathFloor(-x);
}
// ECMA 262 - 15.8.2.7
function MathCos(x) {
- return %_MathCos(TO_NUMBER_INLINE(x));
+ x = MathAbs(x); // Convert to number and get rid of -0.
+ return TrigonometricInterpolation(x, 1);
}
// ECMA 262 - 15.8.2.8
@@ -117,9 +118,8 @@ function MathMax(arg1, arg2) { // length == 2
if (arg2 > arg1) return arg2;
if (arg1 > arg2) return arg1;
if (arg1 == arg2) {
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be
- // a Smi or a heap number.
- return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg2 : arg1;
+ // Make sure -0 is considered less than +0.
+ return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg2 : arg1;
}
// All comparisons failed, one of the arguments must be NaN.
return NAN;
@@ -128,10 +128,8 @@ function MathMax(arg1, arg2) { // length == 2
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- // Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
- // a Smi or heap number.
- if (NUMBER_IS_NAN(n) || n > r ||
- (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) {
+ // Make sure +0 is considered greater than -0.
+ if (NUMBER_IS_NAN(n) || n > r || (r === 0 && n === 0 && %_IsMinusZero(r))) {
r = n;
}
}
@@ -147,9 +145,8 @@ function MathMin(arg1, arg2) { // length == 2
if (arg2 > arg1) return arg1;
if (arg1 > arg2) return arg2;
if (arg1 == arg2) {
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be
- // a Smi or a heap number.
- return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg1 : arg2;
+ // Make sure -0 is considered less than +0.
+ return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg1 : arg2;
}
// All comparisons failed, one of the arguments must be NaN.
return NAN;
@@ -158,10 +155,8 @@ function MathMin(arg1, arg2) { // length == 2
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
- // Smi or a heap number.
- if (NUMBER_IS_NAN(n) || n < r ||
- (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) {
+ // Make sure -0 is considered less than +0.
+ if (NUMBER_IS_NAN(n) || n < r || (r === 0 && n === 0 && %_IsMinusZero(n))) {
r = n;
}
}
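
The %_IsMinusZero intrinsic replaces the old 1/x sign trick; the underlying test is simply "equal to zero and sign bit set". A small C++ analogue of the max/min tie-breaking, purely as an illustration of the semantics:

#include <cmath>
#include <cstdio>

// -0 compares equal to +0, so an explicit sign-bit check is needed to make
// max(+0, -0) return +0 and min(+0, -0) return -0.
bool IsMinusZero(double x) { return x == 0.0 && std::signbit(x); }

int main() {
  double a = 0.0, b = -0.0;
  double mx = (a == b) ? (IsMinusZero(a) ? b : a) : (a > b ? a : b);
  double mn = (a == b) ? (IsMinusZero(a) ? a : b) : (a < b ? a : b);
  std::printf("max=%g (signbit %d), min=%g (signbit %d)\n",
              mx, std::signbit(mx), mn, std::signbit(mn));
}
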
@@ -174,8 +169,15 @@ function MathPow(x, y) {
}
// ECMA 262 - 15.8.2.14
+var rngstate; // Initialized to a Uint32Array during genesis.
function MathRandom() {
- return %_RandomHeapNumber();
+ var r0 = (MathImul(18273, rngstate[0] & 0xFFFF) + (rngstate[0] >>> 16)) | 0;
+ rngstate[0] = r0;
+ var r1 = (MathImul(36969, rngstate[1] & 0xFFFF) + (rngstate[1] >>> 16)) | 0;
+ rngstate[1] = r1;
+ var x = ((r0 << 16) + (r1 & 0xFFFF)) | 0;
+ // Division by 0x100000000 through multiplication by reciprocal.
+ return (x < 0 ? (x + 0x100000000) : x) * 2.3283064365386962890625e-10;
}
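
The new MathRandom is a pair of 16-bit multiply-with-carry generators (multipliers 18273 and 36969) whose outputs are packed into one 32-bit value and scaled by 2^-32. The same recurrence transcribed to C++ as a sanity check; the seed below is arbitrary, since the real state is seeded into rngstate during genesis:

#include <cstdint>
#include <cstdio>

// Two multiply-with-carry streams; each 32-bit state word holds the previous
// 16-bit value and its carry, matching the rngstate[0]/rngstate[1] update above.
struct Rng {
  uint32_t s0, s1;
  double Next() {
    s0 = 18273u * (s0 & 0xFFFFu) + (s0 >> 16);
    s1 = 36969u * (s1 & 0xFFFFu) + (s1 >> 16);
    uint32_t x = (s0 << 16) + (s1 & 0xFFFFu);
    return x * 2.3283064365386962890625e-10;  // x / 2^32
  }
};

int main() {
  Rng rng{0x12345678u, 0x9ABCDEF0u};  // arbitrary seed for illustration
  for (int i = 0; i < 3; ++i) std::printf("%f\n", rng.Next());
}
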
// ECMA 262 - 15.8.2.15
@@ -185,7 +187,9 @@ function MathRound(x) {
// ECMA 262 - 15.8.2.16
function MathSin(x) {
- return %_MathSin(TO_NUMBER_INLINE(x));
+ x = x * 1; // Convert to number and deal with -0.
+ if (%_IsMinusZero(x)) return x;
+ return TrigonometricInterpolation(x, 0);
}
// ECMA 262 - 15.8.2.17
@@ -195,7 +199,7 @@ function MathSqrt(x) {
// ECMA 262 - 15.8.2.18
function MathTan(x) {
- return %_MathTan(TO_NUMBER_INLINE(x));
+ return MathSin(x) / MathCos(x);
}
// Non-standard extension.
@@ -204,6 +208,73 @@ function MathImul(x, y) {
}
+var kInversePiHalf = 0.636619772367581343; // 2 / pi
+var kInversePiHalfS26 = 9.48637384723993156e-9; // 2 / pi / (2^26)
+var kS26 = 1 << 26;
+var kTwoStepThreshold = 1 << 27;
+// pi / 2 rounded up
+var kPiHalf = 1.570796326794896780; // 0x192d4454fb21f93f
+// We use two parts for pi/2 to emulate a higher precision.
+// pi_half_1 only has 26 significant bits for mantissa.
+// Note that pi_half > pi_half_1 + pi_half_2
+var kPiHalf1 = 1.570796325802803040; // 0x00000054fb21f93f
+var kPiHalf2 = 9.920935796805404252e-10; // 0x3326a611460b113e
+
+var kSamples; // Initialized to a number during genesis.
+var kIndexConvert; // Initialized to kSamples / (pi/2) during genesis.
+var kSinTable; // Initialized to a Float64Array during genesis.
+var kCosXIntervalTable; // Initialized to a Float64Array during genesis.
+
+// This implements sine using the following algorithm.
+// 1) Multiplication takes care of to-number conversion.
+// 2) Reduce x to the first quadrant [0, pi/2].
+// Conveniently enough, in case of +/-Infinity, we get NaN.
+// Note that we try to use only 26 instead of 52 significant bits for
+// mantissa to avoid rounding errors when multiplying. For very large
+// input we therefore have additional steps.
+// 3) Replace x by (pi/2-x) if x was in the 2nd or 4th quadrant.
+// 4) Do a table lookup for the closest samples to the left and right of x.
+// 5) Find the derivatives at those sampling points by table lookup:
+// dsin(x)/dx = cos(x) = sin(pi/2-x) for x in [0, pi/2].
+// 6) Use cubic spline interpolation to approximate sin(x).
+// 7) Negate the result if x was in the 3rd or 4th quadrant.
+// 8) Get rid of -0 by adding 0.
+function TrigonometricInterpolation(x, phase) {
+ if (x < 0 || x > kPiHalf) {
+ var multiple;
+ while (x < -kTwoStepThreshold || x > kTwoStepThreshold) {
+ // Let's assume this loop does not terminate.
+ // All numbers x in each loop form a set S.
+ // (1) abs(x) > 2^27 for all x in S.
+ // (2) abs(multiple) != 0 since (2^27 * inverse_pi_half_s26) > 1
+ // (3) multiple is rounded down in 2^26 steps, so the rounding error is
+ // at most max(ulp, 2^26).
+ // (4) so for x > 2^27, we subtract at most (1+pi/4)x and at least
+ // (1-pi/4)x
+ // (5) The subtraction results in x' so that abs(x') <= abs(x)*pi/4.
+ // Note that this difference cannot be simply rounded off.
+ // Set S cannot exist since (5) violates (1). Loop must terminate.
+ multiple = MathFloor(x * kInversePiHalfS26) * kS26;
+ x = x - multiple * kPiHalf1 - multiple * kPiHalf2;
+ }
+ multiple = MathFloor(x * kInversePiHalf);
+ x = x - multiple * kPiHalf1 - multiple * kPiHalf2;
+ phase += multiple;
+ }
+ var double_index = x * kIndexConvert;
+ if (phase & 1) double_index = kSamples - double_index;
+ var index = double_index | 0;
+ var t1 = double_index - index;
+ var t2 = 1 - t1;
+ var y1 = kSinTable[index];
+ var y2 = kSinTable[index + 1];
+ var dy = y2 - y1;
+ return (t2 * y1 + t1 * y2 +
+ t1 * t2 * ((kCosXIntervalTable[index] - dy) * t2 +
+ (dy - kCosXIntervalTable[index + 1]) * t1))
+ * (1 - (phase & 2)) + 0;
+}
+
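
Step 6 of the algorithm comment is a cubic Hermite spline between the two neighbouring samples: t1 is the fractional index, t2 = 1 - t1, y1/y2 are the sampled sines, and the cos-based table supplies the endpoint derivatives, so the return expression is the standard Hermite interpolant arranged to save multiplications. A standalone sketch of just that step, with the derivatives computed directly rather than taken from V8's generated tables:

#include <cmath>
#include <cstdio>

// Cubic Hermite interpolation on [0, 1] with endpoint values y1, y2 and
// endpoint derivatives d1, d2 (already scaled to the sample interval).
double Hermite(double t1, double y1, double y2, double d1, double d2) {
  double t2 = 1.0 - t1;
  double dy = y2 - y1;
  return t2 * y1 + t1 * y2 +
         t1 * t2 * ((d1 - dy) * t2 + (dy - d2) * t1);
}

int main() {
  // Interpolate sin(x) between two samples 0.01 apart and compare to libm.
  double x0 = 0.30, h = 0.01, x = 0.304;
  double y1 = std::sin(x0), y2 = std::sin(x0 + h);
  double d1 = std::cos(x0) * h, d2 = std::cos(x0 + h) * h;  // derivative * interval
  std::printf("interpolated=%.12f exact=%.12f\n",
              Hermite((x - x0) / h, y1, y2, d1, d2), std::sin(x));
}
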
// -------------------------------------------------------------------
function SetUpMath() {
@@ -214,44 +285,20 @@ function SetUpMath() {
%FunctionSetInstanceClassName(MathConstructor, 'Math');
// Set up math constants.
- // ECMA-262, section 15.8.1.1.
- %OptimizeObjectForAddingMultipleProperties($Math, 8);
- %SetProperty($Math,
- "E",
- 2.7182818284590452354,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.2.
- %SetProperty($Math,
- "LN10",
- 2.302585092994046,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.3.
- %SetProperty($Math,
- "LN2",
- 0.6931471805599453,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- // ECMA-262, section 15.8.1.4.
- %SetProperty($Math,
- "LOG2E",
- 1.4426950408889634,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "LOG10E",
- 0.4342944819032518,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "PI",
- 3.1415926535897932,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "SQRT1_2",
- 0.7071067811865476,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetProperty($Math,
- "SQRT2",
- 1.4142135623730951,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %ToFastProperties($Math);
+ InstallConstants($Math, $Array(
+ // ECMA-262, section 15.8.1.1.
+ "E", 2.7182818284590452354,
+ // ECMA-262, section 15.8.1.2.
+ "LN10", 2.302585092994046,
+ // ECMA-262, section 15.8.1.3.
+ "LN2", 0.6931471805599453,
+ // ECMA-262, section 15.8.1.4.
+ "LOG2E", 1.4426950408889634,
+ "LOG10E", 0.4342944819032518,
+ "PI", 3.1415926535897932,
+ "SQRT1_2", 0.7071067811865476,
+ "SQRT2", 1.4142135623730951
+ ));
// Set up non-enumerable functions of the Math object and
// set their names.
@@ -276,6 +323,13 @@ function SetUpMath() {
"min", MathMin,
"imul", MathImul
));
+
+ %SetInlineBuiltinFlag(MathCeil);
+ %SetInlineBuiltinFlag(MathRandom);
+ %SetInlineBuiltinFlag(MathSin);
+ %SetInlineBuiltinFlag(MathCos);
+ %SetInlineBuiltinFlag(MathTan);
+ %SetInlineBuiltinFlag(TrigonometricInterpolation);
}
SetUpMath();
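The InstallConstants helper used above is defined elsewhere in V8's JS natives; presumably it walks the alternating name/value pairs and installs each one with the same DONT_ENUM | DONT_DELETE | READ_ONLY attributes that the removed %SetProperty calls spelled out. A rough user-land sketch of that effect (an approximation, not the actual helper, which uses %-runtime calls) could look like this:

```js
// Approximation of InstallConstants' effect on a pairs array like
// ["E", 2.718..., "LN10", 2.302..., ...].
function installConstantsSketch(object, pairs) {
  for (var i = 0; i < pairs.length; i += 2) {
    Object.defineProperty(object, pairs[i], {
      value: pairs[i + 1],
      writable: false,      // READ_ONLY
      enumerable: false,    // DONT_ENUM
      configurable: false   // DONT_DELETE
    });
  }
}
```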
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 9eae67a728..0077d0309f 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -43,15 +43,15 @@ void MessageHandler::DefaultMessageReport(Isolate* isolate,
Handle<Object> message_obj) {
SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj);
if (loc == NULL) {
- PrintF("%s\n", *str);
+ PrintF("%s\n", str.get());
} else {
HandleScope scope(isolate);
Handle<Object> data(loc->script()->name(), isolate);
SmartArrayPointer<char> data_str;
if (data->IsString())
data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
- PrintF("%s:%i: %s\n", *data_str ? *data_str : "<unknown>",
- loc->start_pos(), *str);
+ PrintF("%s:%i: %s\n", data_str.get() ? data_str.get() : "<unknown>",
+ loc->start_pos(), str.get());
}
}
@@ -61,7 +61,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
Factory* factory = isolate->factory();
Handle<String> type_handle = factory->InternalizeUtf8String(type);
@@ -82,10 +81,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
script_handle = GetScriptWrapper(loc->script());
}
- Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Handle<Object>::cast(factory->undefined_value())
- : Handle<Object>::cast(stack_trace);
-
Handle<Object> stack_frames_handle = stack_frames.is_null()
? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_frames);
@@ -96,7 +91,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
start,
end,
script_handle,
- stack_trace_handle,
stack_frames_handle);
return message;
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 5d84e46caa..2f4be518b2 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -95,7 +95,6 @@ class MessageHandler {
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace,
Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 0a301228d7..733fe95e2f 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -78,7 +78,7 @@ var kMessages = {
getter_must_be_callable: ["Getter must be a function: ", "%0"],
setter_must_be_callable: ["Setter must be a function: ", "%0"],
value_and_accessor: ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
- proto_object_or_null: ["Object prototype may only be an Object or null"],
+ proto_object_or_null: ["Object prototype may only be an Object or null: ", "%0"],
property_desc_object: ["Property description must be an object: ", "%0"],
redefine_disallowed: ["Cannot redefine property: ", "%0"],
define_disallowed: ["Cannot define property:", "%0", ", object is not extensible."],
@@ -104,17 +104,23 @@ var kMessages = {
observe_perform_non_string: ["Invalid non-string changeType"],
observe_perform_non_function: ["Cannot perform non-function"],
observe_notify_non_notifier: ["notify called on non-notifier object"],
- proto_poison_pill: ["Generic use of __proto__ accessor not allowed"],
not_typed_array: ["this is not a typed array."],
invalid_argument: ["invalid_argument"],
data_view_not_array_buffer: ["First argument to DataView constructor must be an ArrayBuffer"],
constructor_not_function: ["Constructor ", "%0", " requires 'new'"],
+ not_a_promise: ["%0", " is not a promise"],
+ resolver_not_a_function: ["Promise resolver ", "%0", " is not a function"],
+ promise_cyclic: ["Chaining cycle detected for promise ", "%0"],
+ array_functions_on_frozen: ["Cannot modify frozen array elements"],
+ array_functions_change_sealed: ["Cannot add/remove sealed array elements"],
+ first_argument_not_regexp: ["First argument to ", "%0", " must not be a regular expression"],
// RangeError
invalid_array_length: ["Invalid array length"],
invalid_array_buffer_length: ["Invalid array buffer length"],
+ invalid_string_length: ["Invalid string length"],
invalid_typed_array_offset: ["Start offset is too large:"],
invalid_typed_array_length: ["Invalid typed array length"],
- invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
+ invalid_typed_array_alignment: ["%0", " of ", "%1", " should be a multiple of ", "%2"],
typed_array_set_source_too_large:
["Source is too large"],
typed_array_set_negative_offset:
@@ -148,22 +154,15 @@ var kMessages = {
illegal_access: ["Illegal access"],
invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
strict_mode_with: ["Strict mode code may not include a with statement"],
- strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
+ strict_eval_arguments: ["Unexpected eval or arguments in strict mode"],
too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"],
too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
too_many_variables: ["Too many variables declared (only 131071 allowed)"],
- strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
- strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
- strict_function_name: ["Function name may not be eval or arguments in strict mode"],
strict_octal_literal: ["Octal literals are not allowed in strict mode."],
strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
accessor_data_property: ["Object literal may not have data and accessor property with the same name"],
accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"],
- strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
- strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_reserved_word: ["Use of future reserved word in strict mode"],
strict_delete: ["Delete of an unqualified identifier in strict mode."],
strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
strict_const: ["Use of const in strict mode."],
@@ -196,6 +195,10 @@ function FormatString(format, args) {
// str is one of %0, %1, %2 or %3.
try {
str = NoSideEffectToString(args[arg_num]);
+ if (str.length > 256) {
+ str = %SubString(str, 0, 239) + "...<omitted>..." +
+ %SubString(str, str.length - 2, str.length);
+ }
} catch (e) {
if (%IsJSModule(args[arg_num]))
str = "module";
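For reference, the truncation added above keeps an over-long formatted argument at 256 characters: the first 239 characters, a 15-character "...<omitted>..." marker, and the last two characters. A standalone equivalent (using String.prototype.substring in place of the %SubString runtime call) would be:

```js
// Standalone sketch of the new truncation rule in FormatString.
function truncateMessageArg(str) {
  if (str.length <= 256) return str;
  return str.substring(0, 239) + "...<omitted>..." +
         str.substring(str.length - 2, str.length);
}

// truncateMessageArg(new Array(1000).join("x")) ends with "...<omitted>...xx".
```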
@@ -783,64 +786,67 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-var CallSiteReceiverKey = %CreateSymbol("receiver");
-var CallSiteFunctionKey = %CreateSymbol("function");
-var CallSitePositionKey = %CreateSymbol("position");
-var CallSiteStrictModeKey = %CreateSymbol("strict mode");
+//TODO(rossberg)
+var CallSiteReceiverKey = NEW_PRIVATE("receiver");
+var CallSiteFunctionKey = NEW_PRIVATE("function");
+var CallSitePositionKey = NEW_PRIVATE("position");
+var CallSiteStrictModeKey = NEW_PRIVATE("strict mode");
function CallSite(receiver, fun, pos, strict_mode) {
- this[CallSiteReceiverKey] = receiver;
- this[CallSiteFunctionKey] = fun;
- this[CallSitePositionKey] = pos;
- this[CallSiteStrictModeKey] = strict_mode;
+ SET_PRIVATE(this, CallSiteReceiverKey, receiver);
+ SET_PRIVATE(this, CallSiteFunctionKey, fun);
+ SET_PRIVATE(this, CallSitePositionKey, pos);
+ SET_PRIVATE(this, CallSiteStrictModeKey, strict_mode);
}
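The NEW_PRIVATE / SET_PRIVATE / GET_PRIVATE macros expand to V8-internal runtime calls; conceptually they keep the call-site data under private keys so that script code cannot observe or tamper with it through ordinary property access. A very rough user-land analogue (illustrative only, using an ordinary symbol where the real macros use private symbols that are not exposed to script at all) looks like:

```js
// Illustrative analogue of the private-key storage pattern, not the macro
// expansion itself.
var receiverKey = Symbol("receiver");

function ExampleCallSite(receiver) {
  this[receiverKey] = receiver;   // not reachable via a string-keyed lookup
}

ExampleCallSite.prototype.getThis = function() {
  return this[receiverKey];
};
```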
function CallSiteGetThis() {
- return this[CallSiteStrictModeKey] ? UNDEFINED : this[CallSiteReceiverKey];
+ return GET_PRIVATE(this, CallSiteStrictModeKey)
+ ? UNDEFINED : GET_PRIVATE(this, CallSiteReceiverKey);
}
function CallSiteGetTypeName() {
- return GetTypeName(this[CallSiteReceiverKey], false);
+ return GetTypeName(GET_PRIVATE(this, CallSiteReceiverKey), false);
}
function CallSiteIsToplevel() {
- if (this[CallSiteReceiverKey] == null) {
+ if (GET_PRIVATE(this, CallSiteReceiverKey) == null) {
return true;
}
- return IS_GLOBAL(this[CallSiteReceiverKey]);
+ return IS_GLOBAL(GET_PRIVATE(this, CallSiteReceiverKey));
}
function CallSiteIsEval() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return script && script.compilation_type == COMPILATION_TYPE_EVAL;
}
function CallSiteGetEvalOrigin() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return FormatEvalOrigin(script);
}
function CallSiteGetScriptNameOrSourceURL() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return script ? script.nameOrSourceURL() : null;
}
function CallSiteGetFunction() {
- return this[CallSiteStrictModeKey] ? UNDEFINED : this[CallSiteFunctionKey];
+ return GET_PRIVATE(this, CallSiteStrictModeKey)
+ ? UNDEFINED : GET_PRIVATE(this, CallSiteFunctionKey);
}
function CallSiteGetFunctionName() {
// See if the function knows its own name
- var name = this[CallSiteFunctionKey].name;
+ var name = GET_PRIVATE(this, CallSiteFunctionKey).name;
if (name) {
return name;
}
- name = %FunctionGetInferredName(this[CallSiteFunctionKey]);
+ name = %FunctionGetInferredName(GET_PRIVATE(this, CallSiteFunctionKey));
if (name) {
return name;
}
// Maybe this is an evaluation?
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
if (script && script.compilation_type == COMPILATION_TYPE_EVAL) {
return "eval";
}
@@ -850,8 +856,8 @@ function CallSiteGetFunctionName() {
function CallSiteGetMethodName() {
// See if we can find a unique property on the receiver that holds
// this function.
- var receiver = this[CallSiteReceiverKey];
- var fun = this[CallSiteFunctionKey];
+ var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
+ var fun = GET_PRIVATE(this, CallSiteFunctionKey);
var ownName = fun.name;
if (ownName && receiver &&
(%_CallFunction(receiver, ownName, ObjectLookupGetter) === fun ||
@@ -880,49 +886,51 @@ function CallSiteGetMethodName() {
}
function CallSiteGetFileName() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return script ? script.name : null;
}
function CallSiteGetLineNumber() {
- if (this[CallSitePositionKey] == -1) {
+ if (GET_PRIVATE(this, CallSitePositionKey) == -1) {
return null;
}
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
var location = null;
if (script) {
- location = script.locationFromPosition(this[CallSitePositionKey], true);
+ location = script.locationFromPosition(
+ GET_PRIVATE(this, CallSitePositionKey), true);
}
return location ? location.line + 1 : null;
}
function CallSiteGetColumnNumber() {
- if (this[CallSitePositionKey] == -1) {
+ if (GET_PRIVATE(this, CallSitePositionKey) == -1) {
return null;
}
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
var location = null;
if (script) {
- location = script.locationFromPosition(this[CallSitePositionKey], true);
+ location = script.locationFromPosition(
+ GET_PRIVATE(this, CallSitePositionKey), true);
}
return location ? location.column + 1: null;
}
function CallSiteIsNative() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return script ? (script.type == TYPE_NATIVE) : false;
}
function CallSiteGetPosition() {
- return this[CallSitePositionKey];
+ return GET_PRIVATE(this, CallSitePositionKey);
}
function CallSiteIsConstructor() {
- var receiver = this[CallSiteReceiverKey];
+ var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
var constructor = (receiver != null && IS_OBJECT(receiver))
? %GetDataProperty(receiver, "constructor") : null;
if (!constructor) return false;
- return this[CallSiteFunctionKey] === constructor;
+ return GET_PRIVATE(this, CallSiteFunctionKey) === constructor;
}
function CallSiteToString() {
@@ -931,14 +939,10 @@ function CallSiteToString() {
if (this.isNative()) {
fileLocation = "native";
} else {
- if (this.isEval()) {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName) {
- fileLocation = this.getEvalOrigin();
- fileLocation += ", "; // Expecting source position to follow.
- }
- } else {
- fileName = this.getFileName();
+ fileName = this.getScriptNameOrSourceURL();
+ if (!fileName && this.isEval()) {
+ fileLocation = this.getEvalOrigin();
+ fileLocation += ", "; // Expecting source position to follow.
}
if (fileName) {
@@ -965,7 +969,7 @@ function CallSiteToString() {
var isConstructor = this.isConstructor();
var isMethodCall = !(this.isToplevel() || isConstructor);
if (isMethodCall) {
- var typeName = GetTypeName(this[CallSiteReceiverKey], true);
+ var typeName = GetTypeName(GET_PRIVATE(this, CallSiteReceiverKey), true);
var methodName = this.getMethodName();
if (functionName) {
if (typeName &&
@@ -1247,23 +1251,24 @@ var visited_errors = new InternalArray();
var cyclic_error_marker = new $Object();
function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
+ var current = error;
// Climb the prototype chain until we find the holder.
- while (error && !%HasLocalProperty(error, name)) {
- error = %GetPrototype(error);
+ while (current && !%HasLocalProperty(current, name)) {
+ current = %GetPrototype(current);
}
- if (IS_NULL(error)) return UNDEFINED;
- if (!IS_OBJECT(error)) return error[name];
+ if (IS_NULL(current)) return UNDEFINED;
+ if (!IS_OBJECT(current)) return error[name];
// If the property is an accessor on one of the predefined errors that can be
// generated statically by the compiler, don't touch it. This is to address
// http://code.google.com/p/chromium/issues/detail?id=69187
- var desc = %GetOwnProperty(error, name);
+ var desc = %GetOwnProperty(current, name);
if (desc && desc[IS_ACCESSOR_INDEX]) {
var isName = name === "name";
- if (error === $ReferenceError.prototype)
+ if (current === $ReferenceError.prototype)
return isName ? "ReferenceError" : UNDEFINED;
- if (error === $SyntaxError.prototype)
+ if (current === $SyntaxError.prototype)
return isName ? "SyntaxError" : UNDEFINED;
- if (error === $TypeError.prototype)
+ if (current === $TypeError.prototype)
return isName ? "TypeError" : UNDEFINED;
}
// Otherwise, read normally.
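A simplified standalone version of the lookup above (using standard reflection APIs instead of the %-runtime calls, and simply skipping accessor properties rather than special-casing the predefined error prototypes) could be written as:

```js
// Simplified sketch: walk the prototype chain and read data properties
// directly so that user-installed getters are never invoked.
function getWithoutInvokingGetters(obj, name) {
  var holder = obj;
  while (holder !== null &&
         !Object.prototype.hasOwnProperty.call(holder, name)) {
    holder = Object.getPrototypeOf(holder);
  }
  if (holder === null) return undefined;
  var desc = Object.getOwnPropertyDescriptor(holder, name);
  if (desc.get || desc.set) return undefined;  // don't run accessors
  return desc.value;
}
```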
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 8c825d24ee..514b3aaa4f 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -190,16 +190,6 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
}
-Object** RelocInfo::target_object_address() {
- // Provide a "natural pointer" to the embedded object,
- // which can be de-referenced during heap iteration.
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- reconstructed_obj_ptr_ =
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
- return &reconstructed_obj_ptr_;
-}
-
-
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
@@ -213,10 +203,9 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
}
-Address* RelocInfo::target_reference_address() {
+Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
- return &reconstructed_adr_ptr_;
+ return Assembler::target_address_at(pc_);
}
@@ -324,6 +313,15 @@ void RelocInfo::set_call_object(Object* target) {
}
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, NULL);
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
Instr instr0 = Assembler::instr_at(pc_);
Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 0972a8295c..f551dd5e10 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -260,6 +260,12 @@ MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
}
+MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
+ OffsetAddend offset_addend) : Operand(rm) {
+ offset_ = unit * multiplier + offset_addend;
+}
+
+
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -326,6 +332,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
}
@@ -1622,6 +1629,15 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
}
+void Assembler::pref(int32_t hint, const MemOperand& rs) {
+ ASSERT(kArchVariant != kLoongson);
+ ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
+ Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
+ | (rs.offset_);
+ emit(instr);
+}
+
+
//--------Coprocessor-instructions----------------
// Load, store, move.
@@ -2031,6 +2047,14 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::emit_code_stub_address(Code* stub) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) =
+ reinterpret_cast<uint32_t>(stub->instruction_start());
+ pc_ += sizeof(uint32_t);
+}
+
+
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 2468c3c340..70f77eaeda 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -386,7 +386,15 @@ class Operand BASE_EMBEDDED {
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
+ // Immediate value attached to offset.
+ enum OffsetAddend {
+ offset_minus_one = -1,
+ offset_zero = 0
+ };
+
explicit MemOperand(Register rn, int32_t offset = 0);
+ explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
+ OffsetAddend offset_addend = offset_zero);
int32_t offset() const { return offset_; }
bool OffsetIsInt16Encodable() const {
@@ -537,13 +545,6 @@ class Assembler : public AssemblerBase {
target);
}
- // This sets the branch destination.
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
@@ -718,6 +719,11 @@ class Assembler : public AssemblerBase {
void swr(Register rd, const MemOperand& rs);
+ //----------------Prefetch--------------------
+
+ void pref(int32_t hint, const MemOperand& rs);
+
+
//-------------Misc-instructions--------------
// Break / Trap instructions.
@@ -896,6 +902,9 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
+ // Emits the address of the code stub's first instruction.
+ void emit_code_stub_address(Code* stub);
+
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index d0ae073737..7a097a35a5 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -36,6 +36,7 @@
#include "deoptimizer.h"
#include "full-codegen.h"
#include "runtime.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -122,7 +123,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
+ __ SmiTst(a2, t0);
__ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
@@ -152,7 +153,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
+ __ SmiTst(a2, t0);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
@@ -297,21 +298,16 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
- // Function is also the parameter to the runtime call.
- __ push(a1);
+ // The function is pushed again as the parameter to the runtime call.
+ __ Push(a1, a1);
__ CallRuntime(function_id, 1);
- // Restore call kind information.
- __ pop(t1);
- // Restore receiver.
- __ pop(a1);
+ // Restore receiver.
+ __ Pop(a1);
}
@@ -323,7 +319,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -333,22 +335,14 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -421,14 +415,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sb(t0, constructor_count);
__ Branch(&allocate, ne, t0, Operand(zero_reg));
- __ Push(a1, a2);
-
- __ push(a1); // Constructor.
+ __ Push(a1, a2, a1); // a1 = Constructor.
// The call will replace the stub, so the countdown is only done once.
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ pop(a2);
- __ pop(a1);
+ __ Pop(a1, a2);
__ bind(&allocate);
}
@@ -594,8 +585,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Receiver for constructor call allocated.
// t4: JSObject
__ bind(&allocated);
- __ push(t4);
- __ push(t4);
+ __ Push(t4, t4);
// Reload the number of arguments from the stack.
// sp[0]: receiver
@@ -638,13 +628,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
@@ -777,8 +764,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ CallStub(&stub);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Leave internal frame.
@@ -798,22 +784,39 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ // Push function as parameter to the runtime call.
+ __ Push(a1, a1);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+ // Restore receiver.
+ __ Pop(a1);
}
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
@@ -833,7 +836,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
(a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a2);
+ __ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
@@ -872,7 +875,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
(a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a2);
+ __ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
@@ -881,7 +884,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// Perform prologue operations usually performed by the young code stub.
__ Push(ra, fp, cp, a1);
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Jump to point after the code-age stub.
__ Addu(a0, a0, Operand((kNoCodeAgeSequenceLength) * Assembler::kInstrSize));
@@ -977,18 +980,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ lw(a1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ lw(a2, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Subu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Subu(a1, a1, a2);
- __ SmiTag(a1);
-
- // Pass both function and pc offset as arguments.
+ // Pass function as argument.
__ push(a0);
- __ push(a1);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
// If the code object is null, just return to the unoptimized code.
@@ -1097,9 +1091,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ push(a0);
-
- __ push(a2);
+ __ Push(a0, a2);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(a2, v0);
@@ -1114,14 +1106,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ li(t0, Operand(0, RelocInfo::NONE32));
__ Branch(&patch_receiver);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -1183,17 +1169,16 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Branch(&function, eq, t0, Operand(zero_reg));
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(a2, zero_reg);
- __ SetCallKind(t1, CALL_AS_METHOD);
__ Branch(&non_proxy, ne, t0, Operand(1));
__ push(a1); // Re-add proxy object as additional argument.
__ Addu(a0, a0, Operand(1));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
@@ -1208,24 +1193,24 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ lw(a2,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
__ sra(a2, a2, kSmiTagSize);
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ SetCallKind(t1, CALL_AS_METHOD);
// Check formal and actual parameter counts.
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(0);
- __ InvokeCode(a3, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
@@ -1250,16 +1235,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ lw(a1, MemOperand(fp, kFunctionOffset));
- __ push(a1);
- __ push(v0);
+ __ Push(a1, v0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
// End of stack check.
// Push current limit and index.
__ bind(&okay);
- __ push(v0); // Limit.
- __ mov(a1, zero_reg); // Initial index.
- __ push(a1);
+ __ mov(a1, zero_reg);
+ __ Push(v0, a1); // Limit and initial index.
// Get the receiver.
__ lw(a0, MemOperand(fp, kRecvOffset));
@@ -1308,13 +1291,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
__ Branch(&push_receiver);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+ __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -1332,8 +1310,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// a0: current argument index
__ bind(&loop);
__ lw(a1, MemOperand(fp, kArgsOffset));
- __ push(a1);
- __ push(a0);
+ __ Push(a1, a0);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
@@ -1350,7 +1327,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ lw(a1, MemOperand(fp, kLimitOffset));
__ Branch(&loop, ne, a0, Operand(a1));
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(a0);
__ sra(a0, a0, kSmiTagSize);
@@ -1358,20 +1335,18 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ GetObjectType(a1, a2, a2);
__ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ Ret(USE_DELAY_SLOT);
__ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
__ push(a1); // Add function proxy as last argument.
__ Addu(a0, a0, Operand(1));
__ li(a2, Operand(0, RelocInfo::NONE32));
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
// Tear down the internal frame and remove function, receiver and args.
@@ -1386,7 +1361,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
- __ Addu(fp, sp, Operand(3 * kPointerSize));
+ __ Addu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1396,7 +1372,8 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ lw(a1, MemOperand(fp, -3 * kPointerSize));
+ __ lw(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
__ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
@@ -1412,13 +1389,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- a0: actual arguments count
// -- a1: function (passed through to callee)
// -- a2: expected arguments count
- // -- a3: callee code entry
- // -- t1: call kind information
// -----------------------------------
Label invoke, dont_adapt_arguments;
Label enough, too_few;
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Branch(&dont_adapt_arguments, eq,
a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
// We use Uless as the number of argument should always be greater than 0.
@@ -1494,7 +1470,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ sll(t2, a2, kPointerSizeLog2);
__ Subu(a2, fp, Operand(t2));
- __ Addu(a2, a2, Operand(-4 * kPointerSize)); // Adjust for frame.
+ // Adjust for frame.
+ __ Subu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index e334b2896e..e83447ada0 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -50,6 +50,16 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
}
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -78,7 +88,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -96,8 +106,8 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { a2, a3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -114,6 +124,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = {a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2, a1, a0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -177,15 +209,22 @@ static void InitializeArrayConstructorDescriptor(
// register state
// a0 -- number of arguments
// a1 -- function
- // a2 -- type info cell with elements kind
- static Register registers[] = { a1, a2 };
- descriptor->register_param_count_ = 2;
- if (constant_stack_parameter_count != 0) {
+ // a2 -- allocation site with elements kind
+ static Register registers_variable_args[] = { a1, a2, a0 };
+ static Register registers_no_args[] = { a1, a2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // The stack parameter count needs to cover the constructor pointer and a single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = a0;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
@@ -199,15 +238,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// a0 -- number of arguments
// a1 -- constructor function
- static Register registers[] = { a1 };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { a1, a0 };
+ static Register registers_no_args[] = { a1 };
- if (constant_stack_parameter_count != 0) {
- // Stack param count needs (constructor pointer, and single argument).
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // The stack parameter count needs to cover the constructor pointer and a single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = a0;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -291,6 +336,124 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2, a1, a0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { a1, // JSFunction
+ cp, // context
+ a0, // actual number of arguments
+ a2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ a2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ a2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ a0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { a0, // callee
+ t0, // call_data
+ a2, // holder
+ a1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -320,9 +483,12 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
ASSERT(descriptor->register_param_count_ == 0 ||
a0.is(descriptor->register_params_[param_count - 1]));
- // Push arguments
+ // Push arguments, adjust sp.
+ __ Subu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->register_params_[i]);
+ // Store argument to stack.
+ __ sw(descriptor->register_params_[i],
+ MemOperand(sp, (param_count-1-i) * kPointerSize));
}
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
@@ -332,107 +498,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ li(a1, Operand(Smi::FromInt(0)));
- __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(1);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(a3, &after_sentinel);
- if (FLAG_debug_code) {
- __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
- }
- __ lw(a3, GlobalObjectOperand());
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
- __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
- __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
- __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
- __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(2);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -562,7 +627,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Try a conversion to a signed integer.
__ Trunc_w_d(double_scratch, double_scratch);
// Move the converted value into the result register.
- __ mfc1(result_reg, double_scratch);
+ __ mfc1(scratch3, double_scratch);
// Retrieve and restore the FCSR.
__ cfc1(scratch, FCSR);
@@ -573,8 +638,12 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
scratch, scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
| kFCSRInvalidOpFlagMask);
- // If we had no exceptions we are done.
- __ Branch(&done, eq, scratch, Operand(zero_reg));
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
}
// Load the double value and perform a manual truncation.
@@ -666,33 +735,12 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(a1) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a2) &&
- sign_.is(a3)) {
- return true;
- }
- if (the_int_.is(a2) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a3) &&
- sign_.is(a0)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
+ stub2.GetCode(isolate);
}
@@ -1207,6 +1255,31 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushSafepointRegistersAndDoubles();
+ } else {
+ __ PushSafepointRegisters();
+ }
+ __ Jump(t9);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ __ StoreToSafepointRegisterSlot(t9, t9);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopSafepointRegistersAndDoubles();
+ } else {
+ __ PopSafepointRegisters();
+ }
+ __ Jump(t9);
+}
+
+
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -1234,253 +1307,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a1, a0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in f4, double result goes
- // into f4.
- // Tagged case: tagged input on top of stack and in a0,
- // tagged result (heap number) goes into v0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = t5;
- const Register scratch1 = t3;
- const Register cache_entry = a0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (tagged) {
- // Argument is a number and is on stack and in a0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(a0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into a2, a3.
- __ sra(t0, a0, kSmiTagSize);
- __ mtc1(t0, f4);
- __ cvt_d_w(f4, f4);
- __ Move(a2, a3, f4);
- __ Branch(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(a0,
- a1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Store the
- // low and high words into a2, a3.
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
- } else {
- // Input is untagged double in f4. Output goes to f4.
- __ Move(a2, a3, f4);
- }
- __ bind(&loaded);
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ Xor(a1, a2, a3);
- __ sra(t0, a1, 16);
- __ Xor(a1, a1, t0);
- __ sra(t0, a1, 8);
- __ Xor(a1, a1, t0);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // a1 = TranscendentalCache::hash(double value).
- __ li(cache_entry, Operand(
- ExternalReference::transcendental_cache_array_address(
- masm->isolate())));
- // a0 points to cache array.
- __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
- Isolate::Current()->transcendental_cache()->caches_[0])));
- // a0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
-
-#ifdef DEBUG
- // Check that the layout of cache elements matches expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the a1-th entry in the cache, i.e., &a0[a1*12].
- __ sll(t0, a1, 1);
- __ Addu(a1, a1, t0);
- __ sll(t0, a1, 2);
- __ Addu(cache_entry, cache_entry, t0);
-
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ lw(t0, MemOperand(cache_entry, 0));
- __ lw(t1, MemOperand(cache_entry, 4));
- __ lw(t2, MemOperand(cache_entry, 8));
- __ Branch(&calculate, ne, a2, Operand(t0));
- __ Branch(&calculate, ne, a3, Operand(t1));
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into v0.
- __ Drop(1);
- __ mov(v0, t2);
- } else {
- // Load result into f4.
- __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
- }
- __ Ret();
-
- __ bind(&calculate);
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
- if (tagged) {
- __ bind(&invalid_cache);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
- masm->isolate()),
- 1,
- 1);
- } else {
- Label no_update;
- Label skip_cache;
-
- // Call C function to calculate the result and update the cache.
- // a0: precalculated cache entry address.
- // a2 and a3: parts of the double value.
- // Store a0, a2 and a3 on stack for later before calling C function.
- __ Push(a3, a2, cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(f4);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ Pop(a3, a2, cache_entry);
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
- __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
-
- __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
- __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
- __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, cache_entry);
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
- __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(f4);
- __ bind(&no_update);
-
- // We return the value in f4 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ li(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-}
-
-
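For reference, the cache hash that the removed TranscendentalCacheStub::Generate computes above follows the formula stated in its comment. A standalone C++ sketch, assuming little-endian word order as on this MIPS port (the cache_size parameter stands in for TranscendentalCache::SubCache::kCacheSize):

    #include <cstdint>
    #include <cstring>

    // h = low ^ high; h ^= h >> 16; h ^= h >> 8; h &= cache_size - 1,
    // with arithmetic shifts, matching the Xor/sra/And sequence in the stub.
    uint32_t TranscendentalCacheHash(double value, uint32_t cache_size) {
      uint32_t words[2];  // words[0] = low word, words[1] = high word (little-endian)
      std::memcpy(words, &value, sizeof(words));
      int32_t h = static_cast<int32_t>(words[0] ^ words[1]);
      h ^= h >> 16;
      h ^= h >> 8;
      return static_cast<uint32_t>(h) & (cache_size - 1);  // cache_size is a power of two
    }

Each cache element is 12 bytes (two uint32 input words plus a result pointer), which is why the stub scales the hash by 3 and then by 4 before adding it to the cache base address.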
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- __ push(ra);
- __ PrepareCallCFunction(2, scratch);
- if (IsMipsSoftFloatABI) {
- __ Move(a0, a1, f4);
- } else {
- __ mov_d(f12, f4);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- Isolate* isolate = masm->isolate();
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(
- ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(
- ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(
- ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(ra);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
const Register exponent = a2;
@@ -1597,13 +1423,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch2);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
}
__ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
@@ -1681,13 +1507,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
}
__ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
@@ -1701,21 +1527,37 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
- return (!save_doubles_ || isolate->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- BinaryOpStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(
+ Isolate* isolate) {
+ StoreRegistersStateStub stub1(kDontSaveFPRegs);
+ stub1.GetCode(isolate);
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreRegistersStateStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate);
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(
+ Isolate* isolate) {
+ RestoreRegistersStateStub stub1(kDontSaveFPRegs);
+ stub1.GetCode(isolate);
+ // Hydrogen code stubs need stub2 at snapshot time.
+ RestoreRegistersStateStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate);
}
@@ -1734,16 +1576,13 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
store_buffer_overflow_code = *stub.GetCode(isolate);
}
- save_doubles_code->set_is_pregenerated(true);
- store_buffer_overflow_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -2921,7 +2760,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the RegExp has been compiled (data contains a fixed array).
__ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- __ And(t0, regexp_data, Operand(kSmiTagMask));
+ __ SmiTst(regexp_data, t0);
__ Check(nz,
kUnexpectedTypeForRegExpDataFixedArrayExpected,
t0,
@@ -3312,155 +3151,86 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ lw(a1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(a1, &slowcase);
- __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in a2.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
- __ Addu(a2, t1, Operand(objects_size));
- __ Allocate(
- a2, // In: Size, in words.
- v0, // Out: Start of allocation (tagged).
- a3, // Scratch register.
- t0, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // v0: Start of allocated area, object-tagged.
- // a1: Number of elements in array, as smi.
- // t1: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
- __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
- __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ lw(a1, MemOperand(sp, kPointerSize * 0));
- __ lw(a2, MemOperand(sp, kPointerSize * 1));
- __ lw(t2, MemOperand(sp, kPointerSize * 2));
- __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
- __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
- __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // v0: JSArray, tagged.
- // a3: FixedArray, tagged.
- // t1: Number of elements in array, untagged.
-
- // Set map.
- __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ sll(t2, t1, kSmiTagSize);
- __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with undefined.
- // v0: JSArray, tagged.
- // a2: undefined.
- // a3: Start of elements in FixedArray.
- // t1: Number of elements to fill.
- Label loop;
- __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
- __ addu(t1, t1, a3); // Point past last element to store.
- __ bind(&loop);
- __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
- __ sw(a2, MemOperand(a3));
- __ Branch(&loop, USE_DELAY_SLOT);
- __ addiu(a3, a3, kPointerSize); // In branch delay slot.
-
- __ bind(&done);
- __ DropAndRet(3);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
- // a2 : cache cell for call target
- Label initialize, done, miss, megamorphic, not_array_function;
+ // a2 : Feedback vector
+ // a3 : slot in feedback vector (Smi)
+ Label check_array, initialize_array, initialize_non_array, megamorphic, done;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+ Heap::RootListIndex kMegamorphicRootIndex = Heap::kUndefinedValueRootIndex;
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->the_hole_value());
+ Heap::RootListIndex kUninitializedRootIndex = Heap::kTheHoleValueRootIndex;
+ ASSERT_EQ(*TypeFeedbackInfo::PremonomorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->null_value());
+ Heap::RootListIndex kPremonomorphicRootIndex = Heap::kNullValueRootIndex;
- // Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ // Load the cache state into t0.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ Branch(&done, eq, a3, Operand(a1));
-
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in a3.
- __ lw(t1, FieldMemOperand(a3, 0));
+ __ Branch(&done, eq, t0, Operand(a1));
+ __ LoadRoot(at, kMegamorphicRootIndex);
+ __ Branch(&done, eq, t0, Operand(at));
+
+ // Check if we're dealing with the Array function or not.
+ __ LoadArrayFunction(t1);
+ __ Branch(&check_array, eq, a1, Operand(t1));
+
+ // Non-array cache: Check the cache state.
+ __ LoadRoot(at, kPremonomorphicRootIndex);
+ __ Branch(&initialize_non_array, eq, t0, Operand(at));
+ __ LoadRoot(at, kUninitializedRootIndex);
+ __ Branch(&megamorphic, ne, t0, Operand(at));
+
+ // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
+ // immortal immovable object (null) so no write-barrier is needed.
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, at);
+ __ LoadRoot(at, kPremonomorphicRootIndex);
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); // In delay slot.
+
+ // Array cache: Check the cache state to see if we're in a monomorphic
+ // state where the state object is an AllocationSite object.
+ __ bind(&check_array);
+ __ lw(t1, FieldMemOperand(t0, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, t1, Operand(at));
-
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&megamorphic, ne, a1, Operand(a3));
- __ jmp(&done);
+ __ Branch(&done, eq, t1, Operand(at));
- __ bind(&miss);
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ LoadRoot(at, kUninitializedRootIndex);
+ __ Branch(&initialize_array, eq, t0, Operand(at));
+ __ LoadRoot(at, kPremonomorphicRootIndex);
+ __ Branch(&initialize_array, eq, t0, Operand(at));
- // A monomorphic miss (i.e., here the cache is not uninitialized) goes
- // megamorphic.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialize, eq, a3, Operand(at));
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
+ // Both caches: Monomorphic -> megamorphic. The sentinel is an
+ // immortal immovable object (undefined) so no write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
- __ jmp(&done);
-
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&not_array_function, ne, a1, Operand(a3));
-
- // The target function is the Array constructor.
- // Create an AllocationSite if we don't already have it, store it in the cell.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ LoadRoot(at, kMegamorphicRootIndex);
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); // In delay slot.
+
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ bind(&initialize_array);
{
FrameScope scope(masm, StackFrame::INTERNAL);
const RegList kSavedRegs =
1 << 4 | // a0
1 << 5 | // a1
- 1 << 6; // a2
+ 1 << 6 | // a2
+ 1 << 7; // a3
// Arguments register must be smi-tagged to call out.
__ SmiTag(a0);
@@ -3474,9 +3244,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
}
__ Branch(&done);
- __ bind(&not_array_function);
- __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ // Non-array cache: Premonomorphic -> monomorphic.
+ __ bind(&initialize_non_array);
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sw(a1, MemOperand(t0, 0));
+
+ __ Push(t0, a2, a1);
+ __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(t0, a2, a1);
__ bind(&done);
}
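The rewritten GenerateRecordCallTarget above keeps per-call-site feedback in a FixedArray slot instead of a property cell: the Smi slot index is shifted by kPointerSizeLog2 - kSmiTagSize to form a byte offset into the vector. The transitions it encodes can be modeled roughly as follows; the types are hypothetical stand-ins, and the AllocationSite object and write barrier are simplified away:

    // Rough model of the feedback slot states used above: the-hole means
    // uninitialized, null means premonomorphic, undefined means megamorphic,
    // and a stored JSFunction or AllocationSite means monomorphic.
    enum class Feedback { kUninitialized, kPremonomorphic, kMonomorphic, kMegamorphic };

    struct Slot {
      Feedback state = Feedback::kUninitialized;
      const void* target = nullptr;        // cached function (non-array case)
      bool holds_allocation_site = false;  // monomorphic Array() case
    };

    void RecordCallTarget(Slot* slot, const void* function, bool is_array_function) {
      if (slot->state == Feedback::kMegamorphic) return;
      if (slot->state == Feedback::kMonomorphic && slot->target == function) return;
      if (is_array_function) {
        if (slot->holds_allocation_site) return;     // monomorphic hit on the site
        if (slot->state == Feedback::kUninitialized ||
            slot->state == Feedback::kPremonomorphic) {
          slot->state = Feedback::kMonomorphic;      // create an AllocationSite
          slot->holds_allocation_site = true;
          return;
        }
        slot->state = Feedback::kMegamorphic;        // some other function was cached
        return;
      }
      if (slot->state == Feedback::kUninitialized) {
        slot->state = Feedback::kPremonomorphic;     // first call: just mark the slot
      } else if (slot->state == Feedback::kPremonomorphic) {
        slot->state = Feedback::kMonomorphic;        // second call: cache the target
        slot->target = function;                     // needs a write barrier in the stub
      } else {
        slot->state = Feedback::kMegamorphic;        // monomorphic miss
      }
    }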
@@ -3484,115 +3262,126 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
// a1 : the function to call
- // a2 : cache cell for call target
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call, ne, t0, Operand(at));
- // Patch the receiver on the stack with the global receiver object.
- __ lw(a3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
- }
+ // a2 : feedback vector
+ // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
+ Label slow, non_function, wrap, cont;
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
- // Get the map of the function object.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ if (NeedsChecks()) {
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ // Goto slow case if we do not have a function.
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
}
// Fast-case: Invoke the function now.
// a1: pushed function
ParameterCount actual(argc_);
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call_as_function, eq, t0, Operand(at));
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
+ if (CallAsMethod()) {
+ if (NeedsChecks()) {
+ // Do not transform the receiver for strict mode functions and natives.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
+ int32_t strict_mode_function_mask =
+ 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+ __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
+ __ Branch(&cont, ne, at, Operand(zero_reg));
+ }
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ // Compute the receiver in non-strict mode.
+ __ lw(a3, MemOperand(sp, argc_ * kPointerSize));
+
+ if (NeedsChecks()) {
+ __ JumpIfSmi(a3, &wrap);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
}
- // Check for function proxy.
- __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(t1, CALL_AS_METHOD);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (NeedsChecks()) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ if (RecordCallTarget()) {
+ // If there is a call target cache, mark it megamorphic in the
+ // non-function case. MegamorphicSentinel is an immortal immovable
+ // object (undefined) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, Operand(t1));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ sw(at, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ }
+ // Check for function proxy.
+ __ Branch(&non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ push(a1); // Put proxy as additional argument.
+ __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(&non_function);
+ __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+ __ li(a0, Operand(argc_)); // Set up the number of arguments.
+ __ li(a2, Operand(0, RelocInfo::NONE32));
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
}
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
- __ li(a0, Operand(argc_)); // Set up the number of arguments.
- __ mov(a2, zero_reg);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ if (CallAsMethod()) {
+ __ bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(a1);
+ }
+ __ mov(a0, v0);
+ __ sw(a0, MemOperand(sp, argc_ * kPointerSize));
+ __ jmp(&cont);
+ }
}
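The CallAsMethod path above performs the usual sloppy-mode receiver coercion before invoking the function. A minimal sketch of the decision it encodes, with illustrative names rather than V8 API:

    // Returns true when the stub's &wrap path runs, i.e. when the receiver
    // must be boxed via Builtins::TO_OBJECT before the call.
    struct CalleeFlags { bool is_strict_mode; bool is_native; };

    enum class ReceiverKind { kSmi, kPrimitiveHeapObject, kSpecObject };

    bool NeedsReceiverWrapping(CalleeFlags callee, ReceiverKind receiver) {
      if (callee.is_strict_mode || callee.is_native) return false;  // pass receiver as-is
      return receiver != ReceiverKind::kSpecObject;  // Smis and primitives get wrapped
    }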
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : feedback vector
+ // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = a3;
+ Register jmp_reg = t0;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3601,19 +3390,18 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
- // a3: object type
+ // t0: object type
Label do_call;
__ bind(&slow);
- __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
__ bind(&non_function_call);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
 // Set expected number of arguments to zero (not changing a0).
__ li(a2, Operand(0, RelocInfo::NONE32));
- __ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -3766,33 +3554,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for
- // very short strings.
- if (!ascii) {
- __ addu(count, count, count);
- }
- __ Branch(&done, eq, count, Operand(zero_reg));
- __ addu(count, dest, count); // Count now points to the last dest byte.
-
- __ bind(&loop);
- __ lbu(scratch, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ Branch(&loop, lt, dest, Operand(count));
-
- __ bind(&done);
-}
-
-
enum CopyCharactersFlags {
COPY_ASCII = 1,
DEST_ALWAYS_ALIGNED = 2
@@ -3911,145 +3672,6 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ Subu(scratch, c1, Operand(static_cast<int>('0')));
- __ Branch(&not_array_index,
- Ugreater,
- scratch,
- Operand(static_cast<int>('9' - '0')));
- __ Subu(scratch, c2, Operand(static_cast<int>('0')));
-
- // If check failed combine both characters into single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in c1 register.
- Label tmp;
- __ sll(scratch1, c2, kBitsPerByte);
- __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
- __ Or(c1, c1, scratch1);
- __ bind(&tmp);
- __ Branch(
- not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ sll(scratch, c2, kBitsPerByte);
- __ Or(chars, chars, scratch);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load string table.
- // Load address of first element of the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
- __ sra(mask, mask, 1);
- __ Addu(mask, mask, -1);
-
- // Calculate untagged address of the first element of the string table.
- Register first_string_table_element = string_table;
- __ Addu(first_string_table_element, string_table,
- Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers.
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_string_table_element: address of the first element of
- // the string table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the string table.
- const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- if (i > 0) {
- __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ And(candidate, candidate, Operand(mask));
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ sll(scratch, candidate, kPointerSizeLog2);
- __ Addu(scratch, scratch, first_string_table_element);
- __ lw(candidate, MemOperand(scratch));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ GetObjectType(candidate, scratch, scratch);
- __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
-
- __ Branch(not_found, eq, undefined, Operand(candidate));
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole,
- scratch, Operand(candidate));
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
- __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ mov(v0, result);
-}
-
-
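The removed two-character probe above is a bounded open-addressing lookup over the string table. In outline (hypothetical table layout; deleted-entry handling and the exact probe offsets are simplified):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Probes up to four slots for an interned two-character string; empty
    // slots model the undefined sentinel (hole/deleted entries are elided).
    const std::string* ProbeTwoCharacterString(const std::vector<std::string>& table,
                                               uint32_t hash, char c1, char c2) {
      const int kProbes = 4;
      const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // capacity is 2^n
      for (int i = 0; i < kProbes; ++i) {
        uint32_t offset = (i == 0) ? 0 : static_cast<uint32_t>(i * (i + 1) / 2);
        const std::string& candidate = table[(hash + offset) & mask];
        if (candidate.empty()) return nullptr;  // undefined: string is not in the table
        if (candidate.size() == 2 && candidate[0] == c1 && candidate[1] == c2) {
          return &candidate;                    // found it; the stub returns it in v0
        }
      }
      return nullptr;  // not found after kProbes probes; caller allocates a new string
    }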
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
@@ -4477,352 +4099,232 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
+void ArrayPushStub::Generate(MacroAssembler* masm) {
+ Register receiver = a0;
+ Register scratch = a1;
- Counters* counters = masm->isolate()->counters();
+ int argc = arguments_count();
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfEitherSmi(a0, a1, &call_runtime);
- // Load instance types.
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ Or(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
+ if (argc == 0) {
+ // Nothing to do, just return the length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ DropAndRet(argc + 1);
+ return;
}
- // Both arguments are strings.
- // a0: first string
- // a1: second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- // These tests use a zero-length check on the string length, which is a Smi.
- // Assert that Smi::FromInt(0) is really 0.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(Smi::FromInt(0) == 0);
- __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
- __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
- __ mov(v0, a0); // Assume we'll return first string (from a0).
- __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
- __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
- __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
- __ and_(t4, t4, t5); // Branch if both strings were non-empty.
- __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
+ Isolate* isolate = masm->isolate();
- __ bind(&strings_not_empty);
+ if (argc != 1) {
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
}
- // Untag both string-lengths.
- __ sra(a2, a2, kSmiTagSize);
- __ sra(a3, a3, kSmiTagSize);
-
- // Both strings are non-empty.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ Addu(t2, a2, Operand(a3));
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return a string here.
- __ Branch(&longer_than_two, ne, t2, Operand(2));
-
- // Check that both strings are non-external ASCII strings.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
+ Label call_builtin, attempt_to_grow_elements, with_write_barrier;
- __ bind(&make_two_character_string);
- // Resulting string has length 2 and first chars of two strings
- // are combined into single halfword in a2 register.
- // So we can fill resulting string without two loops by a single
- // halfword store instruction (which assumes that processor is
- // in a little endian mode).
- __ li(t2, Operand(2));
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
+ Register elements = t2;
+ Register end_elements = t1;
+ // Get the elements array of the object.
+ __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
}
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
- __ And(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kStringEncodingMask));
- __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ li(t0, Operand(high_promotion_mode));
- __ lw(t0, MemOperand(t0, 0));
- __ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
-
- __ mov(t3, v0);
- __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
- __ RecordWriteField(t3,
- ConsString::kFirstOffset,
- a0,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
- __ RecordWriteField(t3,
- ConsString::kSecondOffset,
- a1,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
- __ bind(&skip_write_barrier);
- __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
- __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
+ // Get the array's length into scratch and calculate new length.
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
- __ bind(&after_writing);
+ // Get the elements' length.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
+ const int kEndElementsOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one byte characters.
- // t0: first instance type.
- // t1: second instance type.
- // Branch to ascii_data if _both_ instances have kOneByteDataHintMask set.
- __ And(at, t0, Operand(kOneByteDataHintMask));
- __ and_(at, at, t1);
- __ Branch(&ascii_data, ne, at, Operand(zero_reg));
- __ Xor(t0, t0, Operand(t1));
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ And(t0, t0, Operand(kOneByteStringTag | kOneByteDataHintTag));
- __ Branch(&ascii_data, eq, t0,
- Operand(kOneByteStringTag | kOneByteDataHintTag));
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
- __ Branch(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t2: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- // Check whether both strings have same encoding
- __ Xor(t3, t0, Operand(t1));
- __ And(t3, t3, Operand(kStringEncodingMask));
- __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ // Check if we could survive without allocation.
+ __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t0, Operand(kStringRepresentationMask));
-
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_first_add;
- __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_first_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t0, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
+ // Check if value is a smi.
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ JumpIfNotSmi(t0, &with_write_barrier);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_second_add;
- __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_second_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t1, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // t3: first character of first string
- // a1: first character of second string
- // a2: length of first string
- // a3: length of second string
- // t2: sum of lengths.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(t4, t1, Operand(kStringEncodingMask));
- __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
+ // Store the value.
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ __ Addu(end_elements, end_elements, kEndElementsOffset);
+ __ sw(t0, MemOperand(end_elements));
+ } else {
+ // Check if we could survive without allocation.
+ __ Branch(&call_builtin, gt, scratch, Operand(t0));
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
-
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ StoreNumberToDoubleElements(t0, scratch, elements, a3, t1, a2,
+ &call_builtin, argc * kDoubleSize);
+ }
- __ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string.
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
+ // Save new length.
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ mov(v0, scratch);
+ __ DropAndRet(argc + 1);
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ __ bind(&with_write_barrier);
+
+ if (IsFastSmiElementsKind(elements_kind())) {
+ if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
+
+ __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&call_builtin, eq, t3, Operand(at));
+
+ ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
+ ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
+ __ lw(a3, ContextOperand(a3, Context::JS_ARRAY_MAPS_INDEX));
+ const int header_size = FixedArrayBase::kHeaderSize;
+ // Verify that the object can be transitioned in place.
+ const int origin_offset = header_size + elements_kind() * kPointerSize;
+ __ lw(a2, FieldMemOperand(receiver, origin_offset));
+ __ lw(at, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ Branch(&call_builtin, ne, a2, Operand(at));
+
+
+ const int target_offset = header_size + target_kind * kPointerSize;
+ __ lw(a3, FieldMemOperand(a3, target_offset));
+ __ mov(a2, receiver);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, DONT_TRACK_ALLOCATION_SITE, NULL);
+ }
- if (call_builtin.is_linked()) {
+ // Save new length.
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Store the value.
+ // We may need a register containing the address end_elements below, so write
+ // back the value in end_elements.
+ __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ __ Addu(end_elements, end_elements, kEndElementsOffset);
+ __ sw(t0, MemOperand(end_elements));
+
+ __ RecordWrite(elements,
+ end_elements,
+ t0,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ mov(v0, scratch);
+ __ DropAndRet(argc + 1);
+
+ __ bind(&attempt_to_grow_elements);
+ // scratch: array's length + 1.
+
+ if (!FLAG_inline_new) {
__ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
}
-}
+ __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case the
+ // new element is non-Smi. For now, delegate to the builtin.
+ if (IsFastSmiElementsKind(elements_kind())) {
+ __ JumpIfNotSmi(a2, &call_builtin);
+ }
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(a0);
- __ push(a1);
-}
+ // We could be lucky and the elements array could be at the top of new-space.
+ // In this case we can just grow it in place by moving the allocation pointer
+ // up.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate);
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate);
+
+ const int kAllocationDelta = 4;
+ ASSERT(kAllocationDelta >= argc);
+ // Load top and check if it is the end of elements.
+ __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
+ __ li(t0, Operand(new_space_allocation_top));
+ __ lw(a3, MemOperand(t0));
+ __ Branch(&call_builtin, ne, a3, Operand(end_elements));
+
+ __ li(t3, Operand(new_space_allocation_limit));
+ __ lw(t3, MemOperand(t3));
+ __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
+ __ Branch(&call_builtin, hi, a3, Operand(t3));
+
+ // We fit and could grow elements.
+ // Update new_space_allocation_top.
+ __ sw(a3, MemOperand(t0));
+ // Push the argument.
+ __ sw(a2, MemOperand(end_elements));
+ // Fill the rest with holes.
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < kAllocationDelta; i++) {
+ __ sw(a3, MemOperand(end_elements, i * kPointerSize));
+ }
+
+ // Update elements' and array's sizes.
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
+ __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ // Elements are in new space, so write barrier is not required.
+ __ mov(v0, scratch);
+ __ DropAndRet(argc + 1);
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(a1);
- __ pop(a0);
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
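The attempt_to_grow_elements path above only succeeds when the elements backing store is the most recent allocation in new space, so the array can grow by simply bumping the allocation top. A compact model of that check (kAllocationDelta is 4, as in the stub):

    #include <cstdint>

    // Returns true (and bumps *allocation_top) when the backing store ending at
    // elements_end can be grown in place by kAllocationDelta pointer-sized slots.
    bool TryGrowElementsInPlace(uintptr_t* allocation_top, uintptr_t allocation_limit,
                                uintptr_t elements_end, int pointer_size) {
      const int kAllocationDelta = 4;
      if (*allocation_top != elements_end) return false;  // something was allocated after us
      uintptr_t new_top = *allocation_top + kAllocationDelta * pointer_size;
      if (new_top > allocation_limit) return false;       // would overrun new space
      *allocation_top = new_top;                           // claim the extra slots
      return true;  // caller stores the pushed value and fills the rest with holes
    }

Because the newly claimed slots are in new space, the stub can skip the write barrier after storing the pushed value, as its final comment notes.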
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ GetObjectType(arg, scratch1, scratch1);
- __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
- __ mov(arg, scratch1);
- __ sw(arg, MemOperand(sp, stack_offset));
- __ bind(&done);
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : left
+ // -- a0 : right
+ // -- ra : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ // Load a2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ li(a2, handle(isolate->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ And(at, a2, Operand(kSmiTagMask));
+ __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
+ __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(state_);
+ __ TailCallStub(&stub);
}
@@ -5156,8 +4658,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
- __ push(ra);
- __ Push(a1, a0);
+ __ Push(ra, a1, a0);
__ li(t0, Operand(Smi::FromInt(op_)));
__ addiu(sp, sp, -kPointerSize);
__ CallExternalReference(miss, 3, USE_DELAY_SLOT);
@@ -5459,89 +4960,13 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
- // FastNewClosureStub::Generate
- { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
- // StringAddStub::Generate
- { REG(t3), REG(a1), REG(t0), EMIT_REMEMBERED_SET },
- { REG(t3), REG(a0), REG(t0), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
// Hydrogen code stubs need stub2 at snapshot time.
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
+ stub2.GetCode(isolate);
}
@@ -5833,7 +5258,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- AllowStubCallsScope allow_stub_calls(masm, true);
ProfileEntryHookStub stub;
__ push(ra);
__ CallStub(&stub);
@@ -5873,11 +5297,11 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
ASSERT(IsPowerOf2(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
-
+ __ Subu(sp, sp, kCArgsSlotsSize);
#if defined(V8_HOST_ARCH_MIPS)
int32_t entry_hook =
reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
- __ li(at, Operand(entry_hook));
+ __ li(t9, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
@@ -5885,15 +5309,18 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ li(at, Operand(ExternalReference(&dispatcher,
+ __ li(t9, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
masm->isolate())));
#endif
- __ Call(at);
+ // Call the C function through t9 to conform to the ABI for PIC.
+ __ Call(t9);
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
__ mov(sp, s5);
+ } else {
+ __ Addu(sp, sp, kCArgsSlotsSize);
}
// Also pop ra to get Ret(0).
@@ -5906,20 +5333,15 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
- mode);
+ T stub(GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
T stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
}
// If we reached this point there is a problem.
@@ -5932,7 +5354,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // a2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// a0 - number of arguments
// a1 - constructor?
@@ -5960,44 +5382,40 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind holey_initial = GetHoleyElementsKind(initial);
ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ Addu(a3, a3, Operand(1));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
if (FLAG_debug_code) {
- __ lw(t1, FieldMemOperand(t1, 0));
+ __ lw(t1, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSiteInCell, t1, Operand(at));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
}
- // Save the resulting elements kind in type info
- __ SmiTag(a3);
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
- __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(a3);
+ // Save the resulting elements kind in type info. We can't just store a3
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field; the upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
ArraySingleArgumentConstructorStub stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
}
// If we reached this point there is a problem.
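
Editor's note: the rewritten transition code assumes the elements kind occupies the low bits of AllocationSite::transition_info (ElementsKindBits::kShift == 0), so the packed-to-holey change is a plain addition that leaves the upper bits untouched. A small sketch of that encoding follows; the field width and distance constant are illustrative assumptions, not V8's exact values.

```cpp
// Sketch of the transition_info encoding assumed by the code above: the
// elements kind sits in the low bits, so packed -> holey is a small addition.
#include <cstdint>

constexpr uint32_t kElementsKindBitCount = 5;                       // assumed
constexpr uint32_t kElementsKindMask = (1u << kElementsKindBitCount) - 1;
constexpr uint32_t kPackedToHoleyDistance = 1;   // assumed, as in the Smi add

uint32_t TransitionPackedToHoley(uint32_t transition_info) {
  // Safe only because the kind occupies the lowest bits and, for packed
  // kinds, the addition cannot carry into the upper part of the field.
  return transition_info + kPackedToHoleyDistance;
}

uint32_t DecodeElementsKind(uint32_t transition_info) {
  return transition_info & kElementsKindMask;  // upper bits are ignored
}
```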
@@ -6010,20 +5428,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate);
}
}
}
@@ -6045,11 +5458,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -6085,7 +5498,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc (only if argument_count_ == ANY)
// -- a1 : constructor
- // -- a2 : type info cell
+ // -- a2 : feedback vector (fixed array or undefined)
+ // -- a3 : slot index (if a2 is fixed array)
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -6094,23 +5508,27 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(t0, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
- __ GetObjectType(a3, a3, t0);
+ __ GetObjectType(t0, t0, t1);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t0, Operand(MAP_TYPE));
+ t1, Operand(MAP_TYPE));
- // We should either have undefined in a2 or a valid cell.
+ // We should either have undefined in a2 or a valid fixed array.
Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
+ Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&okay_here, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, kExpectedPropertyCellInRegisterA2,
- a3, Operand(cell_map));
+ __ lw(t0, FieldMemOperand(a2, 0));
+ __ Assert(eq, kExpectedFixedArrayInRegisterA2,
+ t0, Operand(fixed_array_map));
+
+ // a3 should be a smi if we don't have undefined in a2
+ __ AssertSmi(a3);
+
__ bind(&okay_here);
}
@@ -6118,16 +5536,20 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a2, a2, Operand(t0));
+ __ lw(a2, FieldMemOperand(a2, FixedArray::kHeaderSize));
- // If the type cell is undefined, or contains anything other than an
+ // If the feedback vector is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ lw(t0, FieldMemOperand(a3, 0));
+ __ lw(t0, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&no_info, ne, t0, Operand(at));
- __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
+ __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
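
Editor's note: with the type cell gone, the stub indexes the feedback vector directly; the Smi slot in a3 is scaled by the shift (kPointerSizeLog2 - kSmiTagSize), which untags and scales in one step, and added to a2 together with the FixedArray header. A hedged sketch of that address arithmetic for a 32-bit heap; the offsets are illustrative assumptions.

```cpp
// Hedged sketch of the slot address computed by the sll/Addu/lw sequence
// above, for a 32-bit heap with one-bit Smi tagging. Offsets are assumptions.
#include <cstdint>

constexpr int kPointerSizeLog2 = 2;       // 4-byte pointers
constexpr int kSmiTagSize = 1;            // Smi value is shifted left by 1
constexpr intptr_t kHeaderSize = 2 * 4;   // FixedArray map + length words
constexpr intptr_t kHeapObjectTag = 1;    // FieldMemOperand subtracts this

intptr_t FeedbackSlotAddress(intptr_t vector, intptr_t smi_slot) {
  // Shifting the tagged slot by (kPointerSizeLog2 - kSmiTagSize) untags the
  // Smi and scales it to a byte offset in a single step.
  intptr_t byte_offset = smi_slot << (kPointerSizeLog2 - kSmiTagSize);
  return vector + byte_offset + kHeaderSize - kHeapObjectTag;
}
```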
@@ -6137,34 +5559,25 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
- __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
InternalArrayNoArgumentConstructorStub stub0(kind);
- __ TailCallStub(&stub0);
+ __ TailCallStub(&stub0, lo, a0, Operand(1));
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN, hi, a0, Operand(1));
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument.
__ lw(at, MemOperand(sp, 0));
- __ Branch(&normal_sequence, eq, at, Operand(zero_reg));
InternalArraySingleArgumentConstructorStub
stub1_holey(GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
+ __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
}
- __ bind(&normal_sequence);
InternalArraySingleArgumentConstructorStub stub1(kind);
__ TailCallStub(&stub1);
-
- __ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
- __ TailCallStub(&stubN);
}
@@ -6183,7 +5596,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
@@ -6218,6 +5631,149 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : callee
+ // -- t0 : call_data
+ // -- a2 : holder
+ // -- a1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1)* 4] : first argument
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ Register callee = a0;
+ Register call_data = t0;
+ Register holder = a2;
+ Register api_function_address = a1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ Isolate* isolate = masm->isolate();
+
+ // Save context, callee and call data.
+ __ Push(context, callee, call_data);
+ // Load context from callee.
+ __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // Push return value and default return value.
+ __ Push(scratch, scratch);
+ __ li(scratch,
+ Operand(ExternalReference::isolate_address(isolate)));
+ // Push isolate and holder.
+ __ Push(scratch, holder);
+
+ // Prepare arguments.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ ASSERT(!api_function_address.is(a0) && !scratch.is(a0));
+ // a0 = FunctionCallbackInfo&
+ // Arguments is after the return address.
+ __ Addu(a0, sp, Operand(1 * kPointerSize));
+ // FunctionCallbackInfo::implicit_args_
+ __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ sw(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(at, Operand(argc));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first js argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ return_value_operand,
+ &context_restore_operand);
+}
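
Editor's note: the stub lays out the four words of the FunctionCallbackInfo by hand: implicit_args_ points at the seven values just pushed, values_ at the first JS argument, length_ holds argc, and is_construct_call is zeroed. A rough struct view of those stores, taking the field meanings from the comments above rather than from any guaranteed ABI.

```cpp
// Rough view of the four words written at a0; field meanings are taken from
// the comments above and should not be read as a stable ABI.
#include <cstdint>

struct FunctionCallbackInfoSketch {
  intptr_t* implicit_args;    // base of the 7 pushed words (holder..context)
  intptr_t* values;           // implicit_args + kArgsLength - 1 + argc,
                              // i.e. the first JS argument
  int32_t length;             // argc
  int32_t is_construct_call;  // cleared for a regular call
};

FunctionCallbackInfoSketch BuildInfo(intptr_t* pushed_words, int argc) {
  const int kArgsLength = 7;
  return { pushed_words, pushed_words + (kArgsLength - 1 + argc), argc, 0 };
}
```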
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- a2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = a2;
+
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // a1 (internal::Object** args_) as the data.
+ __ sw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 10531a8002..8d65d5b055 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -38,30 +38,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
@@ -69,7 +45,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -83,18 +58,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
@@ -111,23 +74,6 @@ class StringHelper : public AllStatic {
int flags);
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
@@ -145,44 +91,45 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
+ SubStringStub() {}
private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
+};
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
+class StoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ explicit StoreRegistersStateStub(SaveFPRegsMode with_fp)
+ : save_doubles_(with_fp) {}
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm);
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() { return StoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
- const StringAddFlags flags_;
+ void Generate(MacroAssembler* masm);
};
-
-class SubStringStub: public PlatformCodeStub {
+class RestoreRegistersStateStub: public PlatformCodeStub {
public:
- SubStringStub() {}
+ explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp)
+ : save_doubles_(with_fp) {}
+ static void GenerateAheadOfTime(Isolate* isolate);
private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
+ Major MajorKey() { return RestoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
void Generate(MacroAssembler* masm);
};
-
class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -240,7 +187,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
ASSERT(SignRegisterBits::is_valid(sign_.code()));
}
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@@ -291,8 +237,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 9d4b52c9da..1b79433d37 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -37,18 +37,6 @@ namespace v8 {
namespace internal {
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- return NULL;
-}
-
-
#define __ masm.
@@ -62,10 +50,10 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -113,13 +101,457 @@ UnaryMathFunction CreateExpFunction() {
}
-#undef __
+#if defined(V8_HOST_ARCH_MIPS)
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+ OS::MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+ if (Serializer::enabled()) {
+ return stub;
+ }
+
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+ // This code assumes that cache lines are 32 bytes; if the cache line is
+ // larger, it will not work correctly.
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ Label lastb, unaligned, aligned, chkw,
+ loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
+ leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
+ ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
+
+ // The size of each prefetch.
+ uint32_t pref_chunk = 32;
+ // The maximum size of a prefetch; it must not be less than pref_chunk.
+ // If the real size of a prefetch is greater than max_pref_size and
+ // the kPrefHintPrepareForStore hint is used, the code will not work
+ // correctly.
+ uint32_t max_pref_size = 128;
+ ASSERT(pref_chunk < max_pref_size);
+
+ // pref_limit is set based on the fact that we never use an offset
+ // greater than 5 on a store pref and that a single pref can
+ // never be larger than max_pref_size.
+ uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
+ int32_t pref_hint_load = kPrefHintLoadStreamed;
+ int32_t pref_hint_store = kPrefHintPrepareForStore;
+ uint32_t loadstore_chunk = 4;
+
+ // The initial prefetches may fetch bytes that are before the buffer being
+ // copied. Start copies with an offset of 4 so avoid this situation when
+ // using kPrefHintPrepareForStore.
+ ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
+ pref_chunk * 4 >= max_pref_size);
+
+ // If the size is less than 8, go to lastb. Regardless of size,
+ // copy the dst pointer to v0 for the return value.
+ __ slti(t2, a2, 2 * loadstore_chunk);
+ __ bne(t2, zero_reg, &lastb);
+ __ mov(v0, a0); // In delay slot.
+
+ // If src and dst have different alignments, go to unaligned, if they
+ // have the same alignment (but are not actually aligned) do a partial
+ // load/store to make them aligned. If they are both already aligned
+ // we can start copying at aligned.
+ __ xor_(t8, a1, a0);
+ __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
+ __ bne(t8, zero_reg, &unaligned);
+ __ subu(a3, zero_reg, a0); // In delay slot.
+
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &aligned); // Already aligned.
+ __ subu(a2, a2, a3); // In delay slot. a2 is the remaining byte count.
+
+ __ lwr(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swr(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+
+ // Now dst and src are both word-aligned. Set a2 to
+ // count how many bytes we have to copy after all the 64 byte chunks are
+ // copied and a3 to the dst pointer after all the 64 byte chunks have been
+ // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&aligned);
+ __ andi(t8, a2, 0x3f);
+ __ beq(a2, t8, &chkw); // Less than 64?
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
+
+ // When in the loop we prefetch with kPrefHintPrepareForStore hint,
+ // in this case the a0+x should be past the "t0-32" address. This means:
+ // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
+ // x=64 the last "safe" a0 address is "t0-96". In the current version we
+ // will use "pref hint, 128(a0)", so "t0-160" is the limit.
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(t0, a0, a2); // t0 is the "past the end" address.
+ __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+ __ bind(&loop16w);
+ __ lw(t0, MemOperand(a1));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
+ __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&skip_pref);
+ __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+
+ __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+
+ __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+ // Here we have src and dest word-aligned but less than 64 bytes to go.
+ // Check for a 32 bytes chunk and copy if there is one. Otherwise jump
+ // down to chk1w to handle the tail end of the copy.
+ __ bind(&chkw);
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ andi(t8, a2, 0x1f);
+ __ beq(a2, t8, &chk1w); // Less than 32?
+ __ nop(); // In delay slot.
+ __ lw(t0, MemOperand(a1));
+ __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Here we have less than 32 bytes to copy. Set up for a loop to copy
+ // one word at a time. Set a2 to count how many bytes we have to copy
+ // after all the word chunks are copied and a3 to the dst pointer after
+ // all the word chunks have been copied. We will loop, incrementing a0
+ // and a1 until a0 equals a3.
+ __ bind(&chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &lastb);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ __ bind(&wordCopy_loop);
+ __ lw(t3, MemOperand(a1));
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &wordCopy_loop);
+ __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+
+ __ bind(&lastb);
+ __ Branch(&leave, le, a2, Operand(zero_reg));
+ __ addu(a3, a0, a2);
+
+ __ bind(&lastbloop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &lastbloop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ bind(&leave);
+ __ jr(ra);
+ __ nop();
+
+ // Unaligned case. Only the dst gets aligned so we need to do partial
+ // loads of the source followed by normal stores to the dst (once we
+ // have aligned the destination).
+ __ bind(&unaligned);
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &ua_chk16w);
+ __ subu(a2, a2, a3); // In delay slot.
+
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swr(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+
+ // Now the dst (but not the source) is aligned. Set a2 to count how many
+ // bytes we have to copy after all the 64 byte chunks are copied and a3 to
+ // the dst pointer after all the 64 byte chunks have been copied. We will
+ // loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&ua_chk16w);
+ __ andi(t8, a2, 0x3f);
+ __ beq(a2, t8, &ua_chkw);
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(t0, a0, a2);
+ __ Subu(t9, t0, pref_limit);
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+
+ __ bind(&ua_loop16w);
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+ __ lwr(t0, MemOperand(a1));
+ __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+ __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &ua_loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+ // Here we have less than 64 bytes to go. Check for
+ // a 32 byte chunk and copy if there is one. Otherwise jump down to
+ // ua_chk1w to handle the tail end of the copy.
+ __ bind(&ua_chkw);
+ __ Pref(pref_hint_load, MemOperand(a1));
+ __ andi(t8, a2, 0x1f);
+
+ __ beq(a2, t8, &ua_chk1w);
+ __ nop(); // In delay slot.
+ __ lwr(t0, MemOperand(a1));
+ __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Less than 32 bytes to copy. Set up for a loop to
+ // copy one word at a time.
+ __ bind(&ua_chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &ua_smallCopy);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ __ bind(&ua_wordCopy_loop);
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &ua_wordCopy_loop);
+ __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+
+ // Copy the last 8 bytes.
+ __ bind(&ua_smallCopy);
+ __ beq(a2, zero_reg, &leave);
+ __ addu(a3, a0, a2); // In delay slot.
+
+ __ bind(&ua_smallCopy_loop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &ua_smallCopy_loop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ jr(ra);
+ __ nop();
+ }
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
+#endif
+}
+#endif
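
Editor's note: the generated copy routine prefetches destination lines with kPrefHintPrepareForStore, which can allocate cache lines, so it must stop issuing those prefetches near the end of the buffer; pref_limit = 5 * pref_chunk + max_pref_size bounds the last safe address. A numeric sketch of that bound, with the constants copied from the code above and an invented helper name.

```cpp
// Numeric sketch of the prefetch bound computed before the main copy loop.
#include <cstdint>

constexpr uint32_t pref_chunk = 32;      // bytes covered by one pref
constexpr uint32_t max_pref_size = 128;  // largest pref the loop tolerates
constexpr uint32_t pref_limit = 5 * pref_chunk + max_pref_size;  // 288 bytes

// t0 in the generated code is the "past the end" destination address and t9
// the last address at which a prepare-for-store pref stays inside the buffer.
uintptr_t LastSafePrefetchAddress(uintptr_t dst, uint32_t count) {
  uintptr_t past_end = dst + count;
  return past_end - pref_limit;
}
```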
UnaryMathFunction CreateSqrtFunction() {
- return &sqrt;
+#if defined(USE_SIMULATOR)
+ return &std::sqrt;
+#else
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::sqrt;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ __ MovFromFloatParameter(f12);
+ __ sqrt_d(f0, f12);
+ __ MovToFloatResult(f0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
}
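
Editor's note: CreateSqrtFunction now assembles a tiny sqrt.d routine instead of returning the library function, keeping std::sqrt as the fallback under the simulator or when no executable buffer can be allocated. A generic sketch of that select-or-fall-back pattern, without the V8-specific allocation and casting.

```cpp
// Generic "use generated code, else fall back to the library" sketch; the
// decision mirrors the structure above but none of the V8 plumbing is shown.
#include <cmath>

using UnaryMathFn = double (*)(double);

UnaryMathFn ChooseSqrt(UnaryMathFn generated_or_null) {
  // Under the simulator, or when no executable buffer is available, keep the
  // portable std::sqrt; otherwise use the freshly assembled routine.
  UnaryMathFn fallback = [](double x) { return std::sqrt(x); };
  return generated_or_null ? generated_or_null : fallback;
}
```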
+#undef __
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -591,11 +1023,11 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Move(double_scratch1, temp3, temp1);
}
__ mul_d(result, result, double_scratch1);
- __ Branch(&done);
+ __ BranchShort(&done);
__ bind(&zero);
__ Move(result, kDoubleRegZero);
- __ Branch(&done);
+ __ BranchShort(&done);
__ bind(&infinity);
__ ldc1(result, ExpConstant(2, temp3));
@@ -603,9 +1035,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ bind(&done);
}
-
+#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+#endif
static byte* GetNoCodeAgeSequence(uint32_t* length) {
// The sequence of instructions that is patched out for aging code is the
@@ -615,10 +1048,15 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
byte* byte_sequence = reinterpret_cast<byte*>(sequence);
*length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->Push(ra, fp, cp, a1);
- patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
+ // Since patcher is a large object, allocate it dynamically when needed,
+ // to avoid overloading the stack in stress conditions.
+ SmartPointer<CodePatcher>
+ patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
+ PredictableCodeSizeScope scope(patcher->masm(), *length);
+ patcher->masm()->Push(ra, fp, cp, a1);
+ patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ patcher->masm()->Addu(
+ fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
initialized = true;
}
return byte_sequence;
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 822b94ad79..efbcb3ce33 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -36,50 +36,9 @@
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- explicit CodeGenerator(Isolate* isolate) {
- InitializeAstVisitor(isolate);
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 5a0870fd21..dcf8b82db0 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -124,6 +124,16 @@ const uint32_t kFCSRFlagMask =
const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+// 'pref' instruction hints
+const int32_t kPrefHintLoad = 0;
+const int32_t kPrefHintStore = 1;
+const int32_t kPrefHintLoadStreamed = 4;
+const int32_t kPrefHintStoreStreamed = 5;
+const int32_t kPrefHintLoadRetained = 6;
+const int32_t kPrefHintStoreRetained = 7;
+const int32_t kPrefHintWritebackInvalidate = 25;
+const int32_t kPrefHintPrepareForStore = 30;
+
// Helper functions for converting between register numbers and names.
class Registers {
public:
@@ -297,6 +307,8 @@ enum Opcode {
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+ PREF = ((6 << 3) + 3) << kOpcodeShift,
+
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift,
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 1535231dd8..b9bf69db42 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -274,9 +274,10 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a1 : function
- // -- a2 : cache cell for call target
+ // -- a2 : feedback array
+ // -- a3 : slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), 0);
}
@@ -295,9 +296,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
// -- a1 : constructor function
- // -- a2 : cache cell for call target
+ // -- a2 : feedback array
+ // -- a3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
}
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 971ead6c23..6bd9ba7b7f 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -104,7 +104,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->environment_length();
+ int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(s0.code(), params);
output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
output_frame->SetRegister(s2.code(), handler);
@@ -239,13 +239,13 @@ void Deoptimizer::EntryGenerator::Generate() {
__ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
- __ Branch(&pop_loop_header);
+ __ BranchShort(&pop_loop_header);
__ bind(&pop_loop);
__ pop(t0);
__ sw(t0, MemOperand(a3, 0));
__ addiu(a3, a3, sizeof(uint32_t));
__ bind(&pop_loop_header);
- __ Branch(&pop_loop, ne, a2, Operand(sp));
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
// Compute the output frame in the deoptimizer.
__ push(a0); // Preserve deoptimizer object across call.
@@ -280,11 +280,11 @@ void Deoptimizer::EntryGenerator::Generate() {
__ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
__ push(t3);
__ bind(&inner_loop_header);
- __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
__ Addu(t0, t0, Operand(kPointerSize));
__ bind(&outer_loop_header);
- __ Branch(&outer_push_loop, lt, t0, Operand(a1));
+ __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
__ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 691df940f2..1ae0340351 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -899,6 +899,9 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case LWR:
Format(instr, "lwr 'rt, 'imm16s('rs)");
break;
+ case PREF:
+ Format(instr, "pref 'rt, 'imm16s('rs)");
+ break;
case SB:
Format(instr, "sb 'rt, 'imm16s('rs)");
break;
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index 1bd511654a..20f0712666 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
#include "v8.h"
#if V8_TARGET_ARCH_MIPS
@@ -41,10 +40,24 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 437bf3a9f1..d9c0c798a3 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -154,18 +154,16 @@ const int kSafepointRegisterStackIndexMap[kNumRegs] = {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -3 * kPointerSize;
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
- // See some explanation in MacroAssembler::EnterExitFrame.
- // This marks the top of the extra allocated stack space.
- static const int kStackSpaceOffset = -3 * kPointerSize;
+ static const int kFrameSize = 2 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
-
static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index cbd0788121..41bc68e6d0 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -84,7 +84,7 @@ class JumpPatchSite BASE_EMBEDDED {
__ bind(&patch_site_);
__ andi(at, reg, 0);
// Always taken before patched.
- __ Branch(target, eq, at, Operand(zero_reg));
+ __ BranchShort(target, eq, at, Operand(zero_reg));
}
// When initially emitting this ensure that a jump is never generated to skip
@@ -95,7 +95,7 @@ class JumpPatchSite BASE_EMBEDDED {
__ bind(&patch_site_);
__ andi(at, reg, 0);
// Never taken before patched.
- __ Branch(target, ne, at, Operand(zero_reg));
+ __ BranchShort(target, ne, at, Operand(zero_reg));
}
void EmitPatchInfo() {
@@ -138,6 +138,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -152,16 +155,21 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). t1 is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
+ // Classic mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->is_classic_mode() && !info->is_native()) {
Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+ __ lw(at, MemOperand(sp, receiver_offset));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ lw(a2, GlobalObjectOperand());
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+
__ sw(a2, MemOperand(sp, receiver_offset));
+
__ bind(&ok);
}
@@ -179,9 +187,22 @@ void FullCodeGenerator::Generate() {
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(at);
+ // Emit a loop to initialize stack cells for locals when optimizing for
+ // size. Otherwise, unroll the loop for maximum performance.
+ __ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
+ if ((FLAG_optimize_for_size && locals_count > 4) ||
+ !is_int16(locals_count)) {
+ Label loop;
+ __ Subu(a2, sp, Operand(locals_count * kPointerSize));
+ __ bind(&loop);
+ __ Subu(sp, sp, Operand(kPointerSize));
+ __ Branch(&loop, gt, sp, Operand(a2), USE_DELAY_SLOT);
+ __ sw(t5, MemOperand(sp, 0)); // Push in the delay slot.
+ } else {
+ __ Subu(sp, sp, Operand(locals_count * kPointerSize));
+ for (int i = 0; i < locals_count; i++) {
+ __ sw(t5, MemOperand(sp, i * kPointerSize));
+ }
}
}
}
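
Editor's note: the prologue now picks between an unrolled run of stores and a compact loop, based on FLAG_optimize_for_size and whether locals_count fits a 16-bit immediate. A plain C++ sketch of the same decision; the threshold of 4 matches the diff, everything else is illustrative.

```cpp
// Sketch of the size-vs-speed choice for initializing locals; the container
// stands in for the machine stack.
#include <cstdint>
#include <vector>

void InitLocals(std::vector<intptr_t>* stack, int locals_count,
                intptr_t undefined, bool optimize_for_size) {
  if ((optimize_for_size && locals_count > 4) || locals_count > INT16_MAX) {
    // Compact loop: one store per iteration, smallest generated code.
    for (int i = 0; i < locals_count; ++i) stack->push_back(undefined);
  } else {
    // Unrolled path: reserve the whole block once, then fill each slot; the
    // generated code emits one sw instruction per local instead of a loop.
    stack->resize(stack->size() + locals_count, undefined);
  }
}
```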
@@ -193,20 +214,22 @@ void FullCodeGenerator::Generate() {
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in a1.
- __ push(a1);
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(a1);
__ Push(info->scope()->GetScopeInfo());
__ CallRuntime(Runtime::kNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
+ __ push(a1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both v0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Context is returned in v0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, v0);
+ __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -329,10 +352,6 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
reset_value = FLAG_interrupt_budget >> 4;
@@ -353,13 +372,10 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
__ beq(at, zero_reg, &ok);
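For reference, the weight computed above scales the profiling-counter decrement with the amount of code generated since the back-edge target, clamped to a maximum, so larger loop bodies consume the interrupt budget faster. A sketch with the constants treated as assumptions (they are platform-tuned in the real sources):

    #include <algorithm>

    // Sketch only: derive the back-edge weight from the distance (in bytes of
    // generated code) back to the loop entry.
    int BackEdgeWeight(int distance) {
      const int kCodeSizeMultiplier = 149;  // assumed tuning value
      const int kMaxBackEdgeWeight = 127;   // assumed clamp
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance / kCodeSizeMultiplier));
    }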
@@ -392,32 +408,24 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(v0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ Branch(&ok, ge, a3, Operand(zero_reg));
- __ push(v0);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(a2);
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(v0);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ Branch(&ok, ge, a3, Operand(zero_reg));
+ __ push(v0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(v0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
@@ -619,6 +627,7 @@ void FullCodeGenerator::StackValueContext::Plug(
Label done;
__ bind(materialize_true);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
+  // Push the value; the following branch can clobber 'at' in long branch mode.
__ push(at);
__ Branch(&done);
__ bind(materialize_false);
@@ -673,7 +682,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
+ CallIC(ic, condition->test_id());
__ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -1038,9 +1047,18 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
+ Label skip;
+ __ Branch(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&next_test, ne, v0, Operand(at));
+ __ Drop(1);
+ __ Branch(clause->body_target());
+ __ bind(&skip);
+
__ Branch(&next_test, ne, v0, Operand(zero_reg));
__ Drop(1); // Switch value is no longer needed.
__ Branch(clause->body_target());
@@ -1072,6 +1090,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1144,10 +1163,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(v0); // Map.
__ li(a0, Operand(Smi::FromInt(0)));
- // Push enumeration cache, enumeration cache length (as smi) and zero.
- __ Push(a2, a1, a0);
+ // Push map, enumeration cache, enumeration cache length (as smi) and zero.
+ __ Push(v0, a2, a1, a0);
__ jmp(&loop);
__ bind(&no_descriptors);
@@ -1158,13 +1176,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(a1, cell);
- __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ sw(a2, FieldMemOperand(a1, Cell::kValueOffset));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ __ li(a1, FeedbackVector());
+ __ li(a2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+ __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
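Conceptually, the change above replaces the per-site Cell with a slot in a per-function feedback vector: marking the for-in site as slow-case is just a store into that array element at a statically assigned slot index. A rough sketch, not the real heap layout, with the marker values assumed:

    #include <vector>

    // Sketch only: the feedback vector modelled as a plain array of markers.
    enum ForInFeedback { kForInFastCaseMarker = 1, kForInSlowCaseMarker = 2 };  // assumed values

    void MarkForInSlow(std::vector<int>& feedback_vector, int slot) {
      feedback_vector[slot] = kForInSlowCaseMarker;
    }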
@@ -1212,8 +1230,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
- __ push(a1); // Enumerable.
- __ push(a3); // Current entry.
+ __ Push(a1, a3); // Enumerable and current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ mov(a3, result_register());
__ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg));
@@ -1392,11 +1409,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
}
@@ -1474,13 +1490,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallLoadIC(CONTEXTUAL);
context()->Plug(v0);
break;
}
@@ -1488,9 +1503,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1555,12 +1569,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
__ CallRuntime(Runtime::kLoadContextSlot, 2);
@@ -1605,9 +1619,8 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(t1);
__ li(a0, Operand(Smi::FromInt(size)));
- __ push(a0);
+ __ Push(t1, a0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ pop(t1);
@@ -1634,6 +1647,8 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
@@ -1691,10 +1706,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(a0, result_register());
__ li(a2, Operand(key->value()));
__ lw(a1, MemOperand(sp));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1767,6 +1779,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
@@ -1779,6 +1796,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+  // If transitioning is the only client of allocation sites, tracking can
+  // be turned off when there is nothing left to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
__ mov(a0, result_register());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
@@ -1788,29 +1812,24 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
1, a1, a2);
- } else if (expr->depth() > 1) {
- __ Push(a3, a2, a1);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ Push(a3, a2, a1);
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ li(a0, Operand(Smi::FromInt(flags)));
+ __ Push(a3, a2, a1, a0);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
if (has_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
@@ -2048,10 +2067,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_catch);
__ mov(a0, v0);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
- __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ push(a3); // iter
- __ push(a0); // exception
+ __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(a2, a3, a0); // "throw", iter, except
__ jmp(&l_call);
// try { received = %yield result }
@@ -2079,32 +2097,39 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
kRAHasBeenSaved, kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(v0); // result
+ __ pop(v0); // result
EmitReturnSequence();
__ mov(a0, v0);
- __ bind(&l_resume); // received in a0
+ __ bind(&l_resume); // received in a0
__ PopTryHandler();
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
- __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ push(a3); // iter
- __ push(a0); // received
+ __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(a2, a3, a0); // "next", iter, received
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
+ __ lw(a1, MemOperand(sp, kPointerSize));
+ __ lw(a0, MemOperand(sp, 2 * kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(a0, v0);
+ __ mov(a1, a0);
+ __ sw(a1, MemOperand(sp, 2 * kPointerSize));
+ CallFunctionStub stub(1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
// if (!result.done) goto l_try;
__ bind(&l_loop);
__ mov(a0, v0);
__ push(a0); // save result
__ LoadRoot(a2, Heap::kdone_stringRootIndex); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in v0
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in v0
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2113,8 +2138,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(a0); // result
__ LoadRoot(a2, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in v0
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in v0
context()->DropAndPlug(2, v0); // drop iter and g
break;
}
@@ -2126,18 +2150,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in a0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. a1
- // will hold the generator object until the activation has been resumed.
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // a1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ pop(a1);
// Check generator state.
- Label wrong_state, done;
+ Label wrong_state, closed_state, done;
__ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
- __ Branch(&wrong_state, le, a3, Operand(zero_reg));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ Branch(&closed_state, eq, a3, Operand(zero_reg));
+ __ Branch(&wrong_state, lt, a3, Operand(zero_reg));
// Load suspended function and context.
__ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
@@ -2166,11 +2192,13 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Call(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
- __ push(ra); // Return address.
- __ push(fp); // Caller's frame pointer.
- __ mov(fp, sp);
- __ push(cp); // Callee's context.
- __ push(t0); // Callee's JS Function.
+ // ra = return address.
+ // fp = caller's frame pointer.
+  // cp = callee's context.
+ // t0 = callee's JS function.
+ __ Push(ra, fp, cp, t0);
+ // Adjust FP to point to saved FP.
+ __ Addu(fp, sp, 2 * kPointerSize);
// Load the operand stack size.
__ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
@@ -2201,13 +2229,28 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(a2);
__ Branch(&push_operand_holes);
__ bind(&call_resume);
- __ push(a1);
- __ push(result_register());
+ ASSERT(!result_register().is(a1));
+ __ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ push(a2);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(a1);
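The continuation field tested above encodes three generator states: negative while the generator is executing, zero once it is closed, and a positive resume offset while it is suspended. A sketch of the dispatch, with the enum names assumed:

    // Sketch only: classify a generator's continuation value the same way the
    // STATIC_ASSERTs above rely on (executing < 0, closed == 0, suspended > 0).
    enum class ResumeOutcome { kResume, kReturnDoneResult, kThrowValue, kWrongState };

    ResumeOutcome ClassifyResume(int continuation, bool resume_mode_is_next) {
      if (continuation == 0)   // closed
        return resume_mode_is_next ? ResumeOutcome::kReturnDoneResult
                                   : ResumeOutcome::kThrowValue;
      if (continuation < 0)    // currently executing
        return ResumeOutcome::kWrongState;
      return ResumeOutcome::kResume;  // suspended
    }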
@@ -2260,8 +2303,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
__ li(a2, Operand(key->value()));
// Call load IC. It has arguments receiver and property name a0 and a2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
@@ -2270,7 +2312,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2297,25 +2339,21 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ BinaryOpICStub stub(op, mode);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
// Smi case. This code works the same way as the smi-smi case in the type
  // recording binary operation stub.
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) {
case Token::SAR:
- __ Branch(&stub_call);
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ srav(right, left, scratch1);
__ And(v0, right, Operand(~kSmiTagMask));
break;
case Token::SHL: {
- __ Branch(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ sllv(scratch1, scratch1, scratch2);
@@ -2325,7 +2363,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::SHR: {
- __ Branch(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ srlv(scratch1, scratch1, scratch2);
@@ -2380,10 +2417,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ mov(a0, result_register());
__ pop(a1);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@@ -2421,10 +2457,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(a1, result_register());
__ pop(a0); // Restore value.
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2432,8 +2465,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(a1, result_register());
- __ pop(a2);
- __ pop(a0); // Restore value.
+ __ Pop(a0, a2); // a0 = restored value.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
@@ -2445,6 +2477,28 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ sw(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Move(a3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, LanguageMode mode) {
+ __ li(a1, Operand(name));
+ __ li(a0, Operand(Smi::FromInt(mode)));
+ __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
@@ -2452,42 +2506,30 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(a0, result_register());
__ li(a2, Operand(var->name()));
__ lw(a1, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallStoreIC();
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ lw(a1, StackOperand(var));
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a1, Operand(t0));
- __ sw(result_register(), StackOperand(var));
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(v0);
+ if (var->IsLookupSlot()) {
__ li(a0, Operand(var->name()));
- __ Push(cp, a0); // Context and name.
+      __ Push(v0, cp, a0);  // Value, context and name.
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, a1);
+ __ lw(a2, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a2, Operand(at));
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(v0); // Value.
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, a1, a0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), language_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2500,20 +2542,16 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ sw(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(a3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), language_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2521,24 +2559,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
}
- // Perform the assignment.
- __ sw(v0, location);
- if (var->IsContextSlot()) {
- __ mov(a3, v0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(v0); // Value.
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, a1, a0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
- // Non-initializing assignments to consts are ignored.
+ // Non-initializing assignments to consts are ignored.
}
@@ -2554,10 +2578,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
__ pop(a1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2575,13 +2596,12 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// - a1 is the key,
// - a2 is the receiver.
__ mov(a0, result_register());
- __ pop(a1); // Key.
- __ pop(a2);
+ __ Pop(a2, a1); // a1 = key.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2608,72 +2628,105 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId id) {
ic_total_count_++;
- __ Call(code, rmode, id);
+ __ Call(code, RelocInfo::CODE_TARGET, id);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+ Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
+
+ CallFunctionFlags flags;
+ // Get the target function.
+ if (callee->IsVariableProxy()) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a classic mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ flags = NO_CALL_FUNCTION_FLAGS;
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ lw(v0, MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ lw(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sw(v0, MemOperand(sp, kPointerSize));
+ flags = CALL_AS_METHOD;
+ }
+
+ // Load the arguments.
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
- __ li(a2, Operand(name));
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
+ CallFunctionStub stub(arg_count, flags);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
RecordJSReturnSite(expr);
+
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(v0);
+
+ context()->DropAndPlug(1, v0);
}
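After this rewrite the named-call path shares one layout with the stub path: the target function sits directly below the receiver, which sits below the pushed arguments, and that is what the lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)) load relies on. A sketch of those offsets (word size assumed):

    // Sketch only: operand-stack offsets from sp just before CallFunctionStub.
    constexpr int kPointerSize = 4;  // MIPS32 word size (assumed)

    int LastArgOffset()               { return 0; }
    int ReceiverOffset(int arg_count) { return arg_count * kPointerSize; }
    int TargetFnOffset(int arg_count) { return (arg_count + 1) * kPointerSize; }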
+// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(a1);
- __ push(v0);
- __ push(a1);
-
- // Code common for calls using the IC.
+ Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ lw(a1, MemOperand(sp, 0));
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ lw(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sw(v0, MemOperand(sp, kPointerSize));
+
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
}
+
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
+ CallFunctionStub stub(arg_count, CALL_AS_METHOD);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0); // Drop the key still on the stack.
+
+ context()->DropAndPlug(1, v0);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2685,17 +2738,16 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ li(a2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
- CallFunctionStub stub(arg_count, flags);
+ // Record call targets in unoptimized code.
+ CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2704,27 +2756,25 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
+ // t2: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+ __ lw(t2, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
}
- __ push(a1);
- // Push the receiver of the enclosing function.
+ // t1: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
- __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
- __ push(a1);
- // Push the language mode.
- __ li(a1, Operand(Smi::FromInt(language_mode())));
- __ push(a1);
+ __ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
+
+ // t0: the language mode.
+ __ li(t0, Operand(Smi::FromInt(language_mode())));
- // Push the start position of the scope the calls resides in.
+  // a1: the start position of the scope the call resides in.
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
- __ push(a1);
// Do the runtime call.
+ __ Push(t2, t1, t0, a1);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
@@ -2738,10 +2788,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -2772,20 +2821,18 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, v0);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ lw(a0, GlobalObjectOperand());
- __ push(a0);
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithIC(expr);
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2797,9 +2844,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in v0)
// and the object holding it (returned in v1).
- __ push(context_register());
+ ASSERT(!context_register().is(a2));
__ li(a2, Operand(proxy->name()));
- __ push(a2);
+ __ Push(context_register(), a2);
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ Push(v0, v1); // Function, receiver.
@@ -2814,37 +2861,34 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(v0);
// The receiver is implicitly the global receiver. Indicate this
      // by passing undefined to the call function stub.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ push(a1);
__ bind(&call);
}
// The receiver is either the global receiver or an object found
- // by LoadContextSlot. That object could be the hole if the
- // receiver is implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
+ // by LoadContextSlot.
+ EmitCallWithStub(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithIC(expr);
} else {
EmitKeyedCallWithIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ lw(a1, GlobalObjectOperand());
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ push(a1);
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCallWithStub(expr);
}
#ifdef DEBUG
@@ -2882,10 +2926,10 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ li(a2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -2908,7 +2952,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(t0, v0, Operand(kSmiTagMask));
+ __ SmiTst(v0, t0);
Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2929,7 +2973,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
+ __ NonNegativeSmiTst(v0, at);
Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -3006,8 +3050,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
- __ And(at, a1, Operand(1 << Map::kIsUndetectable));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ And(at, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -3125,6 +3169,36 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+ __ lw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ lw(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ li(t0, 0x80000000);
+ Label not_nan;
+ __ Branch(&not_nan, ne, a2, Operand(t0));
+ __ mov(t0, zero_reg);
+ __ mov(a2, a1);
+ __ bind(&not_nan);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a2, Operand(t0), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
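The %_IsMinusZero intrinsic above compares the two 32-bit halves of the heap number: the value is -0.0 exactly when the exponent word is 0x80000000 and the mantissa word is zero. A standalone sketch of the same predicate:

    #include <cstdint>
    #include <cstring>

    // Sketch only: -0.0 in IEEE-754 is the sign bit set and every other bit clear.
    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return bits == 0x8000000000000000ull;
    }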
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -3342,48 +3416,6 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- // Save the new heap number in callee-saved register s0, since
- // we call out to external C code below.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(s0, a1, a2, t6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
-
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(s0, v0); // Save result in s0, so it is saved thru CFunc call.
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in v0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ PrepareCallCFunction(1, a0);
- __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a1, Operand(0x41300000));
- // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- __ Move(f12, v0, a1);
- // Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a1);
- // Subtract and store the result in the heap number.
- __ sub_d(f0, f12, f14);
- __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
- __ mov(v0, s0);
-
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
@@ -3478,29 +3510,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- __ And(at, index, Operand(kSmiTagMask));
- __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
- __ And(at, value, Operand(kSmiTagMask));
- __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, String::kLengthOffset));
- __ Check(lt, kIndexIsTooLarge, index, Operand(at));
-
- __ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
-
- __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
- __ Subu(at, at, Operand(encoding_mask));
- __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
@@ -3511,13 +3520,20 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
if (FLAG_debug_code) {
+ __ SmiTst(value, at);
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
+ __ SmiTst(index, at);
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
+ __ SmiUntag(index, index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ Register scratch = t5;
+ __ EmitSeqStringSetCharCheck(
+ string, index, value, scratch, one_byte_seq_type);
+ __ SmiTag(index, index);
}
__ SmiUntag(value, value);
@@ -3541,13 +3557,20 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
if (FLAG_debug_code) {
+ __ SmiTst(value, at);
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
+ __ SmiTst(index, at);
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
+ __ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ Register scratch = t5;
+ __ EmitSeqStringSetCharCheck(
+ string, index, value, scratch, two_byte_seq_type);
+ __ SmiTag(index, index);
}
__ SmiUntag(value, value);
@@ -3735,9 +3758,11 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
+ __ pop(a1);
+ __ mov(a0, result_register()); // StringAddStub requires args in a0, a1.
+ StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3756,54 +3781,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
+ // Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
+ __ CallRuntime(Runtime::kMath_log, 1);
context()->Plug(v0);
}
@@ -3837,8 +3820,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
// InvokeFunction requires the function in a1. Move it in there.
__ mov(a1, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(a1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper());
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
@@ -3857,7 +3839,10 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ mov(a0, result_register());
+ __ pop(a1);
+ __ pop(a2);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3917,43 +3902,6 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = v0;
- Register left = a1;
- Register tmp = a2;
- Register tmp2 = a3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1)); // Result (right) in v0.
- __ pop(left);
-
- Label done, fail, ok;
- __ Branch(&ok, eq, left, Operand(right));
- // Fail if either is a non-HeapObject.
- __ And(tmp, left, Operand(right));
- __ JumpIfSmi(tmp, &fail);
- __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
- __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ Branch(&fail, ne, tmp, Operand(tmp2));
- __ lw(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ lw(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ Branch(&ok, eq, tmp, Operand(tmp2));
- __ bind(&fail);
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4237,34 +4185,48 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
+ // Push the builtins object as the receiver.
__ lw(a0, GlobalObjectOperand());
__ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
__ push(a0);
- }
+ // Load the function from the receiver.
+ __ li(a2, Operand(expr->name()));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Push the target function under the receiver.
+ __ lw(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sw(v0, MemOperand(sp, kPointerSize));
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- __ li(a2, Operand(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, v0);
} else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+ context()->Plug(v0);
}
- context()->Plug(v0);
}
@@ -4303,9 +4265,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ ASSERT(!context_register().is(a2));
__ li(a2, Operand(var->name()));
- __ push(a2);
+ __ Push(context_register(), a2);
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
context()->Plug(v0);
}
@@ -4436,15 +4398,48 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ __ mov(a0, v0);
if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(v0, &no_conversion);
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(v0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(v0);
+ break;
+ case NAMED_PROPERTY:
+ __ sw(v0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ sw(v0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ Register scratch1 = a1;
+ Register scratch2 = t0;
+ __ li(scratch1, Operand(Smi::FromInt(count_value)));
+ __ AdduAndCheckForOverflow(v0, v0, scratch1, scratch2);
+ __ BranchOnNoOverflow(&done, scratch2);
+ // Call stub. Undo operation first.
+ __ Move(v0, a0);
+ __ jmp(&stub_call);
+ __ bind(&slow);
}
- __ mov(a0, v0);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4465,33 +4460,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
}
- __ mov(a0, result_register());
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- __ li(a1, Operand(Smi::FromInt(count_value)));
- __ AdduAndCheckForOverflow(v0, a0, a1, t0);
- __ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
-
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(v0, &done);
- __ bind(&stub_call);
- }
- __ mov(a1, a0);
+ __ bind(&stub_call);
+ __ mov(a1, v0);
__ li(a0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
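The inlined path above adds the tagged count value to the smi and falls back to the BinaryOpICStub if the addition overflows, restoring the original operand first. A sketch of that check with 31-bit smis on a 32-bit target (tagging scheme assumed):

    #include <cstdint>

    // Sketch only: a 32-bit smi is the value shifted left by one (tag bit is 0).
    bool InlineSmiIncrement(int32_t tagged_smi, int count_value, int32_t* out) {
      int64_t result = static_cast<int64_t>(tagged_smi) +
                       (static_cast<int64_t>(count_value) << 1);
      if (result < INT32_MIN || result > INT32_MAX) return false;  // overflow: use the stub
      *out = static_cast<int32_t>(result);
      return true;
    }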
@@ -4521,10 +4499,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a0, result_register()); // Value.
__ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name.
__ pop(a1); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4537,12 +4512,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_PROPERTY: {
__ mov(a0, result_register()); // Value.
- __ pop(a1); // Key.
- __ pop(a2); // Receiver.
+ __ Pop(a2, a1); // a1 = key, a2 = receiver.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4562,16 +4536,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4728,7 +4702,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -4762,7 +4736,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index aa2773462c..4088ea4f4f 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -100,7 +100,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
}
-// Helper function used from LoadIC/CallIC GenerateNormal.
+// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
@@ -229,7 +229,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ And(at, scratch,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ Branch(slow, ne, at, Operand(zero_reg));
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -338,315 +339,6 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- a1 : receiver
- // -- a2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(a1, &number, t1);
- __ GetObjectType(a1, a3, a3);
- __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, a1);
- __ Branch(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, a1);
- __ Branch(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&boolean, eq, a1, Operand(t0));
- __ LoadRoot(t1, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t1));
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // a1: function
-
- // Check that the value isn't a smi.
- __ JumpIfSmi(a1, miss);
-
- // Check that the value is a JSFunction.
- __ GetObjectType(a1, scratch, scratch);
- __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(a1, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
-
- // a0: elements
- // Search the dictionary - put result in register a1.
- GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
-
- GenerateFunctionTailCall(masm, argc, &miss, t0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
- }
-
- // Get the receiver of the function from the stack.
- __ lw(a3, MemOperand(sp, argc*kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ Push(a3, a2);
-
- // Call the entry.
- __ PrepareCEntryArgs(2);
- __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to a1 and leave the internal frame.
- __ mov(a1, v0);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ lw(a2, MemOperand(sp, argc * kPointerSize));
- __ JumpIfSmi(a2, &invoke);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- __ sw(a2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(a2, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
-
- __ bind(&do_call);
- // receiver in a1 is not used after this point.
- // a2: key
- // a1: function
-
- GenerateFunctionTailCall(masm, argc, &slow_call, a0);
-
- __ bind(&check_number_dictionary);
- // a2: key
- // a3: elements map
- // t0: elements pointer
- // Check whether the elements is a number dictionary.
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&slow_load, ne, a3, Operand(at));
- __ sra(a0, a2, kSmiTagSize);
- // a0: untagged index
- __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a2); // Save the key.
- __ Push(a1, a2); // Pass the receiver and the key.
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(a2); // Restore the key.
- }
- __ mov(a1, v0);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, a2, a0, a3, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
- __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
-
- GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub,
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(a3, a2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(a2, &miss);
- __ IsObjectNameType(a2, a0, &miss);
-
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
@@ -655,9 +347,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -827,7 +517,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -862,37 +552,11 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Load receiver.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
- __ lw(a1, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow, a3);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
- __ lw(a1, unmapped_location);
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- __ Branch(&slow, eq, a1, Operand(a3));
- GenerateFunctionTailCall(masm, argc, &slow, a3);
- __ bind(&slow);
- GenerateMiss(masm, argc);
+ GenerateMiss(masm);
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
@@ -905,9 +569,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ Push(a1, a0);
// Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
__ TailCallExternalReference(ref, 2, 1);
}
@@ -984,7 +647,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
- masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+ masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
@@ -1132,7 +795,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -1181,6 +844,22 @@ static void KeyedStoreGenerateGenericHelper(
__ Branch(fast_double, ne, elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
}
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element.
+ Label holecheck_passed1;
+ __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(address, address, at);
+ __ lw(scratch_value, MemOperand(address));
+ __ Branch(&holecheck_passed1, ne, scratch_value,
+ Operand(masm->isolate()->factory()->the_hole_value()));
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&holecheck_passed1);
+
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(value, &non_smi_value);
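// Illustrative sketch (not part of the patch): the HOLECHECK inserted above
// computes the address of elements[key] and falls back to the runtime when
// that slot holds the hole sentinel, because a hole may hide an accessor on
// the prototype chain. Assuming the 32-bit MIPS constants used here
// (kSmiTagSize == 1, kPointerSizeLog2 == 2, kHeapObjectTag == 1,
// FixedArray::kHeaderSize == 8), the byte-offset arithmetic is roughly:
//
//   static const int kSmiTagSizeSketch = 1;
//   static const int kPointerSizeLog2Sketch = 2;
//   static const int kHeapObjectTagSketch = 1;
//   static const int kFixedArrayHeaderSketch = 8;  // map word + length word
//
//   // Byte offset of elements[key] relative to the tagged elements pointer,
//   // where |smi_key| is the smi-encoded key (untagged index shifted left 1).
//   static inline int SmiKeyedElementOffset(int smi_key) {
//     int scaled = smi_key << (kPointerSizeLog2Sketch - kSmiTagSizeSketch);
//     return kFixedArrayHeaderSketch - kHeapObjectTagSketch + scaled;
//   }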
@@ -1231,6 +910,21 @@ static void KeyedStoreGenerateGenericHelper(
__ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
__ Branch(slow, ne, elements_map, Operand(at));
}
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so
+ // go to the runtime.
+ __ Addu(address, elements,
+ Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
+ - kHeapObjectTag));
+ __ sll(at, key, kPointerSizeLog2);
+ __ addu(address, address, at);
+ __ lw(scratch_value, MemOperand(address));
+ __ Branch(&fast_double_without_map_check, ne, scratch_value,
+ Operand(kHoleNanUpper32));
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
@@ -1324,10 +1018,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
__ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded |
+ 1 << Map::kIsObserved));
__ Branch(&slow, ne, t0, Operand(zero_reg));
// Check if the object is a JS array or not.
__ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -1422,11 +1117,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1437,10 +1132,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// Push receiver, key and value for runtime call.
__ Push(a2, a1, a0);
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
@@ -1485,8 +1178,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -1495,9 +1187,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, strict_mode,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
@@ -1622,12 +1312,10 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
return;
}
-#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
address, andi_instruction_address, delta);
}
-#endif
Address patch_address =
andi_instruction_address - delta * Instruction::kInstrSize;
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 26ffd66e1d..e4fbe1fcd6 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -84,9 +84,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ RegisterDependentCodeForEmbeddedMaps(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -148,17 +146,23 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
+ // Classic mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->is_classic_mode() &&
+ !info_->is_native()) {
Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
+ int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ lw(a2, MemOperand(sp, receiver_offset));
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ lw(a2, GlobalObjectOperand());
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ sw(a2, MemOperand(sp, receiver_offset));
+
__ bind(&ok);
}
}
@@ -175,8 +179,7 @@ bool LCodeGen::GeneratePrologue() {
if (slots > 0) {
if (FLAG_debug_code) {
__ Subu(sp, sp, Operand(slots * kPointerSize));
- __ push(a0);
- __ push(a1);
+ __ Push(a0, a1);
__ Addu(a0, sp, Operand(slots * kPointerSize));
__ li(a1, Operand(kSlotsZapValue));
Label loop;
@@ -184,8 +187,7 @@ bool LCodeGen::GeneratePrologue() {
__ Subu(a0, a0, Operand(kPointerSize));
__ sw(a1, MemOperand(a0, 2 * kPointerSize));
__ Branch(&loop, ne, a0, Operand(sp));
- __ pop(a1);
- __ pop(a0);
+ __ Pop(a0, a1);
} else {
__ Subu(sp, sp, Operand(slots * kPointerSize));
}
@@ -200,17 +202,18 @@ bool LCodeGen::GeneratePrologue() {
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in a1.
- __ push(a1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
+ __ push(a1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both v0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Context is returned in v0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, v0);
+ __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -264,7 +267,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -280,7 +284,7 @@ bool LCodeGen::GenerateDeferredCode() {
__ MultiPush(cp.bit() | fp.bit() | ra.bit());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -333,7 +337,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
ASSERT(info()->IsStub());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ Call(t9);
}
} else {
@@ -394,7 +398,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
ASSERT(r.IsSmiOrTagged());
- __ LoadObject(scratch, literal);
+ __ li(scratch, literal);
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
@@ -516,17 +520,36 @@ Operand LCodeGen::ToOperand(LOperand* op) {
}
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize;
+}
+
+
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()));
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
+ } else {
+ // Without an eager frame, retrieve the parameter relative to the
+ // stack pointer.
+ return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+ // Without an eager frame, retrieve the parameter relative to the
+ // stack pointer.
+ return MemOperand(
+ sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
}
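// Illustrative sketch (not part of the patch): with no eager frame (stub
// code), spill-slot indices are negative and ToMemOperand maps them onto
// sp-relative byte offsets via ArgumentsOffsetWithoutFrame above. With
// kPointerSize == 4, index -1 maps to offset 0, -2 to 4, -3 to 8, and so on:
//
//   static inline int ArgumentsOffsetWithoutFrameSketch(int index) {
//     // Mirrors the helper added above; |index| is expected to be negative.
//     return -(index + 1) * 4;
//   }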
@@ -669,7 +692,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
__ Call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -696,7 +718,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadObject(cp, Handle<Object>::cast(constant->handle(isolate())));
+ __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
} else {
UNREACHABLE();
}
@@ -766,13 +788,23 @@ void LCodeGen::DeoptimizeIf(Condition condition,
return;
}
- ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
- if (FLAG_deopt_every_n_times == 1 &&
- !info()->IsStub() &&
- info()->opt_count() == id) {
- ASSERT(frame_is_built_);
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Register scratch = scratch0();
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+ __ Push(a1, scratch);
+ __ li(scratch, Operand(count));
+ __ lw(a1, MemOperand(scratch));
+ __ Subu(a1, a1, Operand(1));
+ __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
+ __ li(a1, Operand(FLAG_deopt_every_n_times));
+ __ sw(a1, MemOperand(scratch));
+ __ Pop(a1, scratch);
+
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
- return;
+ __ bind(&no_deopt);
+ __ sw(a1, MemOperand(scratch));
+ __ Pop(a1, scratch);
}
if (info()->ShouldTrapOnDeopt()) {
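// Illustrative sketch (not part of the patch): the code emitted above for
// FLAG_deopt_every_n_times keeps a countdown in an external counter cell and,
// every Nth time the check point is reached, resets the counter and calls the
// deoptimization entry. In plain C++ the behaviour is roughly:
//
//   static inline bool ShouldStressDeopt(int* stress_deopt_count,
//                                        int deopt_every_n_times) {
//     if (--*stress_deopt_count != 0) return false;  // not the Nth time yet
//     *stress_deopt_count = deopt_every_n_times;     // reset, then deopt
//     return true;
//   }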
@@ -818,36 +850,6 @@ void LCodeGen::DeoptimizeIf(Condition condition,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
@@ -858,6 +860,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1028,11 +1031,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
RegExpExecStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1048,13 +1046,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::TranscendentalCache: {
- __ lw(a0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -1070,7 +1061,7 @@ void LCodeGen::DoModI(LModI* instr) {
HMod* hmod = instr->hydrogen();
HValue* left = hmod->left();
HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
+ if (hmod->RightIsPowerOf2()) {
const Register left_reg = ToRegister(instr->left());
const Register result_reg = ToRegister(instr->result());
@@ -1093,35 +1084,6 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&left_is_not_negative);
__ And(result_reg, left_reg, divisor - 1);
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
- const Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
- &left_is_not_negative, ge, left_reg, Operand(zero_reg));
- __ subu(result_reg, zero_reg, left_reg);
- __ And(result_reg, result_reg, divisor - 1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result_reg, zero_reg, result_reg);
- }
-
- __ bind(&left_is_not_negative);
- __ And(result_reg, left_reg, divisor - 1);
- __ bind(&done);
-
} else {
const Register scratch = scratch0();
const Register left_reg = ToRegister(instr->left());
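// Illustrative sketch (not part of the patch): the RightIsPowerOf2() fast
// path retained above reduces the modulus to a mask, negating around the mask
// for negative dividends so the result keeps the sign of the dividend
// (ignoring the INT_MIN corner case for brevity):
//
//   static inline int ModPowerOf2Sketch(int left, int divisor /* 2^k */) {
//     if (left < 0) return -((-left) & (divisor - 1));
//     return left & (divisor - 1);
//   }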
@@ -1678,7 +1640,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
- __ LoadObject(ToRegister(instr->result()), value);
+ __ li(ToRegister(instr->result()), value);
}
@@ -1689,41 +1651,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ Move(result, input);
- __ JumpIfSmi(input, &done);
- }
-
- // If the object is not a value type, return the object.
- __ GetObjectType(input, map, map);
- __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
- __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -1735,7 +1662,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
- __ And(at, object, Operand(kSmiTagMask));
+ __ SmiTst(object, at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
@@ -1762,14 +1689,38 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- Register string = ToRegister(instr->string());
- LOperand* index_op = instr->index();
- Register value = ToRegister(instr->value());
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
Register scratch = scratch0();
- String::Encoding encoding = instr->encoding();
+ ASSERT(!scratch.is(string));
+ ASSERT(!scratch.is(ToRegister(index)));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Addu(scratch, string, ToRegister(index));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ sll(scratch, ToRegister(index), 1);
+ __ Addu(scratch, string, scratch);
+ }
+ return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
if (FLAG_debug_code) {
+ Register scratch = scratch0();
__ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -1782,37 +1733,36 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
__ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
}
- if (index_op->IsConstantOperand()) {
- int constant_index = ToInteger32(LConstantOperand::cast(index_op));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ sb(value,
- FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
- } else {
- __ sh(value,
- FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
- }
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ lbu(result, operand);
} else {
- Register index = ToRegister(index_op);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Addu(scratch, string, Operand(index));
- __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
- } else {
- __ sll(scratch, index, 1);
- __ Addu(scratch, string, scratch);
- __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
- }
+ __ lhu(result, operand);
}
}
-void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), at);
- __ push(input_reg);
- ASSERT(ToRegister(instr->context()).is(cp));
- CallRuntime(Runtime::kThrow, 1, instr);
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
if (FLAG_debug_code) {
- __ stop("Unreachable code.");
+ Register scratch = scratch0();
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ sb(value, operand);
+ } else {
+ __ sh(value, operand);
}
}
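// Illustrative sketch (not part of the patch): for a constant index,
// BuildSeqStringOperand above folds the whole address into a single field
// offset. Assuming a 12-byte SeqString header on 32-bit targets (map, hash,
// length) and kUC16Size == 2, the field offset is:
//
//   static inline int ConstantSeqStringOffsetSketch(int index, bool two_byte) {
//     const int kSeqStringHeaderSizeAssumed = 12;   // assumed header size
//     int offset = two_byte ? index * 2 : index;    // scale by char width
//     return kSeqStringHeaderSizeAssumed + offset;  // FieldMemOperand adds it
//   }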
@@ -1941,12 +1891,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ MultiPush(saved_regs);
__ PrepareCallCFunction(0, 2, scratch0());
- __ SetCallCDoubleArguments(left, right);
+ __ MovToFloatParameters(left, right);
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
- __ GetCFunctionDoubleResult(result);
+ __ MovFromFloatResult(result);
// Restore saved register.
__ MultiPop(saved_regs);
@@ -1965,7 +1915,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
@@ -2022,6 +1972,16 @@ void LCodeGen::EmitBranchF(InstrType instr,
template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr,
+ Condition condition,
+ Register src1,
+ const Operand& src2) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
+}
+
+
+template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
Condition condition,
FPURegister src1,
@@ -2101,7 +2061,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
- __ And(at, reg, Operand(kSmiTagMask));
+ __ SmiTst(reg, at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
@@ -2294,6 +2254,32 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
+ __ FmoveHigh(scratch, value);
+ __ li(at, 0x80000000);
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value,
+ scratch,
+ Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()),
+ DO_SMI_CHECK);
+ __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
+ __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+ __ mov(at, zero_reg);
+ }
+ EmitBranch(instr, eq, scratch, Operand(at));
+}
+
+
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Register temp2,
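// Illustrative sketch (not part of the patch): the new
// DoCompareMinusZeroAndBranch recognizes -0.0 by its IEEE-754 bit pattern
// (sign bit set, all other bits clear), which is what the exponent/mantissa
// word comparisons above perform in the heap-number case:
//
//   #include <cstdint>
//   #include <cstring>
//
//   static inline bool IsMinusZeroSketch(double value) {
//     uint64_t bits;
//     std::memcpy(&bits, &value, sizeof(bits));  // bit-exact view of double
//     return bits == 0x8000000000000000ULL;      // high word 0x80000000, low 0
//   }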
@@ -2629,10 +2615,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
- __ Branch(&cache_miss, ne, map, Operand(at));
+ __ BranchShort(&cache_miss, ne, map, Operand(at));
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
- // with true or false.
+ // with true or false. The distance from the map check has to be constant.
__ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
__ Branch(&done);
@@ -2682,7 +2668,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// offset to the location of the map check.
Register temp = ToRegister(instr->temp());
ASSERT(temp.is(t0));
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
+ __ li(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 7;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
@@ -2784,10 +2770,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2816,18 +2801,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->global_object()).is(a1));
- ASSERT(ToRegister(instr->value()).is(a0));
-
- __ li(a2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
@@ -2895,11 +2868,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
MemOperand operand = MemOperand(object, offset);
- if (access.representation().IsByte()) {
- __ lb(result, operand);
- } else {
- __ lw(result, operand);
- }
+ __ Load(result, operand, access.representation());
return;
}
@@ -2915,11 +2884,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
object = result;
}
MemOperand operand = FieldMemOperand(object, offset);
- if (access.representation().IsByte()) {
- __ lb(result, operand);
- } else {
- __ lw(result, operand);
- }
+ __ Load(result, operand, access.representation());
}
@@ -2930,7 +2895,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in a2.
__ li(a2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2984,34 +2949,47 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->object());
- __ lw(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them, add one more.
+ if (instr->length()->IsConstantOperand()) {
int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- int index = (const_length - const_index) + 1;
- __ lw(result, MemOperand(arguments, index * kPointerSize));
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ lw(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ li(at, Operand(const_length + 1));
+ __ Subu(result, at, index);
+ __ sll(at, result, kPointerSizeLog2);
+ __ Addu(at, arguments, at);
+ __ lw(result, MemOperand(at));
+ }
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ Subu(result, length, Operand(loc));
+ __ sll(at, result, kPointerSizeLog2);
+ __ Addu(at, arguments, at);
+ __ lw(result, MemOperand(at));
+ } else {
+ __ sll(at, length, kPointerSizeLog2);
+ __ Addu(at, arguments, at);
+ __ lw(result, MemOperand(at));
+ }
} else {
Register length = ToRegister(instr->length());
Register index = ToRegister(instr->index());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them, add one more.
- __ subu(length, length, index);
- __ Addu(length, length, Operand(1));
- __ sll(length, length, kPointerSizeLog2);
- __ Addu(at, arguments, Operand(length));
- __ lw(result, MemOperand(at, 0));
+ __ Subu(result, length, index);
+ __ Addu(result, result, 1);
+ __ sll(at, result, kPointerSizeLog2);
+ __ Addu(at, arguments, at);
+ __ lw(result, MemOperand(at));
}
}
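// Illustrative sketch (not part of the patch): every branch of the rewritten
// DoAccessArgumentsAt computes the same word index into the arguments area;
// the "+ 1" accounts for one of the two words (saved fp and return address)
// that sit between the frame pointer and the last argument:
//
//   static inline int ArgumentWordIndexSketch(int length, int index) {
//     return (length - index) + 1;  // argument |index|, counted from the end
//   }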
@@ -3033,10 +3011,16 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
+
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ int base_offset =
+ (instr->additional_index() << element_size_shift) + additional_offset;
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
__ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
@@ -3044,11 +3028,12 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), base_offset));
__ cvt_d_s(result, result);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ } else { // loading doubles, not floats.
+ __ ldc1(result, MemOperand(scratch0(), base_offset));
}
} else {
Register result = ToRegister(instr->result());
@@ -3057,31 +3042,40 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
element_size_shift, shift_size,
instr->additional_index(), additional_offset);
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
__ lb(result, mem_operand);
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ lbu(result, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ lh(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ lhu(result, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ lw(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
DeoptimizeIf(Ugreater_equal, instr->environment(),
result, Operand(0x80000000));
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3167,7 +3161,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ And(scratch, result, Operand(kSmiTagMask));
+ __ SmiTst(result, scratch);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
@@ -3178,7 +3172,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3196,14 +3190,28 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
int shift_size,
int additional_index,
int additional_offset) {
- if (additional_index != 0 && !key_is_constant) {
- additional_index *= 1 << (element_size - shift_size);
- __ Addu(scratch0(), key, Operand(additional_index));
- }
-
+ int base_offset = (additional_index << element_size) + additional_offset;
if (key_is_constant) {
return MemOperand(base,
- (constant_key << element_size) + additional_offset);
+ base_offset + (constant_key << element_size));
+ }
+
+ if (additional_offset != 0) {
+ if (shift_size >= 0) {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), scratch0(), Operand(base_offset));
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ __ srl(scratch0(), key, 1);
+ __ Addu(scratch0(), scratch0(), Operand(base_offset));
+ }
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ }
+
+ if (additional_index != 0) {
+ additional_index *= 1 << (element_size - shift_size);
+ __ Addu(scratch0(), key, Operand(additional_index));
}
if (additional_index == 0) {
@@ -3288,26 +3296,29 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
+ Label global_object, result_in_receiver;
- // Do not transform the receiver to object for strict mode
- // functions.
- __ lw(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lw(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ lw(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- // Do not transform the receiver to object for builtins.
- int32_t strict_mode_function_mask =
- 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
- int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
- __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
- __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
+ // Do not transform the receiver to object for builtins.
+ int32_t strict_mode_function_mask =
+ 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+ __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
+ __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
+ }
// Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
@@ -3316,20 +3327,30 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ Branch(&global_object, eq, receiver, Operand(scratch));
// Deoptimize if the receiver is not a JS object.
- __ And(scratch, receiver, Operand(kSmiTagMask));
+ __ SmiTst(receiver, scratch);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr->environment(),
scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(&receiver_ok);
+ __ Branch(&result_in_receiver);
__ bind(&global_object);
- __ lw(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ lw(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
- __ lw(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
+ __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ lw(result,
+ ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(result,
+ FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+
+ if (result.is(receiver)) {
+ __ bind(&result_in_receiver);
+ } else {
+ Label result_ok;
+ __ Branch(&result_ok);
+ __ bind(&result_in_receiver);
+ __ mov(result, receiver);
+ __ bind(&result_ok);
+ }
}
@@ -3377,8 +3398,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3416,17 +3436,9 @@ void LCodeGen::DoContext(LContext* instr) {
}
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ lw(result,
- MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
- __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+ __ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
// The context is the first argument.
__ Push(cp, scratch0(), scratch1());
@@ -3434,25 +3446,10 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
}
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global_object());
- Register result = ToRegister(instr->result());
- __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
A1State a1_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3463,7 +3460,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (can_invoke_directly) {
if (a1_state == A1_UNINITIALIZED) {
- __ LoadHeapObject(a1, function);
+ __ li(a1, function);
}
// Change context.
@@ -3476,7 +3473,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ SetCallKind(t1, call_kind);
__ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(at);
@@ -3486,24 +3482,11 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- __ mov(a0, v0);
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- A1_UNINITIALIZED);
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
ASSERT(instr->context() != NULL);
ASSERT(ToRegister(instr->context()).is(cp));
@@ -3774,6 +3757,7 @@ void LCodeGen::DoPower(LPower* instr) {
Label no_deopt;
__ JumpIfSmi(a2, &no_deopt);
__ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(MathPowStub::TAGGED);
@@ -3789,68 +3773,6 @@ void LCodeGen::DoPower(LPower* instr) {
}
-void LCodeGen::DoRandom(LRandom* instr) {
- // Assert that the register size is indeed the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- // Load native context.
- Register global_object = ToRegister(instr->global_object());
- Register native_context = global_object;
- __ lw(native_context, FieldMemOperand(
- global_object, GlobalObject::kNativeContextOffset));
-
- // Load state (FixedArray of the native context's random seeds).
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- Register state = native_context;
- __ lw(state, FieldMemOperand(native_context, kRandomSeedOffset));
-
- // Load state[0].
- Register state0 = ToRegister(instr->scratch());
- __ lw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
- // Load state[1].
- Register state1 = ToRegister(instr->scratch2());
- __ lw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- Register scratch3 = ToRegister(instr->scratch3());
- Register scratch4 = scratch0();
- __ And(scratch3, state0, Operand(0xFFFF));
- __ li(scratch4, Operand(18273));
- __ Mul(scratch3, scratch3, scratch4);
- __ srl(state0, state0, 16);
- __ Addu(state0, scratch3, state0);
- // Save state[0].
- __ sw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ And(scratch3, state1, Operand(0xFFFF));
- __ li(scratch4, Operand(36969));
- __ Mul(scratch3, scratch3, scratch4);
- __ srl(state1, state1, 16),
- __ Addu(state1, scratch3, state1);
- // Save state[1].
- __ sw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- Register random = scratch4;
- __ And(random, state1, Operand(0x3FFFF));
- __ sll(state0, state0, 14);
- __ Addu(random, random, state0);
-
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(scratch3, Operand(0x41300000));
- // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Move(result, random, scratch3);
- // Move 0x4130000000000000 to FPU.
- DoubleRegister scratch5 = double_scratch0();
- __ Move(scratch5, zero_reg, scratch3);
- __ sub_d(result, result, scratch5);
-}
-
-
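For reference, the removed DoRandom lowering above emitted a multiply-with-carry generator plus a bit-splicing trick to build a double in [0, 1). A standalone C++ sketch of that algorithm (illustrative only, not V8 code; the function name is made up here) looks like this:

#include <cstdint>
#include <cstring>

// Mirrors the removed MIPS sequence: two MWC state updates, 32 random bits,
// then the bits are placed in the low mantissa word of 1.0 * 2^20
// (upper word 0x41300000) and 2^20 is subtracted to land in [0, 1).
double PseudoRandomSketch(uint32_t* state0, uint32_t* state1) {
  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  *state0 = 18273u * (*state0 & 0xFFFF) + (*state0 >> 16);
  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  *state1 = 36969u * (*state1 & 0xFFFF) + (*state1 >> 16);
  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF).
  uint32_t random_bits = (*state0 << 14) + (*state1 & 0x3FFFF);
  uint64_t bits = (uint64_t{0x41300000} << 32) | random_bits;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result - 1048576.0;  // 1048576.0 == 2^20.
}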
void LCodeGen::DoMathExp(LMathExp* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
@@ -3866,46 +3788,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
@@ -3919,74 +3806,66 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
A1_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ li(a2, Operand(instr->name()));
- CallCode(ic, mode, instr);
-}
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ li(a0, Operand(instr->arity()));
+ }
+ // Change context.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->function()).is(a1));
- ASSERT(ToRegister(instr->result()).is(v0));
+ // Load the code entry address
+ __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(at);
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ li(a2, Operand(instr->name()));
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- A1_UNINITIALIZED);
+ CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4010,16 +3889,15 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a0, Operand(instr->arity()));
- __ li(a2, Operand(instr->hydrogen()->property_cell()));
+ __ li(a2, Operand(factory()->undefined_value()));
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
+ ArrayNoArgumentConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
@@ -4031,18 +3909,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ Branch(&packed_case, eq, t1, Operand(zero_reg));
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
- override_mode);
+ ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
+ ArraySingleArgumentConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
+ ArrayNArgumentsConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -4066,7 +3943,13 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
- __ Addu(result, base, Operand(instr->offset()));
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ Addu(result, base, Operand(ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ Addu(result, base, offset);
+ }
}
@@ -4081,23 +3964,25 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (access.IsExternalMemory()) {
Register value = ToRegister(instr->value());
MemOperand operand = MemOperand(object, offset);
- if (representation.IsByte()) {
- __ sb(value, operand);
- } else {
- __ sw(value, operand);
- }
+ __ Store(value, operand, representation);
return;
}
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ And(scratch, value, Operand(kSmiTagMask));
+ __ SmiTst(value, scratch);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+
+ // We know that value is not a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -4125,17 +4010,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
- ASSERT(!object.is(value));
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
- if (representation.IsByte()) {
- __ sb(value, operand);
- } else {
- __ sw(value, operand);
- }
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
@@ -4150,11 +4027,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
} else {
__ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
MemOperand operand = FieldMemOperand(scratch, offset);
- if (representation.IsByte()) {
- __ sb(value, operand);
- } else {
- __ sw(value, operand);
- }
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
@@ -4178,9 +4051,8 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in a2.
__ li(a2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(),
+ instr->strict_mode_flag());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4242,10 +4114,16 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
+
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ int base_offset =
+ (instr->additional_index() << element_size_shift) + additional_offset;
Register address = scratch0();
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4260,11 +4138,12 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ Addu(address, external_pointer, address);
}
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(address, additional_offset));
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(address, additional_offset));
+ __ swc1(double_scratch0(), MemOperand(address, base_offset));
+ } else { // Storing doubles, not floats.
+ __ sdc1(value, MemOperand(address, base_offset));
}
} else {
Register value(ToRegister(instr->value()));
@@ -4273,21 +4152,30 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
element_size_shift, shift_size,
instr->additional_index(), additional_offset);
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
__ sb(value, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ sh(value, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ sw(value, mem_operand);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4339,8 +4227,8 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
// Only load canonical NaN if the comparison above set the overflow.
__ bind(&is_nan);
- __ Move(double_scratch,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
__ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
element_size_shift));
__ Branch(&done);
@@ -4404,7 +4292,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4475,9 +4363,10 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(instr->hydrogen()->flags());
+ ASSERT(ToRegister(instr->left()).is(a1));
+ ASSERT(ToRegister(instr->right()).is(a0));
+ StringAddStub stub(instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4610,10 +4499,13 @@ void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
LOperand* output = instr->result();
Register scratch = scratch0();
- __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
+ ASSERT(output->IsRegister());
if (!instr->hydrogen()->value()->HasRange() ||
!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ SmiTag(ToRegister(output), ToRegister(input));
}
}
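For context, the branch above only needs SmiTagCheckOverflow when the value's range is unknown: on 32-bit targets a Smi is a 31-bit integer shifted left by one, so tagging overflows exactly when the value needs more than 31 signed bits. A hedged sketch (not the V8 macro assembler) of that tag-with-overflow check:

#include <cstdint>

// Returns true when the value cannot be represented as a Smi; *tagged holds
// the tagged (value << 1) result either way.
bool SmiTagWithOverflowCheck(int32_t value, int32_t* tagged) {
  *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  // The shift lost information iff shifting back does not recover the input.
  return (*tagged >> 1) != value;
}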
@@ -4682,13 +4574,12 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
LNumberTagU* instr_;
};
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
- __ SmiTag(reg, reg);
+ __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
+ __ SmiTag(result, input);
__ bind(deferred->exit());
}
@@ -4907,8 +4798,9 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
Label no_heap_number, check_bools, check_false;
- __ Branch(&no_heap_number, ne, scratch1, Operand(at)); // HeapNumber map?
- __ mov(scratch2, input_reg);
+ // Check HeapNumber map.
+ __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+ __ mov(scratch2, input_reg); // In delay slot.
__ TruncateHeapNumberToI(input_reg, scratch2);
__ Branch(&done);
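The "truncating conversion ... used by the JS bitwise operations" mentioned above is ECMAScript ToInt32. A simplified model (illustrative only; it ignores -0 subtleties and is not the V8 fast path): NaN and infinities become 0, otherwise truncate toward zero and wrap modulo 2^32, reinterpreting the result as a signed 32-bit value.

#include <cmath>
#include <cstdint>

int32_t ToInt32Sketch(double value) {
  if (std::isnan(value) || std::isinf(value)) return 0;
  double truncated = std::trunc(value);            // Truncate toward zero.
  double wrapped = std::fmod(truncated, 4294967296.0);  // Modulo 2^32.
  if (wrapped < 0) wrapped += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}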
@@ -5089,7 +4981,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ __ SmiTst(ToRegister(input), at);
DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}
@@ -5097,7 +4989,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->IsHeapObject()) {
LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ __ SmiTst(ToRegister(input), at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
}
@@ -5165,12 +5057,12 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(object);
__ mov(cp, zero_reg);
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
- __ And(at, scratch0(), Operand(kSmiTagMask));
+ __ SmiTst(scratch0(), at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
@@ -5362,19 +5254,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(size));
}
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
- instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
- instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
- instr->context());
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5397,7 +5292,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// a2 and t0-t2 are used as temporaries.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(t3, instr->hydrogen()->literals());
+ __ li(t3, instr->hydrogen()->literals());
__ lw(a1, FieldMemOperand(t3, literal_offset));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&materialized, ne, a1, Operand(at));
@@ -5625,24 +5520,25 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
}
}
+ last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5715,7 +5611,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
RelocInfo::CODE_TARGET,
instr);
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5727,7 +5622,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 29a89d5885..b4651702e0 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -242,11 +242,8 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
A1State a1_state);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -273,7 +270,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -282,6 +278,10 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
+ MemOperand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
@@ -302,6 +302,8 @@ class LCodeGen: public LCodeGenBase {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr,
Condition condition,
@@ -313,6 +315,11 @@ class LCodeGen: public LCodeGenBase {
FPURegister src1,
FPURegister src2);
template<class InstrType>
+ void EmitFalseBranch(InstrType instr,
+ Condition condition,
+ Register src1,
+ const Operand& src2);
+ template<class InstrType>
void EmitFalseBranchF(InstrType instr,
Condition condition,
FPURegister src1,
@@ -414,12 +421,18 @@ class LCodeGen: public LCodeGenBase {
codegen_->expected_safepoint_kind_ = kind;
switch (codegen_->expected_safepoint_kind_) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PushSafepointRegisters();
+ case Safepoint::kWithRegisters: {
+ StoreRegistersStateStub stub1(kDontSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub1);
break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PushSafepointRegistersAndDoubles();
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ StoreRegistersStateStub stub2(kSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub2);
break;
+ }
default:
UNREACHABLE();
}
@@ -429,12 +442,18 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
ASSERT((kind & Safepoint::kWithRegisters) != 0);
switch (kind) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PopSafepointRegisters();
+ case Safepoint::kWithRegisters: {
+ RestoreRegistersStateStub stub1(kDontSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub1);
break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PopSafepointRegistersAndDoubles();
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ RestoreRegistersStateStub stub2(kSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub2);
break;
+ }
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index 460e13bf0a..3ee74866c7 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -256,7 +256,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsInteger32(constant_source)) {
__ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ __ li(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
@@ -271,8 +271,7 @@ void LGapResolver::EmitMove(int index) {
__ li(kLithiumScratchReg,
Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(kLithiumScratchReg,
- cgen_->ToHandle(constant_source));
+ __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
}
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 6b76ff7429..a194c29eab 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -261,7 +261,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -277,11 +277,23 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
- stream->Add(" + %d", offset());
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -306,28 +318,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[a2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -364,7 +354,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -567,8 +557,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -576,40 +565,35 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
+ LTemplateResultInstruction<1>* instr, int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
+ LTemplateResultInstruction<1>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -864,17 +848,18 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
@@ -933,90 +918,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
- }
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
- }
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1043,6 +944,9 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -1087,7 +991,7 @@ LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
}
@@ -1119,11 +1023,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* inner_object) {
- LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
- LInnerAllocatedObject* result =
- new(zone()) LInnerAllocatedObject(base_object);
- return DefineAsRegister(result);
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1145,33 +1049,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+ return MarkAsCall(DefineFixed(result, v0), instr);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1189,9 +1098,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
@@ -1203,30 +1109,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
+ return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
}
@@ -1285,32 +1171,6 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* key = UseFixed(instr->key(), a2);
- return MarkAsCall(
- DefineFixed(new(zone()) LCallKeyed(context, key), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
@@ -1330,8 +1190,8 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
- return MarkAsCall(
- DefineFixed(new(zone()) LCallFunction(context, function), v0), instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, v0), instr);
}
@@ -1419,20 +1279,6 @@ bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
}
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- // Only optimize when we have magic numbers for the divisor.
- // The standard integer division routine is usually slower than transitioning
- // to FPU.
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- return NULL;
-}
-
-
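The "magic numbers" the removed helper's comment refers to are multiply-and-shift constants that replace an integer divide by a known constant. A minimal illustration for unsigned division by 3 (an assumption-free textbook case, not code from this patch): 0xAAAAAAABu is ceil(2^33 / 3), so a widening multiply followed by a 33-bit shift yields n / 3 for every 32-bit unsigned n without a divide instruction.

#include <cstdint>

uint32_t DivideBy3(uint32_t n) {
  return static_cast<uint32_t>((static_cast<uint64_t>(n) * 0xAAAAAAABu) >> 33);
}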
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
LOperand* dividend = UseRegister(instr->left());
@@ -1449,19 +1295,15 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
+ if (instr->RightIsPowerOf2()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right));
+ UseConstant(right));
LInstruction* result = DefineAsRegister(mod);
return (left->CanBeNegative() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- return AssignEnvironment(DefineAsRegister(mod));
} else {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right),
@@ -1589,6 +1431,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
result = AssignEnvironment(result);
}
return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ return result;
} else if (instr->representation().IsDouble()) {
if (kArchVariant == kMips32r2) {
if (instr->left()->IsMul())
@@ -1642,19 +1493,6 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseTempRegister(instr->global_object());
- LOperand* scratch = TempRegister();
- LOperand* scratch2 = TempRegister();
- LOperand* scratch3 = TempRegister();
- LRandom* result = new(zone()) LRandom(
- global_object, scratch, scratch2, scratch3);
- return DefineFixedDouble(result, f0);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
@@ -1703,6 +1541,16 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -1785,19 +1633,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), a0);
LDateField* result =
@@ -1806,11 +1641,21 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegisterOrConstant(instr->index());
- LOperand* value = UseRegister(instr->value());
- return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ return new(zone()) LSeqStringSetChar(context, string, index, value);
}
@@ -1835,13 +1680,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), a0);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -1926,7 +1764,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegisterAtStart(val);
if (val->CheckFlag(HInstruction::kUint32)) {
LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
@@ -1937,8 +1775,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
- : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
+ : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -2074,16 +1912,6 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(), a1);
- LOperand* value = UseFixed(instr->value(), a0);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2134,20 +1962,13 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2159,20 +1980,19 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(backing_store, key);
}
DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS;
return can_deoptimize ? AssignEnvironment(result) : result;
}
@@ -2189,7 +2009,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2218,17 +2038,17 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
- (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
- ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
- (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
-
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
@@ -2328,8 +2148,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
return MarkAsCall(
DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
instr);
@@ -2400,7 +2220,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
- Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2450,15 +2270,8 @@ LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length;
- LOperand* index;
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- length = UseRegisterOrConstant(instr->length());
- index = UseOrConstant(instr->index());
- } else {
- length = UseTempRegister(instr->length());
- index = UseRegisterAtStart(instr->index());
- }
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2478,6 +2291,9 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2529,8 +2345,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 518a3b8c65..258502dc26 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -52,12 +52,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -72,6 +69,7 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
+ V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -93,13 +91,10 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
- V(ElementsKind) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -118,7 +113,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
@@ -130,7 +124,6 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
V(MathExp) \
V(MathFloor) \
V(MathFloorOfDiv) \
@@ -138,9 +131,7 @@ class LCodeGen;
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -149,13 +140,12 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
- V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -164,7 +154,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -176,7 +165,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
@@ -185,7 +173,6 @@ class LCodeGen;
V(Uint32ToDouble) \
V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -302,10 +289,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -317,10 +302,20 @@ class LTemplateInstruction : public LInstruction {
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
private:
+ // Iterator support.
virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
@@ -489,10 +484,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -557,6 +548,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
@@ -810,42 +802,6 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathTan(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
-};
-
-
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
@@ -922,6 +878,22 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
@@ -1301,34 +1273,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
@@ -1348,41 +1292,39 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
- inputs_[2] = value;
}
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
- LThrow(LOperand* context, LOperand* value) {
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
inputs_[0] = context;
- inputs_[1] = value;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
};
@@ -1431,28 +1373,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
- LRandom(LOperand* global_object,
- LOperand* scratch,
- LOperand* scratch2,
- LOperand* scratch3) {
- inputs_[0] = global_object;
- temps_[0] = scratch;
- temps_[1] = scratch2;
- temps_[2] = scratch3;
- }
-
- LOperand* global_object() const { return inputs_[0]; }
- LOperand* scratch() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
- LOperand* scratch3() const { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1577,20 +1497,6 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
@@ -1606,6 +1512,12 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1671,28 +1583,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1771,19 +1661,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInnerAllocatedObject(LOperand* base_object) {
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
+ inputs_[1] = offset;
}
- LOperand* base_object() { return inputs_[0]; }
- int offset() { return hydrogen()->offset(); }
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
- DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -1801,18 +1691,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1826,95 +1704,73 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
+ LOperand* function() { return inputs_[0]; }
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
+ LOperand* target() const { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
-
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNamed(LOperand* context) {
+ LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1936,35 +1792,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -2265,6 +2092,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2709,20 +2542,18 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
@@ -2739,15 +2570,11 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
static bool HasMagicNumberForDivisor(int32_t divisor);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
@@ -2763,7 +2590,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2813,7 +2639,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2822,22 +2648,16 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
@@ -2851,10 +2671,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
@@ -2868,14 +2684,11 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index d6e8af1fc0..c62b9f5322 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -44,7 +44,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -53,6 +52,38 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
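+// Load a value from memory into dst, using the load width implied by the
+// given representation (signed/unsigned byte, half word, or word).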
+void MacroAssembler::Load(Register dst,
+ const MemOperand& src,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ lb(dst, src);
+ } else if (r.IsUInteger8()) {
+ lbu(dst, src);
+ } else if (r.IsInteger16()) {
+ lh(dst, src);
+ } else if (r.IsUInteger16()) {
+ lhu(dst, src);
+ } else {
+ lw(dst, src);
+ }
+}
+
+
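+// Store src to memory using the store width implied by the given
+// representation (byte, half word, or word).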
+void MacroAssembler::Store(Register src,
+ const MemOperand& dst,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ sb(src, dst);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ sh(src, dst);
+ } else {
+ sw(src, dst);
+ }
+}
+
+
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
@@ -83,19 +114,6 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- li(result, Operand(cell));
- lw(result, FieldMemOperand(result, Cell::kValueOffset));
- } else {
- li(result, Operand(object));
- }
-}
-
-
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@@ -256,6 +274,12 @@ void MacroAssembler::RecordWrite(Register object,
eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
@@ -491,8 +515,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Subu(reg1, reg1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use reg2 for index calculations and keep the hash intact in reg0.
mov(reg2, reg0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -511,7 +534,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
addu(reg2, elements, at);
lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
- if (i != kProbes - 1) {
+ if (i != kNumberDictionaryProbes - 1) {
Branch(&done, eq, key, Operand(at));
} else {
Branch(miss, ne, key, Operand(at));
@@ -766,8 +789,46 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
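+// Prefetch data at the given address. Loongson cores lack the pref
+// instruction, so fall back to a plain load into zero_reg there.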
+void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
+ if (kArchVariant == kLoongson) {
+ lw(zero_reg, rs);
+ } else {
+ pref(hint, rs);
+ }
+}
+
+
//------------Pseudo-instructions-------------
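+// Unaligned load of a word, assembled from lwr/lwl accesses.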
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
+ lwr(rd, rs);
+ lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
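+// Unaligned store of a word, assembled from swr/swl accesses.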
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
+ swr(rd, rs);
+ swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
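+// Materialize a handle constant: smis are loaded directly, while new-space
+// heap objects are loaded through a cell so the GC can update the reference.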
+void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ li(dst, Operand(value), mode);
+ } else {
+ ASSERT(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ li(dst, Operand(cell));
+ lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ li(dst, Operand(value));
+ }
+ }
+}
+
+
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
ASSERT(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1167,12 +1228,12 @@ void MacroAssembler::BranchF(Label* target,
void MacroAssembler::Move(FPURegister dst, double imm) {
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
+ DoubleRepresentation value_rep(imm);
// Handle special values first.
bool force_load = dst.is(kDoubleRegZero);
- if (value.bits == zero.bits && !force_load) {
+ if (value_rep == zero && !force_load) {
mov_d(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits && !force_load) {
+ } else if (value_rep == minus_zero && !force_load) {
neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
@@ -1502,19 +1563,27 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
if (is_near(L)) {
BranchShort(L, cond, rs, rt, bdslot);
} else {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
}
} else {
if (is_trampoline_emitted()) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
} else {
BranchShort(L, cond, rs, rt, bdslot);
}
@@ -2783,7 +2852,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3370,10 +3439,9 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
bind(&is_nan);
// Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ LoadRoot(at, Heap::kNanValueRootIndex);
+ lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
+ lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
jmp(&have_double_value);
bind(&smi_value);
@@ -3456,7 +3524,7 @@ void MacroAssembler::CheckMap(Register obj,
}
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
Move(dst, v0, v1);
} else {
@@ -3465,55 +3533,47 @@ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
+void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
+ if (IsMipsSoftFloatABI) {
+ Move(dst, a0, a1);
} else {
- Move(a0, a1, dreg);
+ Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
- if (dreg2.is(f12)) {
- ASSERT(!dreg1.is(f14));
- Move(f14, dreg2);
- Move(f12, dreg1);
- } else {
- Move(f12, dreg1);
- Move(f14, dreg2);
- }
+ Move(f12, src);
} else {
- Move(a0, a1, dreg1);
- Move(a2, a3, dreg2);
+ Move(a0, a1, src);
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
- Register reg) {
+void MacroAssembler::MovToFloatResult(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
- Move(a2, reg);
+ Move(f0, src);
} else {
- Move(a2, reg);
- Move(a0, a1, dreg);
+ Move(v0, v1, src);
}
}
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be t1 to
- // follow the calling convention which requires the call type to be
- // in t1.
- ASSERT(dst.is(t1));
- if (call_kind == CALL_AS_FUNCTION) {
- li(dst, Operand(Smi::FromInt(1)));
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+ DoubleRegister src2) {
+ if (!IsMipsSoftFloatABI) {
+ if (src2.is(f12)) {
+ ASSERT(!src1.is(f14));
+ Move(f14, src2);
+ Move(f12, src1);
+ } else {
+ Move(f12, src1);
+ Move(f14, src2);
+ }
} else {
- li(dst, Operand(Smi::FromInt(0)));
+ Move(a0, a1, src1);
+ Move(a2, a3, src2);
}
}
@@ -3528,8 +3588,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label regular_invoke;
@@ -3539,7 +3598,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
- // a3: callee code entry
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
@@ -3583,14 +3641,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(t1, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
Branch(done);
}
} else {
- SetCallKind(t1, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
@@ -3602,8 +3658,7 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3612,16 +3667,14 @@ void MacroAssembler::InvokeCode(Register code,
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(t1, call_kind);
Call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, call_kind);
Jump(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -3631,41 +3684,10 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
-
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(t1, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(t1, call_kind);
- Jump(code, rmode);
- }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3683,28 +3705,39 @@ void MacroAssembler::InvokeFunction(Register function,
lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
+ // Contract with called JS functions requires that function is passed in a1.
+ ASSERT(function.is(a1));
+
// Get the function and setup the context.
- LoadHeapObject(a1, function);
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(a3, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ li(a1, function);
+ InvokeFunction(a1, expected, actual, flag, call_wrapper);
}
@@ -3831,10 +3864,12 @@ void MacroAssembler::CallStub(CodeStub* stub,
}
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+void MacroAssembler::TailCallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2,
+ BranchDelaySlot bd) {
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
@@ -3844,10 +3879,8 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(
- ExternalReference function,
- Address function_address,
+ Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand) {
@@ -3861,6 +3894,25 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference::handle_scope_level_address(isolate()),
next_address);
+ ASSERT(function_address.is(a1) || function_address.is(a2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag =
+ isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
+ lb(t9, MemOperand(t9, 0));
+ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+ // Additional parameter is the address of the actual callback.
+ li(t9, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ mov(t9, function_address);
+ bind(&end_profiler_check);
+
// Allocate HandleScope in callee-save registers.
li(s3, Operand(next_address));
lw(s0, MemOperand(s3, kNextOffset));
@@ -3878,25 +3930,6 @@ void MacroAssembler::CallApiFunctionAndReturn(
PopSafepointRegisters();
}
- Label profiler_disabled;
- Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
- lb(t9, MemOperand(t9, 0));
- beq(t9, zero_reg, &profiler_disabled);
-
- // Third parameter is the address of the actual getter function.
- li(thunk_last_arg, reinterpret_cast<int32_t>(function_address));
- li(t9, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- li(t9, Operand(function));
-
- bind(&end_profiler_check);
-
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
@@ -3973,8 +4006,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -4210,12 +4242,10 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
GetBuiltinEntry(t9, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
- SetCallKind(t1, CALL_AS_METHOD);
Call(t9);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, CALL_AS_METHOD);
Jump(t9);
}
}
@@ -4316,16 +4346,8 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason,
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -4337,18 +4359,16 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- li(a0, Operand(p0));
- push(a0);
- li(a0, Operand(Smi::FromInt(p1 - p0)));
+ li(a0, Operand(Smi::FromInt(reason)));
push(a0);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -4356,8 +4376,8 @@ void MacroAssembler::Abort(BailoutReason reason) {
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
// Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 14;
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@@ -4476,122 +4496,12 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
-void MacroAssembler::LoadNumber(Register object,
- FPURegister dst,
- Register heap_number_map,
- Register scratch,
- Label* not_number) {
- Label is_smi, done;
-
- UntagAndJumpIfSmi(scratch, object, &is_smi);
- JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
-
- ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- Branch(&done);
-
- bind(&is_smi);
- mtc1(scratch, dst);
- cvt_d_w(dst, dst);
-
- bind(&done);
-}
-
-
-void MacroAssembler::LoadNumberAsInt32Double(Register object,
- DoubleRegister double_dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister double_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- UntagAndJumpIfNotSmi(scratch1, object, &obj_is_not_smi);
- mtc1(scratch1, double_scratch);
- cvt_d_w(double_dst, double_scratch);
- Branch(&done);
-
- bind(&obj_is_not_smi);
- JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- // Load the double value.
- ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- EmitFPUTruncate(kRoundToZero,
- scratch1,
- double_dst,
- at,
- double_scratch,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- Branch(not_int32, ne, except_flag, Operand(zero_reg));
- bind(&done);
-}
-
-
-void MacroAssembler::LoadNumberAsInt32(Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
-
- Label done, maybe_undefined;
-
- UntagAndJumpIfSmi(dst, object, &done);
-
- JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- // Load the double value.
- ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- Branch(not_int32, ne, except_flag, Operand(zero_reg));
- Branch(&done);
-
- bind(&maybe_undefined);
- LoadRoot(at, Heap::kUndefinedValueRootIndex);
- Branch(not_int32, ne, object, Operand(at));
- // |undefined| is truncated to 0.
- li(dst, Operand(Smi::FromInt(0)));
- // Fall through.
-
- bind(&done);
-}
-
-
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
Push(ra, fp, cp);
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
- Addu(fp, sp, Operand(2 * kPointerSize));
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
} else {
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
@@ -4614,7 +4524,7 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
Push(ra, fp, cp, a1);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
// Adjust fp to point to caller's fp.
- Addu(fp, sp, Operand(2 * kPointerSize));
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
}
@@ -4629,7 +4539,9 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
sw(cp, MemOperand(sp, 2 * kPointerSize));
sw(t8, MemOperand(sp, 1 * kPointerSize));
sw(t9, MemOperand(sp, 0 * kPointerSize));
- addiu(fp, sp, 3 * kPointerSize);
+ // Adjust FP to point to saved FP.
+ Addu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -4918,7 +4830,7 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
+ SmiTst(object, t0);
Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
@@ -4932,7 +4844,7 @@ void MacroAssembler::AssertString(Register object) {
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
+ SmiTst(object, t0);
Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
@@ -5126,6 +5038,42 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
}
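+// Check that 'string' is a sequential string with the expected encoding and
+// that the untagged 'index' lies within its bounds; aborts otherwise.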
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ Register scratch,
+ uint32_t encoding_mask) {
+ Label is_object;
+ SmiTst(string, at);
+ Check(ne, kNonObject, at, Operand(zero_reg));
+
+ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+ andi(at, at, kStringRepresentationMask | kStringEncodingMask);
+ li(scratch, Operand(encoding_mask));
+ Check(eq, kUnexpectedStringType, at, Operand(scratch));
+
+ // The index is assumed to be untagged coming in; tag it to compare with the
+ // string length without using a temp register. It is restored at the end of
+ // this function.
+ Label index_tag_ok, index_tag_bad;
+ TrySmiTag(index, scratch, &index_tag_bad);
+ Branch(&index_tag_ok);
+ bind(&index_tag_bad);
+ Abort(kIndexIsTooLarge);
+ bind(&index_tag_ok);
+
+ lw(at, FieldMemOperand(string, String::kLengthOffset));
+ Check(lt, kIndexIsTooLarge, index, Operand(at));
+
+ ASSERT(Smi::FromInt(0) == 0);
+ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
+
+ SmiUntag(index, index);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
@@ -5506,6 +5454,57 @@ void MacroAssembler::EnsureNotWhite(
}
+void MacroAssembler::Throw(BailoutReason reason) {
+ Label throw_start;
+ bind(&throw_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ li(a0, Operand(Smi::FromInt(reason)));
+ push(a0);
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // Will not return here.
+ if (is_trampoline_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the ThrowMessage macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 14, so we use this as a maximum value.
+ static const int kExpectedThrowMessageInstructions = 14;
+ int throw_instructions = InstructionsGeneratedSince(&throw_start);
+ ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
+ while (throw_instructions++ < kExpectedThrowMessageInstructions) {
+ nop();
+ }
+ }
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc,
+ BailoutReason reason,
+ Register rs,
+ Operand rt) {
+ Label L;
+ Branch(&L, NegateCondition(cc), rs, rt);
+ Throw(reason);
+ // Will not return here.
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
@@ -5536,7 +5535,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
EnumLength(a3, a1);
- Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
+ Branch(
+ call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
jmp(&start);
@@ -5549,11 +5549,17 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
bind(&start);
- // Check that there are no elements. Register r2 contains the current JS
+ // Check that there are no elements. Register a2 contains the current JS
// object we've reached through the prototype chain.
+ Label no_elements;
lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
- Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
+ Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
+ // Second chance: the object may be using the empty slow element dictionary.
+ LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
+ Branch(call_runtime, ne, a2, Operand(at));
+
+ bind(&no_elements);
lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
Branch(&next, ne, a2, Operand(null_value));
}
@@ -5650,6 +5656,30 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
}
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // Use scratch0 to track the current object while walking the prototype chain.
+ Move(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
+ lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ Branch(&loop_again, ne, current, Operand(factory->null_value()));
+}
+
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index a4fd766e66..85347c9e51 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -169,6 +169,7 @@ class MacroAssembler: public Assembler {
DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+ DECLARE_BRANCH_PROTOTYPES(BranchShort)
#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
@@ -279,6 +280,9 @@ class MacroAssembler: public Assembler {
Branch(L);
}
+ void Load(Register dst, const MemOperand& src, Representation r);
+ void Store(Register src, const MemOperand& dst, Representation r);
+
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
@@ -293,17 +297,6 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- li(result, object);
- }
- }
-
// ---------------------------------------------------------------------------
// GC Support
@@ -609,21 +602,23 @@ class MacroAssembler: public Assembler {
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
+ void Pref(int32_t hint, const MemOperand& rs);
+
// ---------------------------------------------------------------------------
// Pseudo-instructions.
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+ void Ulw(Register rd, const MemOperand& rs);
+ void Usw(Register rd, const MemOperand& rs);
+
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
li(rd, Operand(j), mode);
}
- inline void li(Register dst, Handle<Object> value,
- LiFlags mode = OPTIMIZE_SIZE) {
- li(dst, Operand(value), mode);
- }
+ void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
@@ -900,40 +895,31 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// JavaScript invokes.
- // Set up call kind marking in t1. The method takes t1 as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void IsObjectJSObjectType(Register heap_object,
@@ -978,6 +964,12 @@ class MacroAssembler: public Assembler {
// handler chain.
void ThrowUncatchable(Register value);
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason, Register rs, Operand rt);
+
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
@@ -1187,16 +1179,18 @@ class MacroAssembler: public Assembler {
li(s2, Operand(ref));
}
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
// Call a code stub.
void CallStub(CodeStub* stub,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = cc_always,
- Register r1 = zero_reg,
- const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
+ COND_ARGS);
// Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
+ void TailCallStub(CodeStub* stub, COND_ARGS);
+
+#undef COND_ARGS
void CallJSExitStub(CodeStub* stub);
@@ -1268,24 +1262,23 @@ class MacroAssembler: public Assembler {
void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DoubleRegister dst);
+ void MovFromFloatResult(DoubleRegister dst);
+ void MovFromFloatParameter(DoubleRegister dst);
// There are two ways of passing double arguments on MIPS, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DoubleRegister dreg);
- void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
- void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
+ void CallApiFunctionAndReturn(Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand);
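Illustrative note, not part of the patch: the renamed MovToFloatParameter/MovFromFloatResult helpers hide whether doubles travel to C code in FPU registers (hard-float ABI) or in general-purpose register pairs (soft-float ABI). A minimal standalone C++ sketch of that distinction, with the struct and register names below invented purely for illustration:

#include <cstdint>
#include <cstring>

// Hypothetical model of the two MIPS conventions for handing one double to C.
struct SimulatedRegs {
  double f12;           // FPU argument register used by the hard-float ABI
  uint32_t a0, a1;      // GPR pair used by the soft-float ABI
};

// Sketch of what MovToFloatParameter abstracts: put 'value' where C expects it.
void MovToFloatParameter(SimulatedRegs* regs, double value, bool hard_float_abi) {
  if (hard_float_abi) {
    regs->f12 = value;
  } else {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    regs->a0 = static_cast<uint32_t>(bits);        // low word (ordering illustrative)
    regs->a1 = static_cast<uint32_t>(bits >> 32);  // high word
  }
}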
@@ -1346,8 +1339,6 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
@@ -1378,6 +1369,21 @@ class MacroAssembler: public Assembler {
Addu(dst, src, src);
}
+ // Try to convert int32 to smi. If the value is too large, preserve
+ // the original value and jump to not_a_smi. Destroys scratch and
+ // sets flags.
+ void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
+ TrySmiTag(reg, reg, scratch, not_a_smi);
+ }
+ void TrySmiTag(Register dst,
+ Register src,
+ Register scratch,
+ Label* not_a_smi) {
+ SmiTagCheckOverflow(at, src, scratch);
+ BranchOnOverflow(not_a_smi, scratch);
+ mov(dst, at);
+ }
+
void SmiUntag(Register reg) {
sra(reg, reg, kSmiTagSize);
}
@@ -1386,6 +1392,14 @@ class MacroAssembler: public Assembler {
sra(dst, src, kSmiTagSize);
}
+ // Test if the register contains a smi.
+ inline void SmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask));
+ }
+ inline void NonNegativeSmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
+ }
+
// Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
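Illustrative note, not part of the patch: TrySmiTag and SmiTst above rely on the 32-bit smi representation, where a small integer is stored shifted left by one with a zero tag bit; tagging overflows when the value needs more than 31 bits, and testing for a smi only has to look at the tag bit. A self-contained sketch of that arithmetic (the helper names here are illustrative):

#include <cstdint>
#include <optional>

constexpr int kSmiTagSize = 1;      // shift used by SmiTag/SmiUntag
constexpr int32_t kSmiTagMask = 1;  // mask tested by SmiTst

// Sketch of TrySmiTag: return the tagged value, or nothing on overflow.
std::optional<int32_t> TrySmiTag(int32_t value) {
  int64_t wide = static_cast<int64_t>(value) << kSmiTagSize;
  if (wide != static_cast<int32_t>(wide)) return std::nullopt;  // does not fit
  return static_cast<int32_t>(wide);
}

// Sketch of SmiTst: a tagged word is a smi iff its tag bit is clear.
bool IsSmi(int32_t tagged) { return (tagged & kSmiTagMask) == 0; }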
@@ -1465,6 +1479,12 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ Register scratch,
+ uint32_t encoding_mask);
+
// Test that both first and second are sequential ASCII strings.
// Assume that they are non-smis.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
@@ -1542,19 +1562,15 @@ class MacroAssembler: public Assembler {
bind(&no_memento_found);
}
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments);
- void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- void BranchShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
const Operand& rt,
@@ -1575,8 +1591,7 @@ class MacroAssembler: public Assembler {
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Get the code for the given builtin. Sets the 'resolved' flag to indicate
// whether the function could be resolved.
@@ -1611,7 +1626,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index acc65251e2..10417d573c 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -25,10 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdlib.h>
#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
#include <cmath>
-#include <cstdarg>
+
#include "v8.h"
#if V8_TARGET_ARCH_MIPS
@@ -971,6 +972,12 @@ class Redirection {
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
+ static void* ReverseRedirection(int32_t reg) {
+ Redirection* redirection = FromSwiInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
private:
void* external_function_;
uint32_t swi_instruction_;
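Illustrative note, not part of the patch: in the simulator, calls to external C functions go through per-function Redirection records, and the new ReverseRedirection helper lets a register value set up from such a record be mapped back to the real C function pointer before the profiling callbacks further down are invoked. A rough stand-alone model of that mapping, using an explicit table instead of embedded swi instructions:

#include <cstdint>
#include <unordered_map>

// Hypothetical stand-in for the simulator's redirection bookkeeping.
class RedirectionTable {
 public:
  // Hand generated code an opaque key instead of the real function address.
  int32_t Redirect(void* external_function) {
    int32_t key = next_key_++;
    table_[key] = external_function;
    return key;
  }
  // Sketch of ReverseRedirection: recover the C function from a register value.
  void* ReverseRedirection(int32_t reg) const { return table_.at(reg); }

 private:
  int32_t next_key_ = 1;
  std::unordered_map<int32_t, void*> table_;
};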
@@ -1388,12 +1395,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call into API function native callback
// (refer to InvocationCallback in v8.h).
typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
typedef void (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, int32_t arg2);
+ int32_t arg0, int32_t arg1, void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
@@ -1554,7 +1561,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- target(arg0, arg1);
+ target(arg0, Redirection::ReverseRedirection(arg1));
} else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
@@ -1572,7 +1579,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
- target(arg0, arg1, arg2);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
@@ -2115,7 +2122,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// In rounding mode 0 it should behave like ROUND.
case ROUND_W_D: // Round double to word (round half to even).
{
- double rounded = floor(fs + 0.5);
+ double rounded = std::floor(fs + 0.5);
int32_t result = static_cast<int32_t>(rounded);
if ((result & 1) != 0 && result - fs == 0.5) {
// If the number is halfway between two integers,
@@ -2140,7 +2147,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case FLOOR_W_D: // Round double to word towards negative infinity.
{
- double rounded = floor(fs);
+ double rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
@@ -2150,7 +2157,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case CEIL_W_D: // Round double to word towards positive infinity.
{
- double rounded = ceil(fs);
+ double rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
@@ -2176,19 +2183,20 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
}
case ROUND_L_D: { // Mips32r2 instruction.
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ double rounded =
+ fs > 0 ? std::floor(fs + 0.5) : std::ceil(fs - 0.5);
i64 = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
}
case FLOOR_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(floor(fs));
+ i64 = static_cast<int64_t>(std::floor(fs));
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
case CEIL_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(ceil(fs));
+ i64 = static_cast<int64_t>(std::ceil(fs));
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
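Illustrative note, not part of the patch: the switch from floor/ceil to std::floor/std::ceil is purely a namespace cleanup; ROUND_W_D still rounds half to even by first computing floor(fs + 0.5) and then stepping back when that result is odd and the input was exactly halfway between two integers. A standalone restatement of the rule:

#include <cmath>
#include <cstdint>

// Sketch of the round-half-to-even rule implemented by ROUND_W_D above.
int32_t RoundHalfToEven(double fs) {
  double rounded = std::floor(fs + 0.5);
  int32_t result = static_cast<int32_t>(rounded);
  if ((result & 1) != 0 && result - fs == 0.5) {
    // Exactly halfway and the naive result is odd: choose the even neighbour.
    result--;
  }
  return result;
}
// RoundHalfToEven(2.5) == 2, RoundHalfToEven(3.5) == 4, RoundHalfToEven(2.4) == 2.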
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index d9fd10f245..92a0a87d24 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -203,6 +203,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 17aa4aadef..7e3c801399 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -287,15 +287,19 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ lw(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ASSERT(!prototype.is(at));
- __ li(at, isolate->global_object());
- __ Branch(miss, ne, prototype, Operand(at));
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ lw(scratch, MemOperand(cp, offset));
+ __ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ lw(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+ __ li(at, function);
+ __ Branch(miss, ne, at, Operand(scratch));
+
// Load its initial map. The global functions all have initial maps.
__ li(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
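Illustrative note, not part of the patch: rather than only checking that the current global object is the isolate's global, the rewritten check follows the global object to its native context and verifies that the slot for the requested index still holds the expected function. A simplified model of that chain of loads (struct layouts invented for illustration):

// Simplified stand-ins; the real code performs these loads with lw on tagged fields.
struct NativeContext { void* slots[64]; };          // size chosen arbitrarily
struct GlobalObject  { NativeContext* native_context; };
struct Context       { GlobalObject* global_object; };

// Sketch: true iff the context still maps 'index' to the expected function.
bool GlobalFunctionUnchanged(Context* cp, int index, void* expected_function) {
  GlobalObject* global = cp->global_object;           // lw scratch, [cp + GLOBAL_OBJECT_INDEX]
  NativeContext* native = global->native_context;     // lw scratch, [scratch + kNativeContextOffset]
  return native->slots[index] == expected_function;   // Branch(miss, ne, at, scratch)
}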
@@ -461,7 +465,7 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
- __ LoadObject(scratch1, constant);
+ __ li(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
@@ -732,35 +736,6 @@ void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
}
-static void GenerateCallFunction(MacroAssembler* masm,
- Handle<Object> object,
- const ParameterCount& arguments,
- Label* miss,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- a0: receiver
- // -- a1: function to call
- // -----------------------------------
- // Check that the function really is a function.
- __ JumpIfSmi(a1, miss);
- __ GetObjectType(a1, a3, a3);
- __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(a1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
@@ -785,106 +760,80 @@ static void CompileCallLoadPropertyWithInterceptor(
Register receiver,
Register holder,
Register name,
- Handle<JSObject> holder_obj) {
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ PrepareCEntryArgs(StubCache::kInterceptorArgsLength);
- __ PrepareCEntryFunction(ref);
-
- CEntryStub stub(1);
- __ CallStub(&stub);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
}
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- ASSERT(Smi::FromInt(0) == 0);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(zero_reg);
+// Generate call to api function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!receiver.is(scratch_in));
+ // Preparing to push, adjust sp.
+ __ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
+ __ sw(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver.
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ sw(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg.
}
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
+ ASSERT(optimization.is_simple_api_call());
+ // Abi for CallApiFunctionStub.
+ Register callee = a0;
+ Register call_data = t0;
+ Register holder = a2;
+ Register api_function_address = a1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ li(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
- // : holder (set by CheckPrototypes)
- // -- sp[28] : last JS argument
- // -- ...
- // -- sp[(argc + 6) * 4] : first JS argument
- // -- sp[(argc + 7) * 4] : receiver
- // -----------------------------------
- typedef FunctionCallbackArguments FCA;
- // Save calling context.
- __ sw(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
- // Get the function and setup the context.
+ Isolate* isolate = masm->isolate();
Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(t1, function);
- __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
- __ sw(t1, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
-
- // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ li(a0, api_call_info);
- __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ li(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ li(call_data, api_call_info);
+ __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
} else {
- __ li(t2, call_data);
+ __ li(call_data, call_data_obj);
}
- // Store call data.
- __ sw(t2, MemOperand(sp, FCA::kDataIndex * kPointerSize));
- // Store isolate.
- __ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ sw(t3, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
- // Store ReturnValue default and ReturnValue.
- __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ sw(t1, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
- __ sw(t1, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
-
- // Prepare arguments.
- __ Move(a2, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // a0 = FunctionCallbackInfo&
- // Arguments is built at sp + 1 (sp is a reserved spot for ra).
- __ Addu(a0, sp, kPointerSize);
- // FunctionCallbackInfo::implicit_args_
- __ sw(a2, MemOperand(a0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ Addu(t0, a2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
- __ sw(t0, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(t0, Operand(argc));
- __ sw(t0, MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ // Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
@@ -892,273 +841,11 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
ExternalReference(&fun,
type,
masm->isolate());
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- masm->isolate());
-
- AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(
- fp, (2 + FCA::kReturnValueOffset) * kPointerSize);
-
- __ CallApiFunctionAndReturn(ref,
- function_address,
- thunk_ref,
- a1,
- kStackUnwindSpace,
- return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
-}
-
-
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- typedef FunctionCallbackArguments FCA;
- const int stack_space = kFastApiCallArguments + argc + 1;
- // Assign stack space for the call arguments.
- __ Subu(sp, sp, Operand(stack_space * kPointerSize));
- // Write holder to stack frame.
- __ sw(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- // Write receiver to stack frame.
- int index = stack_space - 1;
- __ sw(receiver, MemOperand(sp, index * kPointerSize));
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ sw(receiver, MemOperand(sp, index-- * kPointerSize));
- }
-
- GenerateFastApiDirectCall(masm, optimization, argc, true);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
- Counters* counters = masm->isolate()->counters();
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(
- masm, optimization, arguments_.immediate(), false);
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- Handle<JSFunction> function = optimization.constant_function();
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
+ __ li(api_function_address, Operand(ref));
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ Branch(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- // Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- StubCache::kInterceptorArgsLength);
- // Restore the name_ register.
- __ pop(name_);
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- }
- // If interceptor returns no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_ic_state_;
-};
-
-
-void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<JSGlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
+ // Jump to stub.
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
}
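Illustrative note, not part of the patch: the rewritten GenerateFastApiCall no longer lays out a full FunctionCallbackInfo itself; it only stores the receiver and the argc explicit arguments on the stack, loads the callee, call data, holder and callback address into a0/t0/a2/a1, and tail-calls CallApiFunctionStub. A small model of the state it hands to the stub (struct and helper invented here for illustration):

#include <cstdint>
#include <vector>

// Model of what GenerateFastApiCall prepares before tail-calling the stub:
// argument i sits at stack slot i, the receiver sits just above them.
struct FastApiCallState {
  std::vector<uint32_t> stack;   // stack[i] = argument i, stack[argc] = receiver
  uint32_t callee;               // a0: the JSFunction being called
  uint32_t call_data;            // t0: data from the CallHandlerInfo (or undefined)
  uint32_t holder;               // a2: API holder object
  uint32_t function_address;     // a1: address of the C++ callback
};

FastApiCallState PrepareFastApiCall(uint32_t receiver,
                                    const std::vector<uint32_t>& args,
                                    uint32_t callee, uint32_t call_data,
                                    uint32_t holder, uint32_t function_address) {
  FastApiCallState state;
  state.stack = args;                // slots 0 .. argc-1
  state.stack.push_back(receiver);   // slot argc
  state.callee = callee;
  state.call_data = call_data;
  state.holder = holder;
  state.function_address = function_address;
  return state;
}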
@@ -1171,21 +858,20 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ li(scratch1, Operand(Handle<Map>(object->map())));
+ __ li(scratch1, Operand(receiver_map));
- Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1195,30 +881,31 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
- typedef FunctionCallbackArguments FCA;
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1229,19 +916,24 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Register map_reg = scratch1;
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- Handle<Map> current_map(current->map());
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
// CheckMap implicitly loads the map of |reg| into |map_reg|.
__ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
} else {
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
}
+
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
- if (current->IsJSGlobalProxy()) {
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
}
+
reg = holder_reg; // From now on the object will be in holder_reg.
if (heap()->InNewSpace(*prototype)) {
@@ -1254,71 +946,62 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
}
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
// Go to the next object in the prototype chain.
current = prototype;
+ current_map = handle(current->map());
}
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
- DONT_DO_SMI_CHECK);
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
// Return the register containing the holder.
return reg;
}
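Illustrative note, not part of the patch: CheckPrototypes now walks maps rather than handles to concrete objects, starting from the receiver map derived from the HeapType. For each map up to (but not including) the holder's map it emits either a dictionary negative lookup (slow-mode, non-global objects) or a map check, plus the global-proxy access check or the global-object property-cell check, and finally checks the holder map itself. A compact sketch of that traversal order, with stand-in types and Emit* placeholders kept as comments so it stays self-contained:

#include <string>

// Simplified stand-ins for the properties CheckPrototypes consults on each map.
struct Map {
  bool is_dictionary_map;
  bool is_js_global_object_map;
  bool is_js_global_proxy_map;
  Map* prototype_map;   // map of this map's prototype object
};

// Sketch of the traversal; each Emit* comment stands for code emitted into the stub.
int CheckPrototypeChain(Map* receiver_map, Map* holder_map, const std::string& name) {
  (void)name;
  Map* current = receiver_map;
  int depth = 0;
  while (current != holder_map) {
    ++depth;
    if (current->is_dictionary_map &&
        !current->is_js_global_object_map && !current->is_js_global_proxy_map) {
      // Slow-mode object: prove the property is absent with a negative lookup.
      // EmitDictionaryNegativeLookup(name);
    } else {
      // Fast or global object: pin down its map (the receiver's own map check
      // can be skipped at depth 1 unless CHECK_ALL_MAPS was requested).
      // EmitCheckMap(current);
      if (current->is_js_global_proxy_map) {
        // EmitCheckAccessGlobalProxy();
      } else if (current->is_js_global_object_map) {
        // EmitCheckPropertyCell(name);  // the property cell must still be empty
      }
    }
    current = current->prototype_map;   // go to the next object in the chain
  }
  // EmitCheckMap(holder_map);  // finally verify the holder's own map
  return depth;                 // logged as "check-maps-depth"
}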
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ Branch(success);
+ Label success;
+ __ Branch(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ b(success);
+ Label success;
+ __ Branch(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -1350,7 +1033,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
__ Branch(&miss, ne, scratch2(), Operand(callback));
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
return reg;
}
@@ -1376,19 +1059,12 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadObject(v0, value);
+ __ li(v0, value);
__ Ret();
}
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1424,43 +1100,23 @@ void LoadStubCompiler::GenerateLoadCallback(
__ Addu(scratch2(), sp, 1 * kPointerSize);
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
- __ mov(a0, sp); // (first argument - a0) = Handle<Name>
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
+ // Abi for CallApiGetter.
+ Register getter_address_reg = a2;
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object** args_) as the data.
- __ sw(a2, MemOperand(sp, kPointerSize));
- // (second argument - a1) = AccessorInfo&
- __ Addu(a1, sp, kPointerSize);
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ li(getter_address_reg, Operand(ref));
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
- ExternalReference::Type thunk_type =
- ExternalReference::PROFILING_GETTER_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- isolate());
- __ CallApiFunctionAndReturn(ref,
- getter_address,
- thunk_ref,
- a2,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiGetterStub stub;
+ __ TailCallStub(&stub);
}
void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
@@ -1509,11 +1165,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
@@ -1544,1252 +1199,14 @@ void LoadStubCompiler::GenerateLoadInterceptor(
}
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Branch(miss, ne, a2, Operand(name));
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(a0, miss);
- CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ li(a3, Operand(cell));
- __ lw(a1, FieldMemOperand(a3, Cell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(a1, miss);
- __ GetObjectType(a1, a3, a3);
- __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
-
- // Check the shared function info. Make sure it hasn't changed.
- __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
- __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Branch(miss, ne, t0, Operand(a3));
- } else {
- __ Branch(miss, ne, a1, Operand(function));
- }
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
-
- // Get the receiver of the function from the stack into a0.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(a0, &miss, t0);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
- GenerateFastPropertyLoad(masm(), a1, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
- // Check that function is still array.
- const int argc = arguments().immediate();
- GenerateNameCheck(name, &miss);
- Register receiver = a1;
-
- if (cell.is_null()) {
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, a0,
- t0, name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- __ li(a0, Operand(argc));
- __ li(a2, Operand(site_feedback_cell));
- __ li(a1, Operand(function));
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- Register receiver = a1;
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
- name, &miss);
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ DropAndRet(argc + 1);
- } else {
- Label call_builtin;
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- Register elements = t2;
- Register end_elements = t1;
- // Get the elements array of the object.
- __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- v0,
- Heap::kFixedArrayMapRootIndex,
- &check_double,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into v0 and calculate new length.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
-
- // Check if value is a smi.
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(t0, &with_write_barrier);
-
- // Save new length.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
-
- // Check for a smi.
- __ DropAndRet(argc + 1);
-
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- a0,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into v0 and calculate new length.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&call_builtin, gt, v0, Operand(t0));
-
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(
- t0, v0, elements, a3, t1, a2,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Check for a smi.
- __ DropAndRet(argc + 1);
-
- __ bind(&with_write_barrier);
-
- __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(a3, t3, &not_fast_object);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(a3, t3, &call_builtin);
-
- __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&call_builtin, eq, t3, Operand(at));
- // edx: receiver
- // a3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- a3,
- t3,
- &try_holey_map);
- __ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- a3,
- t3,
- &call_builtin);
- __ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(a3, a3, &call_builtin);
- }
-
- // Save new length.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
-
- __ RecordWrite(elements,
- end_elements,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ DropAndRet(argc + 1);
-
- __ bind(&attempt_to_grow_elements);
- // v0: array's length + 1.
- // t0: elements' length.
-
- if (!FLAG_inline_new) {
- __ Branch(&call_builtin);
- }
-
- __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(a2, &no_fast_elements_check);
- __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(t3, t3, &call_builtin);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top and check if it is the end of elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
- __ li(t3, Operand(new_space_allocation_top));
- __ lw(a3, MemOperand(t3));
- __ Branch(&call_builtin, ne, end_elements, Operand(a3));
-
- __ li(t5, Operand(new_space_allocation_limit));
- __ lw(t5, MemOperand(t5));
- __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
- __ Branch(&call_builtin, hi, a3, Operand(t5));
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ sw(a3, MemOperand(t3));
- // Push the argument.
- __ sw(a2, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ sw(a3, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
- __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ DropAndRet(argc + 1);
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
- }
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss, return_undefined, call_builtin;
- Register receiver = a1;
- Register elements = a3;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
- t0, v0, name, &miss);
-
- // Get the elements array of the object.
- __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- v0,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into t0 and calculate new length.
- __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Subu(t0, t0, Operand(Smi::FromInt(1)));
- __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
-
- // Get the last element.
- __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(elements, elements, t1);
- __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Branch(&call_builtin, eq, v0, Operand(t2));
-
- // Set the array's length.
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ DropAndRet(argc + 1);
-
- __ bind(&return_undefined);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ DropAndRet(argc + 1);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
-
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- v0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- v0, holder, a1, a3, t0, name, &miss);
-
- Register receiver = a1;
- Register index = t1;
- Register result = v0;
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ DropAndRet(argc + 1);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kNanValueRootIndex);
- __ DropAndRet(argc + 1);
- }
-
- __ bind(&miss);
- // Restore function name in a2.
- __ li(a2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- v0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- v0, holder, a1, a3, t0, name, &miss);
-
- Register receiver = v0;
- Register index = t1;
- Register scratch = a3;
- Register result = v0;
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ DropAndRet(argc + 1);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ DropAndRet(argc + 1);
- }
-
- __ bind(&miss);
- // Restore function name in a2.
- __ li(a2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a1, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = a1;
- __ lw(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ And(code, code, Operand(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, v0);
- generator.GenerateFast(masm());
- __ DropAndRet(argc + 1);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // a2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
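The And(code, code, Operand(Smi::FromInt(0xffff))) above leans on the smi tag being 0: masking the tagged word with the tagged constant truncates the char code to 16 bits (String.fromCharCode's ToUint16 semantics) without ever untagging. A minimal sketch of that identity for the 32-bit smi layout (payload shifted left by one, tag bit 0); the helper names here are illustrative, not V8 API:

    #include <cassert>
    #include <cstdint>

    // 32-bit smi layout assumed: payload << 1, low tag bit == 0.
    static int32_t SmiFromInt(int32_t v) { return v << 1; }
    static int32_t SmiToInt(int32_t smi) { return smi >> 1; }

    // Mask a tagged smi down to a 16-bit char code, staying tagged throughout.
    static int32_t MaskCharCode(int32_t tagged_code) {
      return tagged_code & SmiFromInt(0xffff);  // same trick as the stub above
    }

    int main() {
      assert(SmiToInt(MaskCharCode(SmiFromInt(0x12345))) == 0x2345);
      assert(SmiToInt(MaskCharCode(SmiFromInt(65))) == 65);
      return 0;
    }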
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
-
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss, slow;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into v0.
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ And(t0, v0, Operand(kSmiTagMask));
- __ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
-
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
-
- Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
-
- // If fpu is enabled, we use the floor instruction.
-
- // Load the HeapNumber value.
- __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-
- // Back up FCSR.
- __ cfc1(a3, FCSR);
- // Clearing FCSR clears the exception mask with no side-effects.
- __ ctc1(zero_reg, FCSR);
- // Convert the argument to an integer.
- __ floor_w_d(f0, f0);
-
- // Start checking for special cases.
- // Get the argument exponent and clear the sign bit.
- __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
- __ And(t2, t1, Operand(~HeapNumber::kSignMask));
- __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
-
- // Retrieve FCSR and check for fpu errors.
- __ cfc1(t5, FCSR);
- __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
- __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
-
- // Check for NaN, Infinity, and -Infinity.
- // They are invariant through a Math.floor call, so just
- // return the original argument.
- __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
- >> HeapNumber::kMantissaBitsInTopWord));
- __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
- // We had an overflow or underflow in the conversion. Check if we
- // have a big exponent.
- // If greater or equal, the argument is already rounded and in v0.
- __ Branch(&restore_fcsr_and_return, ge, t3,
- Operand(HeapNumber::kMantissaBits));
- __ Branch(&wont_fit_smi);
-
- __ bind(&no_fpu_error);
- // Move the result back to v0.
- __ mfc1(v0, f0);
- // Check if the result fits into a smi.
- __ Addu(a1, v0, Operand(0x40000000));
- __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
- // Tag the result.
- STATIC_ASSERT(kSmiTag == 0);
- __ sll(v0, v0, kSmiTagSize);
-
- // Check for -0.
- __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
- // t1 already holds the HeapNumber exponent.
- __ And(t0, t1, Operand(HeapNumber::kSignMask));
- // If our HeapNumber is negative, it was -0, so reload the original
- // HeapNumber and return. Else v0 already holds 0, so we can just return.
- __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- __ bind(&restore_fcsr_and_return);
- // Restore FCSR and return.
- __ ctc1(a3, FCSR);
-
- __ DropAndRet(argc + 1);
-
- __ bind(&wont_fit_smi);
- // Restore FCSR and fall to slow case.
- __ ctc1(a3, FCSR);
-
- __ bind(&slow);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // a2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
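The Addu(a1, v0, Operand(0x40000000)) / branch-if-negative pair above is the usual check that a 32-bit integer fits in a 31-bit smi payload: adding 2^30 leaves the sign bit clear exactly for values in [-2^30, 2^30 - 1], and only then is the value tagged. A small, self-contained restatement of that check (illustrative helper, not V8 code):

    #include <cassert>
    #include <cstdint>

    // True iff v fits in a 31-bit signed smi payload, i.e. -2^30 <= v < 2^30.
    static bool FitsInSmi(int32_t v) {
      // Unsigned add so the wraparound is well defined; the sign bit of the
      // sum is set exactly when v lies outside the smi range.
      return ((static_cast<uint32_t>(v) + 0x40000000u) & 0x80000000u) == 0;
    }

    int main() {
      assert(FitsInSmi(0));
      assert(FitsInSmi((1 << 30) - 1));
      assert(!FitsInSmi(1 << 30));
      assert(FitsInSmi(-(1 << 30)));
      assert(!FitsInSmi(-(1 << 30) - 1));
      return 0;
    }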
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into v0.
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(v0, &not_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ sra(t0, v0, kBitsPerInt - 1);
- __ Xor(a1, v0, t0);
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ Subu(v0, a1, t0);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ Branch(&slow, lt, v0, Operand(zero_reg));
-
- // Smi case done.
- __ DropAndRet(argc + 1);
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(&not_smi);
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ And(t0, a1, Operand(HeapNumber::kSignMask));
- __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
- __ DropAndRet(argc + 1);
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
- __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
- __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ DropAndRet(argc + 1);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // a2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
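The smi fast path of Math.abs above is the classic branchless absolute value: with m = v >> 31 (arithmetic shift, so m is 0 or all ones), (v ^ m) - m negates negative values and leaves non-negative ones untouched. The most negative value has no positive counterpart and comes back still negative, which is exactly why the stub branches to the slow path on a negative result. A minimal sketch, not V8 code:

    #include <cassert>
    #include <cstdint>

    // Branchless abs; mirrors the sra/Xor/Subu sequence in the stub above.
    static int32_t BranchlessAbs(int32_t v) {
      int32_t m = v >> 31;   // 0 for v >= 0, -1 (all ones) for v < 0
      return (v ^ m) - m;    // xor with all ones is bitwise not; -(-1) adds one
    }

    int main() {
      assert(BranchlessAbs(7) == 7);
      assert(BranchlessAbs(-7) == 7);
      assert(BranchlessAbs(0) == 0);
      // INT32_MIN wraps around and stays negative -- the case the stub
      // catches with its branch to the slow path.
      return 0;
    }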
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
-
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(a1, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, a0, a3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
-
- ReserveSpaceForFastApiCall(masm(), a0);
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
- depth, &miss);
-
- GenerateFastApiDirectCall(masm(), optimization, argc, false);
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(a1, &miss);
- }
-
- // Make sure that it's okay not to patch the on-stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1, a0, a3);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
- name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc * kPointerSize));
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ GetObjectType(a1, a1, a3);
- __ Branch(&miss, ne, a3, Operand(SYMBOL_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(a1, &fast);
- __ GetObjectType(a1, a0, a0);
- __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&fast, eq, a1, Operand(t0));
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t0));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
- break;
- }
- }
-
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
-
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<Cell>::null(),
- function, Handle<String>::cast(name),
- Code::CONSTANT);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
+ // Check that the object is a boolean.
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&success, eq, object, Operand(at));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ __ Branch(miss, ne, object, Operand(at));
__ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
- &miss);
-
- // Move returned value, the function to call, to a1.
- __ mov(a1, v0);
- // Restore receiver.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Set up the context (function already in a1).
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
}
@@ -2798,15 +1215,14 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
  // Stub is never generated for non-global objects that require access
  // checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- __ push(receiver()); // Receiver.
+ __ Push(receiver(), holder_reg); // Receiver, holder.
__ li(at, Operand(callback)); // Callback info.
__ push(at);
__ li(at, Operand(name));
@@ -2815,28 +1231,10 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2846,6 +1244,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- a0 : value
@@ -2855,18 +1254,25 @@ void StoreStubCompiler::GenerateStoreViaSetter(
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ Register receiver = a1;
+ Register value = a0;
// Save value register, so we can restore it later.
- __ push(a0);
+ __ push(value);
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ push(a1);
- __ push(a0);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ lw(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver, value);
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2890,56 +1296,29 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
- __ li(scratch1(), Operand(Smi::FromInt(strict_mode())));
- __ push(scratch1()); // strict mode
-
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
- __ bind(&success);
  // Return undefined if the maps of the full prototype chain are unchanged.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ Ret();
// Return the generated code.
- return GetCode(kind(), Code::NONEXISTENT, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2971,25 +1350,12 @@ Register* KeyedStoreStubCompiler::registers() {
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Branch(miss, ne, name_reg, Operand(name));
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Branch(miss, ne, name_reg, Operand(name));
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
@@ -3002,11 +1368,17 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ lw(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -3025,17 +1397,14 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
- Label success, miss;
+ Label miss;
- __ CheckMap(
- receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
__ li(a3, Operand(cell));
@@ -3047,43 +1416,51 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Branch(&miss, eq, t0, Operand(at));
}
- HandlerFrontendFooter(name, &success, &miss);
- __ bind(&success);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, t0);
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::NORMAL, name);
}
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
+ TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ Branch(&miss, ne, this->name(), Operand(name));
}
- __ JumpIfSmi(receiver(), &miss);
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
Register map_reg = scratch1();
- int receiver_count = receiver_maps->length();
+ int receiver_count = types->length();
int number_of_handled_maps = 0;
__ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = receiver_maps->at(current);
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
- eq, map_reg, Operand(receiver_maps->at(current)));
+ eq, map_reg, Operand(map));
}
}
ASSERT(number_of_handled_maps != 0);
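In effect, the CompilePolymorphicIC code above emits a linear dispatch over the receiver's map, with smi receivers routed to the Number case when one of the requested types is Number, and a miss when nothing matches. A rough, purely illustrative C++ restatement of that control flow (the types and names below are placeholders, not V8 internals):

    #include <vector>

    struct Map;      // stand-in for a hidden class
    struct Handler;  // stand-in for a compiled handler stub

    struct Case {
      const Map* map;       // nullptr marks the Number case (also covers smis)
      const Handler* code;
    };

    // Returns the handler to jump to, or nullptr to signal an IC miss.
    static const Handler* Dispatch(bool receiver_is_smi,
                                   const Map* receiver_map,
                                   const std::vector<Case>& cases) {
      if (receiver_is_smi) {
        for (const Case& c : cases)
          if (c.map == nullptr) return c.code;  // smi -> Number handler
        return nullptr;                         // no Number case: miss
      }
      for (const Case& c : cases)
        if (c.map == receiver_map) return c.code;  // matching map: jump there
      return nullptr;                              // nothing matched: miss
    }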
@@ -3140,12 +1517,12 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- Label slow, miss_force_generic;
+ Label slow, miss;
Register key = a0;
Register receiver = a1;
- __ JumpIfNotSmi(key, &miss_force_generic);
+ __ JumpIfNotSmi(key, &miss);
__ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ sra(a2, a0, kSmiTagSize);
__ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
@@ -3165,14 +1542,14 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
// Miss case, call the runtime.
- __ bind(&miss_force_generic);
+ __ bind(&miss);
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 4277136b60..212bb0b9ca 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -637,8 +637,9 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
// Find all the named properties.
if (kind & PropertyKind.Named) {
- // Get the local property names.
- propertyNames = %GetLocalPropertyNames(this.value_, true);
+ // Get all the local property names.
+ propertyNames =
+ %GetLocalPropertyNames(this.value_, PROPERTY_ATTRIBUTES_NONE);
total += propertyNames.length;
// Get names for named interceptor properties if any.
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index 95d3daada2..07b0575492 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -309,7 +309,7 @@ int main(int argc, char** argv) {
// Capture 100 frames if anything happens.
V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
HandleScope scope(isolate);
- v8::Context::Scope(v8::Local<v8::Context>::New(isolate, context));
+ v8::Context::Scope cscope(v8::Local<v8::Context>::New(isolate, context));
const char* name = i::FLAG_extra_code;
FILE* file = i::OS::FOpen(name, "rb");
if (file == NULL) {
@@ -332,7 +332,7 @@ int main(int argc, char** argv) {
i += read;
}
fclose(file);
- Local<String> source = String::New(chars);
+ Local<String> source = String::NewFromUtf8(isolate, chars);
TryCatch try_catch;
Local<Script> script = Script::Compile(source);
if (try_catch.HasCaught()) {
@@ -358,7 +358,7 @@ int main(int argc, char** argv) {
internal_isolate->heap()->CollectAllGarbage(
i::Heap::kNoGCFlags, "mksnapshot");
i::Object* raw_context = *v8::Utils::OpenPersistent(context);
- context.Dispose();
+ context.Reset();
CppByteSink sink(argv[1]);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
@@ -399,5 +399,8 @@ int main(int argc, char** argv) {
ser.CurrentAllocationAddress(i::MAP_SPACE),
ser.CurrentAllocationAddress(i::CELL_SPACE),
ser.CurrentAllocationAddress(i::PROPERTY_CELL_SPACE));
+ isolate->Exit();
+ isolate->Dispose();
+ V8::Dispose();
return 0;
}
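The mksnapshot fix above (v8::Context::Scope cscope(...)) is worth spelling out: the old statement constructed an unnamed Context::Scope temporary, which entered the context and then exited it again at the end of that same full expression, so everything that followed ran outside the context. Naming the object keeps the scope alive until the end of main. A minimal illustration of the general RAII pitfall, using a stand-in guard class rather than the V8 API:

    #include <iostream>

    struct ScopeGuard {
      ScopeGuard()  { std::cout << "enter\n"; }
      ~ScopeGuard() { std::cout << "exit\n"; }
    };

    int main() {
      ScopeGuard();             // temporary: prints "enter" then "exit" at once
      std::cout << "work A\n";  // runs outside the guard

      ScopeGuard guard;         // named object: alive until the end of the scope
      std::cout << "work B\n";  // runs inside the guard
      return 0;                 // second "exit" printed here
    }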
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index 9c7ac3889e..468da31ec2 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -91,10 +91,14 @@ var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
var notifierObjectInfoMap =
new ObservationWeakMap(observationState.notifierObjectInfoMap);
-function TypeMapCreate() {
+function nullProtoObject() {
return { __proto__: null };
}
+function TypeMapCreate() {
+ return nullProtoObject();
+}
+
function TypeMapAddType(typeMap, type, ignoreDuplicate) {
typeMap[type] = ignoreDuplicate ? 1 : (typeMap[type] || 0) + 1;
}
@@ -128,11 +132,12 @@ function TypeMapIsDisjointFrom(typeMap1, typeMap2) {
}
var defaultAcceptTypes = TypeMapCreateFromList([
- 'new',
- 'updated',
- 'deleted',
- 'prototype',
- 'reconfigured'
+ 'add',
+ 'update',
+ 'delete',
+ 'setPrototype',
+ 'reconfigure',
+ 'preventExtensions'
]);
// An Observer is a registration to observe an object by a callback with
@@ -141,11 +146,12 @@ var defaultAcceptTypes = TypeMapCreateFromList([
// to the callback. An observer never changes its accept types and thus never
// needs to "normalize".
function ObserverCreate(callback, acceptList) {
- return IS_UNDEFINED(acceptList) ? callback : {
- __proto__: null,
- callback: callback,
- accept: TypeMapCreateFromList(acceptList)
- };
+ if (IS_UNDEFINED(acceptList))
+ return callback;
+ var observer = nullProtoObject();
+ observer.callback = callback;
+ observer.accept = TypeMapCreateFromList(acceptList);
+ return observer;
}
function ObserverGetCallback(observer) {
@@ -161,8 +167,8 @@ function ObserverIsActive(observer, objectInfo) {
ObserverGetAcceptTypes(observer));
}
-function ObjectInfoGet(object) {
- var objectInfo = objectInfoMap.get(object);
+function ObjectInfoGetOrCreate(object) {
+ var objectInfo = ObjectInfoGet(object);
if (IS_UNDEFINED(objectInfo)) {
if (!%IsJSProxy(object))
%SetIsObserved(object);
@@ -179,6 +185,10 @@ function ObjectInfoGet(object) {
return objectInfo;
}
+function ObjectInfoGet(object) {
+ return objectInfoMap.get(object);
+}
+
function ObjectInfoGetFromNotifier(notifier) {
return notifierObjectInfoMap.get(notifier);
}
@@ -211,7 +221,7 @@ function ObjectInfoNormalizeChangeObservers(objectInfo) {
var callback = ObserverGetCallback(observer);
var callbackInfo = CallbackInfoGet(callback);
var priority = CallbackInfoGetPriority(callbackInfo);
- objectInfo.changeObservers = { __proto__: null };
+ objectInfo.changeObservers = nullProtoObject();
objectInfo.changeObservers[priority] = observer;
}
}
@@ -242,7 +252,7 @@ function ObjectInfoRemoveObserver(objectInfo, callback) {
var callbackInfo = CallbackInfoGet(callback);
var priority = CallbackInfoGetPriority(callbackInfo);
- delete objectInfo.changeObservers[priority];
+ objectInfo.changeObservers[priority] = null;
}
function ObjectInfoHasActiveObservers(objectInfo) {
@@ -253,7 +263,8 @@ function ObjectInfoHasActiveObservers(objectInfo) {
return ObserverIsActive(objectInfo.changeObservers, objectInfo);
for (var priority in objectInfo.changeObservers) {
- if (ObserverIsActive(objectInfo.changeObservers[priority], objectInfo))
+ var observer = objectInfo.changeObservers[priority];
+ if (!IS_NULL(observer) && ObserverIsActive(observer, objectInfo))
return true;
}
@@ -332,7 +343,7 @@ function ObjectObserve(object, callback, acceptList) {
if (!AcceptArgIsValid(acceptList))
throw MakeTypeError("observe_accept_invalid");
- var objectInfo = ObjectInfoGet(object);
+ var objectInfo = ObjectInfoGetOrCreate(object);
ObjectInfoAddObserver(objectInfo, callback, acceptList);
return object;
}
@@ -343,7 +354,7 @@ function ObjectUnobserve(object, callback) {
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["unobserve"]);
- var objectInfo = objectInfoMap.get(object);
+ var objectInfo = ObjectInfoGet(object);
if (IS_UNDEFINED(objectInfo))
return object;
@@ -352,9 +363,9 @@ function ObjectUnobserve(object, callback) {
}
function ArrayObserve(object, callback) {
- return ObjectObserve(object, callback, ['new',
- 'updated',
- 'deleted',
+ return ObjectObserve(object, callback, ['add',
+ 'update',
+ 'delete',
'splice']);
}
@@ -379,15 +390,37 @@ function ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
}
var callbackInfo = CallbackInfoNormalize(callback);
- if (!observationState.pendingObservers)
- observationState.pendingObservers = { __proto__: null };
+ if (IS_NULL(observationState.pendingObservers)) {
+ observationState.pendingObservers = nullProtoObject();
+ GetMicrotaskQueue().push(ObserveMicrotaskRunner);
+ %SetMicrotaskPending(true);
+ }
observationState.pendingObservers[callbackInfo.priority] = callback;
callbackInfo.push(changeRecord);
- %SetObserverDeliveryPending();
}
-function ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord,
- skipAccessCheck) {
+function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
+ if (!ObjectInfoHasActiveObservers(objectInfo))
+ return;
+
+ var hasType = !IS_UNDEFINED(type);
+ var newRecord = hasType ?
+ { object: ObjectInfoGetObject(objectInfo), type: type } :
+ { object: ObjectInfoGetObject(objectInfo) };
+
+ for (var prop in changeRecord) {
+ if (prop === 'object' || (hasType && prop === 'type')) continue;
+ %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
+ READ_ONLY + DONT_DELETE);
+ }
+ ObjectFreeze(newRecord);
+
+ ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord,
+ true /* skip access check */);
+}
+
+function ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord,
+ skipAccessCheck) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(changeRecord.name)) return;
@@ -403,25 +436,27 @@ function ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord,
for (var priority in objectInfo.changeObservers) {
var observer = objectInfo.changeObservers[priority];
+ if (IS_NULL(observer))
+ continue;
ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
needsAccessCheck);
}
}
function BeginPerformSplice(array) {
- var objectInfo = objectInfoMap.get(array);
+ var objectInfo = ObjectInfoGet(array);
if (!IS_UNDEFINED(objectInfo))
ObjectInfoAddPerformingType(objectInfo, 'splice');
}
function EndPerformSplice(array) {
- var objectInfo = objectInfoMap.get(array);
+ var objectInfo = ObjectInfoGet(array);
if (!IS_UNDEFINED(objectInfo))
ObjectInfoRemovePerformingType(objectInfo, 'splice');
}
function EnqueueSpliceRecord(array, index, removed, addedCount) {
- var objectInfo = objectInfoMap.get(array);
+ var objectInfo = ObjectInfoGet(array);
if (!ObjectInfoHasActiveObservers(objectInfo))
return;
@@ -435,19 +470,30 @@ function EnqueueSpliceRecord(array, index, removed, addedCount) {
ObjectFreeze(changeRecord);
ObjectFreeze(changeRecord.removed);
- ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
+ ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
function NotifyChange(type, object, name, oldValue) {
- var objectInfo = objectInfoMap.get(object);
+ var objectInfo = ObjectInfoGet(object);
if (!ObjectInfoHasActiveObservers(objectInfo))
return;
- var changeRecord = (arguments.length < 4) ?
- { type: type, object: object, name: name } :
- { type: type, object: object, name: name, oldValue: oldValue };
+ var changeRecord;
+ if (arguments.length == 2) {
+ changeRecord = { type: type, object: object };
+ } else if (arguments.length == 3) {
+ changeRecord = { type: type, object: object, name: name };
+ } else {
+ changeRecord = {
+ type: type,
+ object: object,
+ name: name,
+ oldValue: oldValue
+ };
+ }
+
ObjectFreeze(changeRecord);
- ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
+ ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
var notifierPrototype = {};
@@ -462,19 +508,7 @@ function ObjectNotifierNotify(changeRecord) {
if (!IS_STRING(changeRecord.type))
throw MakeTypeError("observe_type_non_string");
- if (!ObjectInfoHasActiveObservers(objectInfo))
- return;
-
- var newRecord = { object: ObjectInfoGetObject(objectInfo) };
- for (var prop in changeRecord) {
- if (prop === 'object') continue;
- %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
- READ_ONLY + DONT_DELETE);
- }
- ObjectFreeze(newRecord);
-
- ObjectInfoEnqueueChangeRecord(objectInfo, newRecord,
- true /* skip access check */);
+ ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord);
}
function ObjectNotifierPerformChange(changeType, changeFn) {
@@ -491,11 +525,16 @@ function ObjectNotifierPerformChange(changeType, changeFn) {
throw MakeTypeError("observe_perform_non_function");
ObjectInfoAddPerformingType(objectInfo, changeType);
+
+ var changeRecord;
try {
- %_CallFunction(UNDEFINED, changeFn);
+ changeRecord = %_CallFunction(UNDEFINED, changeFn);
} finally {
ObjectInfoRemovePerformingType(objectInfo, changeType);
}
+
+ if (IS_SPEC_OBJECT(changeRecord))
+ ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, changeType);
}
function ObjectGetNotifier(object) {
@@ -504,7 +543,7 @@ function ObjectGetNotifier(object) {
if (ObjectIsFrozen(object)) return null;
- var objectInfo = ObjectInfoGet(object);
+ var objectInfo = ObjectInfoGetOrCreate(object);
return ObjectInfoGetNotifier(objectInfo);
}
@@ -526,7 +565,7 @@ function CallbackDeliverPending(callback) {
try {
%_CallFunction(UNDEFINED, delivered, callback);
- } catch (ex) {}
+ } catch (ex) {} // TODO(rossberg): perhaps log uncaught exceptions.
return true;
}
@@ -537,9 +576,9 @@ function ObjectDeliverChangeRecords(callback) {
while (CallbackDeliverPending(callback)) {}
}
-function DeliverChangeRecords() {
- while (observationState.pendingObservers) {
- var pendingObservers = observationState.pendingObservers;
+function ObserveMicrotaskRunner() {
+ var pendingObservers = observationState.pendingObservers;
+ if (pendingObservers) {
observationState.pendingObservers = null;
for (var i in pendingObservers) {
CallbackDeliverPending(pendingObservers[i]);
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 6ab2ddffe2..4f59a1a5a2 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -104,34 +104,18 @@ void HeapObject::HeapObjectVerify() {
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpaceVerify();
break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- ExternalByteArray::cast(this)->ExternalByteArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayVerify();
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- ExternalShortArray::cast(this)->ExternalShortArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- ExternalUnsignedShortArray::cast(this)->
- ExternalUnsignedShortArrayVerify();
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- ExternalIntArray::cast(this)->ExternalIntArrayVerify();
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayVerify();
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
- break;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- ExternalDoubleArray::cast(this)->ExternalDoubleArrayVerify();
+
+#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ External##Type##Array::cast(this)->External##Type##ArrayVerify(); \
+ break; \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ Fixed##Type##Array::cast(this)->FixedTypedArrayVerify(); \
break;
+
+ TYPED_ARRAYS(VERIFY_TYPED_ARRAY)
+#undef VERIFY_TYPED_ARRAY
+
case CODE_TYPE:
Code::cast(this)->CodeVerify();
break;
@@ -243,6 +227,7 @@ void Symbol::SymbolVerify() {
CHECK(HasHashCode());
CHECK_GT(Hash(), 0);
CHECK(name()->IsUndefined() || name()->IsString());
+ CHECK(flags()->IsSmi());
}
@@ -261,48 +246,20 @@ void FreeSpace::FreeSpaceVerify() {
}
-void ExternalPixelArray::ExternalPixelArrayVerify() {
- CHECK(IsExternalPixelArray());
-}
-
-
-void ExternalByteArray::ExternalByteArrayVerify() {
- CHECK(IsExternalByteArray());
-}
-
-
-void ExternalUnsignedByteArray::ExternalUnsignedByteArrayVerify() {
- CHECK(IsExternalUnsignedByteArray());
-}
-
-
-void ExternalShortArray::ExternalShortArrayVerify() {
- CHECK(IsExternalShortArray());
-}
-
-
-void ExternalUnsignedShortArray::ExternalUnsignedShortArrayVerify() {
- CHECK(IsExternalUnsignedShortArray());
-}
-
-
-void ExternalIntArray::ExternalIntArrayVerify() {
- CHECK(IsExternalIntArray());
-}
-
-
-void ExternalUnsignedIntArray::ExternalUnsignedIntArrayVerify() {
- CHECK(IsExternalUnsignedIntArray());
-}
+#define EXTERNAL_ARRAY_VERIFY(Type, type, TYPE, ctype, size) \
+ void External##Type##Array::External##Type##ArrayVerify() { \
+ CHECK(IsExternal##Type##Array()); \
+ }
+TYPED_ARRAYS(EXTERNAL_ARRAY_VERIFY)
+#undef EXTERNAL_ARRAY_VERIFY
-void ExternalFloatArray::ExternalFloatArrayVerify() {
- CHECK(IsExternalFloatArray());
-}
-
-void ExternalDoubleArray::ExternalDoubleArrayVerify() {
- CHECK(IsExternalDoubleArray());
+template <class Traits>
+void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
+ CHECK(IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() ==
+ Traits::kInstanceType);
}
@@ -366,9 +323,6 @@ void Map::MapVerify() {
SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this));
}
- ASSERT(!is_observed() || instance_type() < FIRST_JS_OBJECT_TYPE ||
- instance_type() > LAST_JS_OBJECT_TYPE ||
- has_slow_elements_kind() || has_external_array_elements());
}
@@ -413,7 +367,7 @@ void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
VerifyObjectField(kStorage1Offset);
VerifyObjectField(kStorage2Offset);
- VerifyHeapPointer(type_feedback_cells());
+ VerifyHeapPointer(feedback_vector());
}
@@ -536,7 +490,6 @@ void JSMessageObject::JSMessageObjectVerify() {
VerifyObjectField(kEndPositionOffset);
VerifyObjectField(kArgumentsOffset);
VerifyObjectField(kScriptOffset);
- VerifyObjectField(kStackTraceOffset);
VerifyObjectField(kStackFramesOffset);
}
@@ -1081,17 +1034,15 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_unused_elements_ += holes;
break;
}
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS: {
- info->number_of_objects_with_fast_elements_++;
- ExternalPixelArray* e = ExternalPixelArray::cast(elements());
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ { info->number_of_objects_with_fast_elements_++;
+ FixedArrayBase* e = FixedArrayBase::cast(elements());
info->number_of_fast_used_elements_ += e->length();
break;
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index deb33653f7..ffec178d42 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -41,6 +41,7 @@
#include "conversions-inl.h"
#include "heap.h"
#include "isolate.h"
+#include "heap-inl.h"
#include "property.h"
#include "spaces.h"
#include "store-buffer.h"
@@ -48,6 +49,7 @@
#include "factory.h"
#include "incremental-marking.h"
#include "transitions-inl.h"
+#include "objects-visiting.h"
namespace v8 {
namespace internal {
@@ -57,7 +59,7 @@ PropertyDetails::PropertyDetails(Smi* smi) {
}
-Smi* PropertyDetails::AsSmi() {
+Smi* PropertyDetails::AsSmi() const {
// Ensure the upper 2 bits have the same value by sign extending it. This is
// necessary to be able to use the 31st bit of the property details.
int value = value_ << 1;
@@ -65,7 +67,7 @@ Smi* PropertyDetails::AsSmi() {
}
-PropertyDetails PropertyDetails::AsDeleted() {
+PropertyDetails PropertyDetails::AsDeleted() const {
Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
return PropertyDetails(smi);
}
@@ -85,6 +87,13 @@ PropertyDetails PropertyDetails::AsDeleted() {
}
+#define FIXED_TYPED_ARRAY_CAST_ACCESSOR(type) \
+ template<> \
+ type* type::cast(Object* object) { \
+ SLOW_ASSERT(object->Is##type()); \
+ return reinterpret_cast<type*>(object); \
+ }
+
#define INT_ACCESSORS(holder, name, offset) \
int holder::name() { return READ_INT_FIELD(this, offset); } \
void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
@@ -133,7 +142,8 @@ PropertyDetails PropertyDetails::AsDeleted() {
bool Object::IsFixedArrayBase() {
- return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray();
+ return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() ||
+ IsFixedTypedArrayBase() || IsExternalArray();
}
@@ -150,25 +160,6 @@ bool Object::IsAccessorInfo() {
}
-bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
- // There is a constraint on the object; check.
- if (!this->IsJSObject()) return false;
- // Fetch the constructor function of the object.
- Object* cons_obj = JSObject::cast(this)->map()->constructor();
- if (!cons_obj->IsJSFunction()) return false;
- JSFunction* fun = JSFunction::cast(cons_obj);
- // Iterate through the chain of inheriting function templates to
- // see if the required one occurs.
- for (Object* type = fun->shared()->function_data();
- type->IsFunctionTemplateInfo();
- type = FunctionTemplateInfo::cast(type)->parent_template()) {
- if (type == expected) return true;
- }
- // Didn't find the required type in the inheritance chain.
- return false;
-}
-
-
bool Object::IsSmi() {
return HAS_SMI_TAG(this);
}
@@ -280,12 +271,16 @@ bool Object::IsExternalTwoByteString() {
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
- return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
+ return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray() ||
+ IsFixedTypedArrayBase();
}
MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
Representation representation) {
+ if (FLAG_track_fields && representation.IsSmi() && IsUninitialized()) {
+ return Smi::FromInt(0);
+ }
if (!FLAG_track_double_fields) return this;
if (!representation.IsDouble()) return this;
if (IsUninitialized()) {
@@ -464,6 +459,142 @@ uc32 FlatStringReader::Get(int index) {
}
+template <typename Char>
+class SequentialStringKey : public HashTableKey {
+ public:
+ explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
+ : string_(string), hash_field_(0), seed_(seed) { }
+
+ virtual uint32_t Hash() {
+ hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
+ string_.length(),
+ seed_);
+
+ uint32_t result = hash_field_ >> String::kHashShift;
+ ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ return result;
+ }
+
+
+ virtual uint32_t HashForObject(Object* other) {
+ return String::cast(other)->Hash();
+ }
+
+ Vector<const Char> string_;
+ uint32_t hash_field_;
+ uint32_t seed_;
+};
+
+
+class OneByteStringKey : public SequentialStringKey<uint8_t> {
+ public:
+ OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
+ : SequentialStringKey<uint8_t>(str, seed) { }
+
+ virtual bool IsMatch(Object* string) {
+ return String::cast(string)->IsOneByteEqualTo(string_);
+ }
+
+ virtual MaybeObject* AsObject(Heap* heap);
+};
+
+
+template<class Char>
+class SubStringKey : public HashTableKey {
+ public:
+ SubStringKey(Handle<String> string, int from, int length)
+ : string_(string), from_(from), length_(length) {
+ if (string_->IsSlicedString()) {
+ string_ = Handle<String>(Unslice(*string_, &from_));
+ }
+ ASSERT(string_->IsSeqString() || string->IsExternalString());
+ }
+
+ virtual uint32_t Hash() {
+ ASSERT(length_ >= 0);
+ ASSERT(from_ + length_ <= string_->length());
+ const Char* chars = GetChars() + from_;
+ hash_field_ = StringHasher::HashSequentialString(
+ chars, length_, string_->GetHeap()->HashSeed());
+ uint32_t result = hash_field_ >> String::kHashShift;
+ ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ return result;
+ }
+
+ virtual uint32_t HashForObject(Object* other) {
+ return String::cast(other)->Hash();
+ }
+
+ virtual bool IsMatch(Object* string);
+ virtual MaybeObject* AsObject(Heap* heap);
+
+ private:
+ const Char* GetChars();
+ String* Unslice(String* string, int* offset) {
+ while (string->IsSlicedString()) {
+ SlicedString* sliced = SlicedString::cast(string);
+ *offset += sliced->offset();
+ string = sliced->parent();
+ }
+ return string;
+ }
+
+ Handle<String> string_;
+ int from_;
+ int length_;
+ uint32_t hash_field_;
+};
+
+
+class TwoByteStringKey : public SequentialStringKey<uc16> {
+ public:
+ explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
+ : SequentialStringKey<uc16>(str, seed) { }
+
+ virtual bool IsMatch(Object* string) {
+ return String::cast(string)->IsTwoByteEqualTo(string_);
+ }
+
+ virtual MaybeObject* AsObject(Heap* heap);
+};
+
+
+// Utf8StringKey carries a vector of chars as key.
+class Utf8StringKey : public HashTableKey {
+ public:
+ explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
+ : string_(string), hash_field_(0), seed_(seed) { }
+
+ virtual bool IsMatch(Object* string) {
+ return String::cast(string)->IsUtf8EqualTo(string_);
+ }
+
+ virtual uint32_t Hash() {
+ if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
+ hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
+ uint32_t result = hash_field_ >> String::kHashShift;
+ ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ return result;
+ }
+
+ virtual uint32_t HashForObject(Object* other) {
+ return String::cast(other)->Hash();
+ }
+
+ virtual MaybeObject* AsObject(Heap* heap) {
+ if (hash_field_ == 0) Hash();
+ return heap->AllocateInternalizedStringFromUtf8(string_,
+ chars_,
+ hash_field_);
+ }
+
+ Vector<const char> string_;
+ uint32_t hash_field_;
+ int chars_; // Caches the number of characters when computing the hash code.
+ uint32_t seed_;
+};
+
+
bool Object::IsNumber() {
return IsSmi() || IsHeapNumber();
}
@@ -480,9 +611,6 @@ bool Object::IsFiller() {
}
-TYPE_CHECKER(ExternalPixelArray, EXTERNAL_PIXEL_ARRAY_TYPE)
-
-
bool Object::IsExternalArray() {
if (!Object::IsHeapObject())
return false;
@@ -493,14 +621,22 @@ bool Object::IsExternalArray() {
}
-TYPE_CHECKER(ExternalByteArray, EXTERNAL_BYTE_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedByteArray, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
-TYPE_CHECKER(ExternalShortArray, EXTERNAL_SHORT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedShortArray, EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalIntArray, EXTERNAL_INT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedIntArray, EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalFloatArray, EXTERNAL_FLOAT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalDoubleArray, EXTERNAL_DOUBLE_ARRAY_TYPE)
+#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
+ TYPE_CHECKER(External##Type##Array, EXTERNAL_##TYPE##_ARRAY_TYPE) \
+ TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
+
+TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
+#undef TYPED_ARRAY_TYPE_CHECKER
+
+
+bool Object::IsFixedTypedArrayBase() {
+ if (!Object::IsHeapObject()) return false;
+
+ InstanceType instance_type =
+ HeapObject::cast(this)->map()->instance_type();
+ return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
+ instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
+}
bool MaybeObject::IsFailure() {
@@ -624,16 +760,6 @@ bool Object::IsDependentCode() {
}
-bool Object::IsTypeFeedbackCells() {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a cache cells array. Since this is used for asserts we can check that
- // the length is plausible though.
- if (FixedArray::cast(this)->length() % 2 != 0) return false;
- return true;
-}
-
-
bool Object::IsContext() {
if (!Object::IsHeapObject()) return false;
Map* map = HeapObject::cast(this)->map();
@@ -801,7 +927,8 @@ bool Object::IsJSGlobalProxy() {
bool result = IsHeapObject() &&
(HeapObject::cast(this)->map()->instance_type() ==
JS_GLOBAL_PROXY_TYPE);
- ASSERT(!result || IsAccessCheckNeeded());
+ ASSERT(!result ||
+ HeapObject::cast(this)->map()->is_access_check_needed());
return result;
}
@@ -826,8 +953,14 @@ bool Object::IsUndetectableObject() {
bool Object::IsAccessCheckNeeded() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->is_access_check_needed();
+ if (!IsHeapObject()) return false;
+ if (IsJSGlobalProxy()) {
+ JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
+ GlobalObject* global =
+ proxy->GetIsolate()->context()->global_object();
+ return proxy->IsDetachedFrom(global);
+ }
+ return HeapObject::cast(this)->map()->is_access_check_needed();
}
@@ -1316,33 +1449,29 @@ void JSObject::ValidateElements() {
}
-bool JSObject::ShouldTrackAllocationInfo() {
- if (AllocationSite::CanTrack(map()->instance_type())) {
- if (!IsJSArray()) {
- return true;
- }
-
- return AllocationSite::GetMode(GetElementsKind()) ==
- TRACK_ALLOCATION_SITE;
- }
- return false;
-}
-
-
void AllocationSite::Initialize() {
+ set_transition_info(Smi::FromInt(0));
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::FromInt(0));
+ set_pretenure_data(Smi::FromInt(0));
+ set_pretenure_create_count(Smi::FromInt(0));
set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
SKIP_WRITE_BARRIER);
}
+void AllocationSite::MarkZombie() {
+ ASSERT(!IsZombie());
+ Initialize();
+ set_pretenure_decision(kZombie);
+}
+
+
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
ElementsKind boilerplate_elements_kind) {
- if (FLAG_track_allocation_sites &&
- IsFastSmiElementsKind(boilerplate_elements_kind)) {
+ if (IsFastSmiElementsKind(boilerplate_elements_kind)) {
return TRACK_ALLOCATION_SITE;
}
@@ -1352,8 +1481,7 @@ AllocationSiteMode AllocationSite::GetMode(
AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
ElementsKind to) {
- if (FLAG_track_allocation_sites &&
- IsFastSmiElementsKind(from) &&
+ if (IsFastSmiElementsKind(from) &&
IsMoreGeneralElementsKindTransition(from, to)) {
return TRACK_ALLOCATION_SITE;
}
@@ -1363,10 +1491,95 @@ AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
inline bool AllocationSite::CanTrack(InstanceType type) {
+ if (FLAG_allocation_site_pretenuring) {
+ return type == JS_ARRAY_TYPE ||
+ type == JS_OBJECT_TYPE ||
+ type < FIRST_NONSTRING_TYPE;
+ }
return type == JS_ARRAY_TYPE;
}
+inline DependentCode::DependencyGroup AllocationSite::ToDependencyGroup(
+ Reason reason) {
+ switch (reason) {
+ case TENURING:
+ return DependentCode::kAllocationSiteTenuringChangedGroup;
+ break;
+ case TRANSITIONS:
+ return DependentCode::kAllocationSiteTransitionChangedGroup;
+ break;
+ }
+ UNREACHABLE();
+ return DependentCode::kAllocationSiteTransitionChangedGroup;
+}
+
+
+inline void AllocationSite::set_memento_found_count(int count) {
+ int value = pretenure_data()->value();
+ // Verify that we can count more mementos than we can possibly find in one
+ // new space collection.
+ ASSERT((GetHeap()->MaxSemiSpaceSize() /
+ (StaticVisitorBase::kMinObjectSizeInWords * kPointerSize +
+ AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
+ ASSERT(count < MementoFoundCountBits::kMax);
+ set_pretenure_data(
+ Smi::FromInt(MementoFoundCountBits::update(value, count)),
+ SKIP_WRITE_BARRIER);
+}
+
+inline bool AllocationSite::IncrementMementoFoundCount() {
+ if (IsZombie()) return false;
+
+ int value = memento_found_count();
+ set_memento_found_count(value + 1);
+ return value == 0;
+}
+
+
+inline void AllocationSite::IncrementMementoCreateCount() {
+ ASSERT(FLAG_allocation_site_pretenuring);
+ int value = memento_create_count();
+ set_memento_create_count(value + 1);
+}
+
+
+inline bool AllocationSite::DigestPretenuringFeedback() {
+ bool decision_changed = false;
+ int create_count = memento_create_count();
+ int found_count = memento_found_count();
+ bool minimum_mementos_created = create_count >= kPretenureMinimumCreated;
+ double ratio =
+ minimum_mementos_created || FLAG_trace_pretenuring_statistics ?
+ static_cast<double>(found_count) / create_count : 0.0;
+ PretenureFlag current_mode = GetPretenureMode();
+
+ if (minimum_mementos_created) {
+ PretenureDecision result = ratio >= kPretenureRatio
+ ? kTenure
+ : kDontTenure;
+ set_pretenure_decision(result);
+ if (current_mode != GetPretenureMode()) {
+ decision_changed = true;
+ set_deopt_dependent_code(true);
+ }
+ }
+
+ if (FLAG_trace_pretenuring_statistics) {
+ PrintF(
+ "AllocationSite(%p): (created, found, ratio) (%d, %d, %f) %s => %s\n",
+ static_cast<void*>(this), create_count, found_count, ratio,
+ current_mode == TENURED ? "tenured" : "not tenured",
+ GetPretenureMode() == TENURED ? "tenured" : "not tenured");
+ }
+
+ // Clear feedback calculation fields until the next gc.
+ set_memento_found_count(0);
+ set_memento_create_count(0);
+ return decision_changed;
+}
+
+
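The decision logic above boils down to a ratio test over per-GC memento counters; a minimal standalone sketch follows (thresholds and names are illustrative placeholders, not the real V8 constants).

#include <cstdio>

const int    kPretenureMinimumCreated = 100;   // placeholder threshold
const double kPretenureRatio          = 0.85;  // placeholder threshold

enum PretenureDecision { kDontTenure, kTenure };

// created: mementos written behind new objects since the last GC.
// found:   mementos actually discovered during the new-space scan.
// Returns true when the decision flipped, which is what triggers the
// dependent-code deoptimization in the real implementation.
bool DigestPretenuringFeedback(int created, int found,
                               PretenureDecision* decision) {
  if (created < kPretenureMinimumCreated) return false;  // not enough data yet
  double ratio = static_cast<double>(found) / created;
  PretenureDecision result = ratio >= kPretenureRatio ? kTenure : kDontTenure;
  bool changed = (result != *decision);
  *decision = result;
  return changed;
}

int main() {
  PretenureDecision d = kDontTenure;
  // 180 of 200 allocations survived into the scan -> switch to tenuring.
  std::printf("changed=%d tenure=%d\n",
              DigestPretenuringFeedback(200, 180, &d), d == kTenure);
  return 0;
}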
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
object->ValidateElements();
ElementsKind elements_kind = object->map()->elements_kind();
@@ -1758,18 +1971,12 @@ void JSObject::FastPropertyAtPut(int index, Object* value) {
int JSObject::GetInObjectPropertyOffset(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- return map()->instance_size() + (index * kPointerSize);
+ return map()->GetInObjectPropertyOffset(index);
}
Object* JSObject::InObjectPropertyAt(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- int offset = map()->instance_size() + (index * kPointerSize);
+ int offset = GetInObjectPropertyOffset(index);
return READ_FIELD(this, offset);
}
@@ -1778,9 +1985,7 @@ Object* JSObject::InObjectPropertyAtPut(int index,
Object* value,
WriteBarrierMode mode) {
// Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- ASSERT(index < 0);
- int offset = map()->instance_size() + (index * kPointerSize);
+ int offset = GetInObjectPropertyOffset(index);
WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
return value;
@@ -1896,8 +2101,7 @@ void Object::VerifyApiCallResultType() {
FixedArrayBase* FixedArrayBase::cast(Object* object) {
- ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray() ||
- object->IsConstantPoolArray());
+ ASSERT(object->IsFixedArrayBase());
return reinterpret_cast<FixedArrayBase*>(object);
}
@@ -2293,6 +2497,11 @@ int DescriptorArray::SearchWithCache(Name* name, Map* map) {
}
+PropertyDetails Map::GetLastDescriptorDetails() {
+ return instance_descriptors()->GetDetails(LastAdded());
+}
+
+
void Map::LookupDescriptor(JSObject* holder,
Name* name,
LookupResult* result) {
@@ -2310,7 +2519,8 @@ void Map::LookupTransition(JSObject* holder,
TransitionArray* transition_array = transitions();
int number = transition_array->Search(name);
if (number != TransitionArray::kNotFound) {
- return result->TransitionResult(holder, number);
+ return result->TransitionResult(
+ holder, transition_array->GetTarget(number));
}
}
result->NotFound();
@@ -2319,9 +2529,7 @@ void Map::LookupTransition(JSObject* holder,
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToKeyIndex(descriptor_number)));
+ return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
}
@@ -2376,9 +2584,7 @@ void DescriptorArray::InitializeRepresentations(Representation representation) {
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToValueIndex(descriptor_number)));
+ return RawFieldOfElementAt(ToValueIndex(descriptor_number));
}
@@ -2580,12 +2786,12 @@ void SeededNumberDictionary::set_requires_slow_elements() {
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(ConstantPoolArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
-CAST_ACCESSOR(TypeFeedbackCells)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
@@ -2637,18 +2843,26 @@ CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(ExternalArray)
-CAST_ACCESSOR(ExternalByteArray)
-CAST_ACCESSOR(ExternalUnsignedByteArray)
-CAST_ACCESSOR(ExternalShortArray)
-CAST_ACCESSOR(ExternalUnsignedShortArray)
-CAST_ACCESSOR(ExternalIntArray)
-CAST_ACCESSOR(ExternalUnsignedIntArray)
-CAST_ACCESSOR(ExternalFloatArray)
-CAST_ACCESSOR(ExternalDoubleArray)
-CAST_ACCESSOR(ExternalPixelArray)
+CAST_ACCESSOR(ExternalInt8Array)
+CAST_ACCESSOR(ExternalUint8Array)
+CAST_ACCESSOR(ExternalInt16Array)
+CAST_ACCESSOR(ExternalUint16Array)
+CAST_ACCESSOR(ExternalInt32Array)
+CAST_ACCESSOR(ExternalUint32Array)
+CAST_ACCESSOR(ExternalFloat32Array)
+CAST_ACCESSOR(ExternalFloat64Array)
+CAST_ACCESSOR(ExternalUint8ClampedArray)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(AccessorInfo)
+template <class Traits>
+FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
+ SLOW_ASSERT(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
+ return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+}
+
#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
STRUCT_LIST(MAKE_STRUCT_CAST)
@@ -2692,6 +2906,8 @@ bool Name::Equals(Name* other) {
ACCESSORS(Symbol, name, Object, kNameOffset)
+ACCESSORS(Symbol, flags, Smi, kFlagsOffset)
+BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
bool String::Equals(String* other) {
@@ -2985,6 +3201,7 @@ void ExternalAsciiString::update_data_cache() {
void ExternalAsciiString::set_resource(
const ExternalAsciiString::Resource* resource) {
+ ASSERT(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
*reinterpret_cast<const Resource**>(
FIELD_ADDR(this, kResourceOffset)) = resource;
if (resource != NULL) update_data_cache();
@@ -3177,7 +3394,7 @@ void JSFunctionResultCache::MakeZeroSize() {
void JSFunctionResultCache::Clear() {
int cache_size = size();
- Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
+ Object** entries_start = RawFieldOfElementAt(kEntriesIndex);
MemsetPointer(entries_start,
GetHeap()->the_hole_value(),
cache_size - kEntriesIndex);
@@ -3234,26 +3451,26 @@ Address ByteArray::GetDataStartAddress() {
}
-uint8_t* ExternalPixelArray::external_pixel_pointer() {
+uint8_t* ExternalUint8ClampedArray::external_uint8_clamped_pointer() {
return reinterpret_cast<uint8_t*>(external_pointer());
}
-uint8_t ExternalPixelArray::get_scalar(int index) {
+uint8_t ExternalUint8ClampedArray::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pixel_pointer();
+ uint8_t* ptr = external_uint8_clamped_pointer();
return ptr[index];
}
-MaybeObject* ExternalPixelArray::get(int index) {
+MaybeObject* ExternalUint8ClampedArray::get(int index) {
return Smi::FromInt(static_cast<int>(get_scalar(index)));
}
-void ExternalPixelArray::set(int index, uint8_t value) {
+void ExternalUint8ClampedArray::set(int index, uint8_t value) {
ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pixel_pointer();
+ uint8_t* ptr = external_uint8_clamped_pointer();
ptr[index] = value;
}
@@ -3270,158 +3487,302 @@ void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) {
}
-int8_t ExternalByteArray::get_scalar(int index) {
+int8_t ExternalInt8Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
int8_t* ptr = static_cast<int8_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalByteArray::get(int index) {
+MaybeObject* ExternalInt8Array::get(int index) {
return Smi::FromInt(static_cast<int>(get_scalar(index)));
}
-void ExternalByteArray::set(int index, int8_t value) {
+void ExternalInt8Array::set(int index, int8_t value) {
ASSERT((index >= 0) && (index < this->length()));
int8_t* ptr = static_cast<int8_t*>(external_pointer());
ptr[index] = value;
}
-uint8_t ExternalUnsignedByteArray::get_scalar(int index) {
+uint8_t ExternalUint8Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalUnsignedByteArray::get(int index) {
+MaybeObject* ExternalUint8Array::get(int index) {
return Smi::FromInt(static_cast<int>(get_scalar(index)));
}
-void ExternalUnsignedByteArray::set(int index, uint8_t value) {
+void ExternalUint8Array::set(int index, uint8_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
ptr[index] = value;
}
-int16_t ExternalShortArray::get_scalar(int index) {
+int16_t ExternalInt16Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
int16_t* ptr = static_cast<int16_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalShortArray::get(int index) {
+MaybeObject* ExternalInt16Array::get(int index) {
return Smi::FromInt(static_cast<int>(get_scalar(index)));
}
-void ExternalShortArray::set(int index, int16_t value) {
+void ExternalInt16Array::set(int index, int16_t value) {
ASSERT((index >= 0) && (index < this->length()));
int16_t* ptr = static_cast<int16_t*>(external_pointer());
ptr[index] = value;
}
-uint16_t ExternalUnsignedShortArray::get_scalar(int index) {
+uint16_t ExternalUint16Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalUnsignedShortArray::get(int index) {
+MaybeObject* ExternalUint16Array::get(int index) {
return Smi::FromInt(static_cast<int>(get_scalar(index)));
}
-void ExternalUnsignedShortArray::set(int index, uint16_t value) {
+void ExternalUint16Array::set(int index, uint16_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
ptr[index] = value;
}
-int32_t ExternalIntArray::get_scalar(int index) {
+int32_t ExternalInt32Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
int32_t* ptr = static_cast<int32_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalIntArray::get(int index) {
+MaybeObject* ExternalInt32Array::get(int index) {
return GetHeap()->NumberFromInt32(get_scalar(index));
}
-void ExternalIntArray::set(int index, int32_t value) {
+void ExternalInt32Array::set(int index, int32_t value) {
ASSERT((index >= 0) && (index < this->length()));
int32_t* ptr = static_cast<int32_t*>(external_pointer());
ptr[index] = value;
}
-uint32_t ExternalUnsignedIntArray::get_scalar(int index) {
+uint32_t ExternalUint32Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalUnsignedIntArray::get(int index) {
+MaybeObject* ExternalUint32Array::get(int index) {
return GetHeap()->NumberFromUint32(get_scalar(index));
}
-void ExternalUnsignedIntArray::set(int index, uint32_t value) {
+void ExternalUint32Array::set(int index, uint32_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
ptr[index] = value;
}
-float ExternalFloatArray::get_scalar(int index) {
+float ExternalFloat32Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
float* ptr = static_cast<float*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalFloatArray::get(int index) {
+MaybeObject* ExternalFloat32Array::get(int index) {
return GetHeap()->NumberFromDouble(get_scalar(index));
}
-void ExternalFloatArray::set(int index, float value) {
+void ExternalFloat32Array::set(int index, float value) {
ASSERT((index >= 0) && (index < this->length()));
float* ptr = static_cast<float*>(external_pointer());
ptr[index] = value;
}
-double ExternalDoubleArray::get_scalar(int index) {
+double ExternalFloat64Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
double* ptr = static_cast<double*>(external_pointer());
return ptr[index];
}
-MaybeObject* ExternalDoubleArray::get(int index) {
+MaybeObject* ExternalFloat64Array::get(int index) {
return GetHeap()->NumberFromDouble(get_scalar(index));
}
-void ExternalDoubleArray::set(int index, double value) {
+void ExternalFloat64Array::set(int index, double value) {
ASSERT((index >= 0) && (index < this->length()));
double* ptr = static_cast<double*>(external_pointer());
ptr[index] = value;
}
+int FixedTypedArrayBase::size() {
+ InstanceType instance_type = map()->instance_type();
+ int element_size;
+ switch (instance_type) {
+ case FIXED_UINT8_ARRAY_TYPE:
+ case FIXED_INT8_ARRAY_TYPE:
+ case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
+ element_size = 1;
+ break;
+ case FIXED_UINT16_ARRAY_TYPE:
+ case FIXED_INT16_ARRAY_TYPE:
+ element_size = 2;
+ break;
+ case FIXED_UINT32_ARRAY_TYPE:
+ case FIXED_INT32_ARRAY_TYPE:
+ case FIXED_FLOAT32_ARRAY_TYPE:
+ element_size = 4;
+ break;
+ case FIXED_FLOAT64_ARRAY_TYPE:
+ element_size = 8;
+ break;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ return OBJECT_POINTER_ALIGN(kDataOffset + length() * element_size);
+}
+
+
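Worked example of the size computation above, using a stand-in header size and pointer-size alignment (the real kDataOffset comes from the FixedTypedArrayBase layout, not from this sketch).

#include <cstdio>

const int kPointerSize = 8;   // assumption: 64-bit build
const int kDataOffset  = 16;  // placeholder header size before the payload

int ObjectPointerAlign(int value) {
  return (value + kPointerSize - 1) & ~(kPointerSize - 1);
}

int FixedTypedArraySize(int length, int element_size) {
  return ObjectPointerAlign(kDataOffset + length * element_size);
}

int main() {
  // Float64 x 3: 16 + 3*8 = 40 (already aligned).
  // Uint8   x 5: 16 + 5   = 21, rounded up to 24.
  std::printf("%d %d\n", FixedTypedArraySize(3, 8), FixedTypedArraySize(5, 1));
  return 0;
}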
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
+ ASSERT((index >= 0) && (index < this->length()));
+ ElementType* ptr = reinterpret_cast<ElementType*>(
+ FIELD_ADDR(this, kDataOffset));
+ return ptr[index];
+}
+
+
+template<> inline
+FixedTypedArray<Float64ArrayTraits>::ElementType
+ FixedTypedArray<Float64ArrayTraits>::get_scalar(int index) {
+ ASSERT((index >= 0) && (index < this->length()));
+ return READ_DOUBLE_FIELD(this, ElementOffset(index));
+}
+
+
+template <class Traits>
+void FixedTypedArray<Traits>::set(int index, ElementType value) {
+ ASSERT((index >= 0) && (index < this->length()));
+ ElementType* ptr = reinterpret_cast<ElementType*>(
+ FIELD_ADDR(this, kDataOffset));
+ ptr[index] = value;
+}
+
+
+template<> inline
+void FixedTypedArray<Float64ArrayTraits>::set(
+ int index, Float64ArrayTraits::ElementType value) {
+ ASSERT((index >= 0) && (index < this->length()));
+ WRITE_DOUBLE_FIELD(this, ElementOffset(index), value);
+}
+
+
+template <class Traits>
+MaybeObject* FixedTypedArray<Traits>::get(int index) {
+ return Traits::ToObject(GetHeap(), get_scalar(index));
+}
+
+template <class Traits>
+MaybeObject* FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
+ ElementType cast_value = Traits::defaultValue();
+ if (index < static_cast<uint32_t>(length())) {
+ if (value->IsSmi()) {
+ int int_value = Smi::cast(value)->value();
+ cast_value = static_cast<ElementType>(int_value);
+ } else if (value->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value)->value();
+ cast_value = static_cast<ElementType>(DoubleToInt32(double_value));
+ } else {
+ // Clamp undefined to the default value. All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ set(index, cast_value);
+ }
+ return Traits::ToObject(GetHeap(), cast_value);
+}
+
+template <class Traits>
+Handle<Object> FixedTypedArray<Traits>::SetValue(
+ Handle<FixedTypedArray<Traits> > array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
+MaybeObject* Uint8ArrayTraits::ToObject(Heap*, uint8_t scalar) {
+ return Smi::FromInt(scalar);
+}
+
+
+MaybeObject* Uint8ClampedArrayTraits::ToObject(Heap*, uint8_t scalar) {
+ return Smi::FromInt(scalar);
+}
+
+
+MaybeObject* Int8ArrayTraits::ToObject(Heap*, int8_t scalar) {
+ return Smi::FromInt(scalar);
+}
+
+
+MaybeObject* Uint16ArrayTraits::ToObject(Heap*, uint16_t scalar) {
+ return Smi::FromInt(scalar);
+}
+
+
+MaybeObject* Int16ArrayTraits::ToObject(Heap*, int16_t scalar) {
+ return Smi::FromInt(scalar);
+}
+
+
+MaybeObject* Uint32ArrayTraits::ToObject(Heap* heap, uint32_t scalar) {
+ return heap->NumberFromUint32(scalar);
+}
+
+
+MaybeObject* Int32ArrayTraits::ToObject(Heap* heap, int32_t scalar) {
+ return heap->NumberFromInt32(scalar);
+}
+
+
+MaybeObject* Float32ArrayTraits::ToObject(Heap* heap, float scalar) {
+ return heap->NumberFromDouble(scalar);
+}
+
+
+MaybeObject* Float64ArrayTraits::ToObject(Heap* heap, double scalar) {
+ return heap->NumberFromDouble(scalar);
+}
+
+
int Map::visitor_id() {
return READ_BYTE_FIELD(this, kVisitorIdOffset);
}
@@ -3448,6 +3809,14 @@ int Map::pre_allocated_property_fields() {
}
+int Map::GetInObjectPropertyOffset(int index) {
+ // Adjust for the number of properties stored in the object.
+ index -= inobject_properties();
+ ASSERT(index < 0);
+ return instance_size() + (index * kPointerSize);
+}
+
+
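Worked example of the offset math just hoisted from JSObject into Map (the sizes below are illustrative): in-object properties live at the end of the instance, so the index is first rebased to a negative offset from instance_size.

#include <cassert>
#include <cstdio>

const int kPointerSize = 8;  // assumption: 64-bit build

int GetInObjectPropertyOffset(int index, int inobject_properties,
                              int instance_size) {
  index -= inobject_properties;  // now negative: counts back from the end
  assert(index < 0);
  return instance_size + index * kPointerSize;
}

int main() {
  // An instance of 48 bytes with 4 in-object properties stores property 0
  // at offset 16 and property 3 at offset 40 (the last pointer-sized slot).
  std::printf("%d %d\n", GetInObjectPropertyOffset(0, 4, 48),
                         GetInObjectPropertyOffset(3, 4, 48));
  return 0;
}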
int HeapObject::SizeFromMap(Map* map) {
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
@@ -3482,6 +3851,10 @@ int HeapObject::SizeFromMap(Map* map) {
reinterpret_cast<ConstantPoolArray*>(this)->count_of_ptr_entries(),
reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries());
}
+ if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
+ instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
+ return reinterpret_cast<FixedTypedArrayBase*>(this)->size();
+ }
ASSERT(instance_type == CODE_TYPE);
return reinterpret_cast<Code*>(this)->CodeSize();
}
@@ -3624,8 +3997,9 @@ bool Map::is_shared() {
void Map::set_dictionary_map(bool value) {
- if (value) mark_unstable();
- set_bit_field3(DictionaryMap::update(bit_field3(), value));
+ uint32_t new_bit_field3 = DictionaryMap::update(bit_field3(), value);
+ new_bit_field3 = IsUnstable::update(new_bit_field3, value);
+ set_bit_field3(new_bit_field3);
}
@@ -3649,16 +4023,13 @@ bool Map::owns_descriptors() {
}
-void Map::set_is_observed(bool is_observed) {
- ASSERT(instance_type() < FIRST_JS_OBJECT_TYPE ||
- instance_type() > LAST_JS_OBJECT_TYPE ||
- has_slow_elements_kind() || has_external_array_elements());
- set_bit_field3(IsObserved::update(bit_field3(), is_observed));
+void Map::set_has_instance_call_handler() {
+ set_bit_field3(HasInstanceCallHandler::update(bit_field3(), true));
}
-bool Map::is_observed() {
- return IsObserved::decode(bit_field3());
+bool Map::has_instance_call_handler() {
+ return HasInstanceCallHandler::decode(bit_field3());
}
@@ -3786,8 +4157,7 @@ Object* DependentCode::object_at(int i) {
Object** DependentCode::slot_at(int i) {
- return HeapObject::RawField(
- this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
+ return RawFieldOfElementAt(kCodesStartIndex + i);
}
@@ -3813,10 +4183,6 @@ void DependentCode::ExtendGroup(DependencyGroup group) {
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
- // Make sure that all call stubs have an arguments count.
- ASSERT((ExtractKindFromFlags(flags) != CALL_IC &&
- ExtractKindFromFlags(flags) != KEYED_CALL_IC) ||
- ExtractArgumentsCountFromFlags(flags) >= 0);
WRITE_INT_FIELD(this, kFlagsOffset, flags);
}
@@ -3838,29 +4204,25 @@ InlineCacheState Code::ic_state() {
}
-Code::ExtraICState Code::extra_ic_state() {
- ASSERT((is_inline_cache_stub() && !needs_extended_extra_ic_state(kind()))
- || ic_state() == DEBUG_STUB);
+ExtraICState Code::extra_ic_state() {
+ ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
return ExtractExtraICStateFromFlags(flags());
}
-Code::ExtraICState Code::extended_extra_ic_state() {
- ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
- ASSERT(needs_extended_extra_ic_state(kind()));
- return ExtractExtendedExtraICStateFromFlags(flags());
+Code::StubType Code::type() {
+ return ExtractTypeFromFlags(flags());
}
-Code::StubType Code::type() {
- return ExtractTypeFromFlags(flags());
+// For initialization.
+void Code::set_raw_kind_specific_flags1(int value) {
+ WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
}
-int Code::arguments_count() {
- ASSERT(is_call_stub() || is_keyed_call_stub() ||
- kind() == STUB || is_handler());
- return ExtractArgumentsCountFromFlags(flags());
+void Code::set_raw_kind_specific_flags2(int value) {
+ WRITE_INT_FIELD(this, kKindSpecificFlags2Offset, value);
}
@@ -3878,31 +4240,14 @@ inline void Code::set_is_crankshafted(bool value) {
int Code::major_key() {
- ASSERT(kind() == STUB ||
- kind() == HANDLER ||
- kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC ||
- kind() == COMPARE_NIL_IC ||
- kind() == STORE_IC ||
- kind() == LOAD_IC ||
- kind() == KEYED_LOAD_IC ||
- kind() == TO_BOOLEAN_IC);
+ ASSERT(has_major_key());
return StubMajorKeyField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
void Code::set_major_key(int major) {
- ASSERT(kind() == STUB ||
- kind() == HANDLER ||
- kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC ||
- kind() == COMPARE_NIL_IC ||
- kind() == LOAD_IC ||
- kind() == KEYED_LOAD_IC ||
- kind() == STORE_IC ||
- kind() == KEYED_STORE_IC ||
- kind() == TO_BOOLEAN_IC);
+ ASSERT(has_major_key());
ASSERT(0 <= major && major < 256);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = StubMajorKeyField::update(previous, major);
@@ -3910,16 +4255,17 @@ void Code::set_major_key(int major) {
}
-bool Code::is_pregenerated() {
- return (kind() == STUB && IsPregeneratedField::decode(flags()));
-}
-
-
-void Code::set_is_pregenerated(bool value) {
- ASSERT(kind() == STUB);
- Flags f = flags();
- f = static_cast<Flags>(IsPregeneratedField::update(f, value));
- set_flags(f);
+bool Code::has_major_key() {
+ return kind() == STUB ||
+ kind() == HANDLER ||
+ kind() == BINARY_OP_IC ||
+ kind() == COMPARE_IC ||
+ kind() == COMPARE_NIL_IC ||
+ kind() == LOAD_IC ||
+ kind() == KEYED_LOAD_IC ||
+ kind() == STORE_IC ||
+ kind() == KEYED_STORE_IC ||
+ kind() == TO_BOOLEAN_IC;
}
@@ -4071,21 +4417,8 @@ void Code::set_back_edges_patched_for_osr(bool value) {
-CheckType Code::check_type() {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- byte type = READ_BYTE_FIELD(this, kCheckTypeOffset);
- return static_cast<CheckType>(type);
-}
-
-
-void Code::set_check_type(CheckType value) {
- ASSERT(is_call_stub() || is_keyed_call_stub());
- WRITE_BYTE_FIELD(this, kCheckTypeOffset, value);
-}
-
-
byte Code::to_boolean_state() {
- return extended_extra_ic_state();
+ return extra_ic_state();
}
@@ -4131,7 +4464,7 @@ bool Code::is_inline_cache_stub() {
bool Code::is_keyed_stub() {
- return is_keyed_load_stub() || is_keyed_store_stub() || is_keyed_call_stub();
+ return is_keyed_load_stub() || is_keyed_store_stub();
}
@@ -4140,37 +4473,45 @@ bool Code::is_debug_stub() {
}
+ConstantPoolArray* Code::constant_pool() {
+ return ConstantPoolArray::cast(READ_FIELD(this, kConstantPoolOffset));
+}
+
+
+void Code::set_constant_pool(Object* value) {
+ ASSERT(value->IsConstantPoolArray());
+ WRITE_FIELD(this, kConstantPoolOffset, value);
+ WRITE_BARRIER(GetHeap(), this, kConstantPoolOffset, value);
+}
+
+
Code::Flags Code::ComputeFlags(Kind kind,
InlineCacheState ic_state,
ExtraICState extra_ic_state,
StubType type,
- int argc,
InlineCacheHolderFlag holder) {
- ASSERT(argc <= Code::kMaxArguments);
- // Since the extended extra ic state overlaps with the argument count
- // for CALL_ICs, do so checks to make sure that they don't interfere.
- ASSERT((kind != Code::CALL_IC &&
- kind != Code::KEYED_CALL_IC) ||
- (ExtraICStateField::encode(extra_ic_state) | true));
// Compute the bit mask.
unsigned int bits = KindField::encode(kind)
| ICStateField::encode(ic_state)
| TypeField::encode(type)
- | ExtendedExtraICStateField::encode(extra_ic_state)
+ | ExtraICStateField::encode(extra_ic_state)
| CacheHolderField::encode(holder);
- if (!Code::needs_extended_extra_ic_state(kind)) {
- bits |= (argc << kArgumentsCountShift);
- }
return static_cast<Flags>(bits);
}
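The flags word manipulated above is just several small enums packed with BitField-style shift/mask helpers; here is a self-contained sketch with made-up field widths (the real layout is defined elsewhere in objects.h).

#include <cstdint>
#include <cstdio>

template <int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(uint32_t value) { return value << shift; }
  static uint32_t decode(uint32_t flags) { return (flags & kMask) >> shift; }
};

// Field layout is invented for the example; only the packing idea matters.
typedef BitField<0, 4>  KindField;
typedef BitField<4, 3>  ICStateField;
typedef BitField<7, 2>  TypeField;
typedef BitField<9, 6>  ExtraICStateField;
typedef BitField<15, 1> CacheHolderField;

uint32_t ComputeFlags(uint32_t kind, uint32_t ic_state, uint32_t extra,
                      uint32_t type, uint32_t holder) {
  return KindField::encode(kind) | ICStateField::encode(ic_state) |
         TypeField::encode(type) | ExtraICStateField::encode(extra) |
         CacheHolderField::encode(holder);
}

int main() {
  uint32_t flags = ComputeFlags(3, 1, 2, 0, 1);
  std::printf("kind=%u extra=%u holder=%u\n", KindField::decode(flags),
              ExtraICStateField::decode(flags), CacheHolderField::decode(flags));
  return 0;
}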
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
ExtraICState extra_ic_state,
- StubType type,
- int argc,
- InlineCacheHolderFlag holder) {
- return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
+ InlineCacheHolderFlag holder,
+ StubType type) {
+ return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder);
+}
+
+
+Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
+ StubType type,
+ InlineCacheHolderFlag holder) {
+ return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder);
}
@@ -4184,27 +4525,16 @@ InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
}
-Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
+ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
return ExtraICStateField::decode(flags);
}
-Code::ExtraICState Code::ExtractExtendedExtraICStateFromFlags(
- Flags flags) {
- return ExtendedExtraICStateField::decode(flags);
-}
-
-
Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
return TypeField::decode(flags);
}
-int Code::ExtractArgumentsCountFromFlags(Flags flags) {
- return (flags & kArgumentsCountMask) >> kArgumentsCountShift;
-}
-
-
InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
return CacheHolderField::decode(flags);
}
@@ -4344,7 +4674,7 @@ bool Map::CanHaveMoreTransitions() {
if (!HasTransitionArray()) return true;
return FixedArray::SizeFor(transitions()->length() +
TransitionArray::kTransitionSize)
- <= Page::kMaxNonCodeHeapObjectSize;
+ <= Page::kMaxRegularHeapObjectSize;
}
@@ -4422,6 +4752,17 @@ void Map::set_transitions(TransitionArray* transition_array,
// When there is another reference to the array somewhere (e.g. a handle),
// not zapping turns from a waste of memory into a source of crashes.
if (HasTransitionArray()) {
+#ifdef DEBUG
+ for (int i = 0; i < transitions()->number_of_transitions(); i++) {
+ Map* target = transitions()->GetTarget(i);
+ if (target->instance_descriptors() == instance_descriptors()) {
+ Name* key = transitions()->GetKey(i);
+ int new_target_index = transition_array->Search(key);
+ ASSERT(new_target_index != TransitionArray::kNotFound);
+ ASSERT(transition_array->GetTarget(new_target_index) == target);
+ }
+ }
+#endif
ASSERT(transitions() != transition_array);
ZapTransitions();
}
@@ -4554,6 +4895,9 @@ ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
+ACCESSORS_TO_SMI(AllocationSite, pretenure_data, kPretenureDataOffset)
+ACCESSORS_TO_SMI(AllocationSite, pretenure_create_count,
+ kPretenureCreateCountOffset)
ACCESSORS(AllocationSite, dependent_code, DependentCode,
kDependentCodeOffset)
ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
@@ -4807,6 +5151,8 @@ bool SharedFunctionInfo::is_classic_mode() {
BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
kExtendedModeFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
+ kInlineBuiltin)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
name_should_print_as_anonymous,
kNameShouldPrintAsAnonymous)
@@ -4867,6 +5213,7 @@ Code* SharedFunctionInfo::code() {
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
+ ASSERT(value->kind() != Code::OPTIMIZED_FUNCTION);
WRITE_FIELD(this, kCodeOffset, value);
CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
}
@@ -4903,7 +5250,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
bool SharedFunctionInfo::is_compiled() {
return code() !=
- GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
+ GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
}
@@ -5026,20 +5373,21 @@ bool JSFunction::IsOptimizable() {
}
-bool JSFunction::IsMarkedForLazyRecompilation() {
- return code() == GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile);
+bool JSFunction::IsMarkedForOptimization() {
+ return code() == GetIsolate()->builtins()->builtin(
+ Builtins::kCompileOptimized);
}
-bool JSFunction::IsMarkedForConcurrentRecompilation() {
+bool JSFunction::IsMarkedForConcurrentOptimization() {
return code() == GetIsolate()->builtins()->builtin(
- Builtins::kConcurrentRecompile);
+ Builtins::kCompileOptimizedConcurrent);
}
-bool JSFunction::IsInRecompileQueue() {
+bool JSFunction::IsInOptimizationQueue() {
return code() == GetIsolate()->builtins()->builtin(
- Builtins::kInRecompileQueue);
+ Builtins::kInOptimizationQueue);
}
@@ -5071,6 +5419,11 @@ void JSFunction::ReplaceCode(Code* code) {
bool was_optimized = IsOptimized();
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
+ if (was_optimized && is_optimized) {
+ shared()->EvictFromOptimizedCodeMap(
+ this->code(), "Replacing with another optimized code");
+ }
+
set_code(code);
// Add/remove the function from the list of optimized functions for this
@@ -5149,7 +5502,8 @@ bool JSFunction::should_have_prototype() {
bool JSFunction::is_compiled() {
- return code() != GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
+ return code() !=
+ GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
}
@@ -5302,7 +5656,6 @@ JSDate* JSDate::cast(Object* obj) {
ACCESSORS(JSMessageObject, type, String, kTypeOffset)
ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_trace, Object, kStackTraceOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
@@ -5320,23 +5673,29 @@ INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+ACCESSORS(Code, raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
-// Type feedback slot: type_feedback_info for FUNCTIONs, stub_info for STUBs.
-void Code::InitializeTypeFeedbackInfoNoWriteBarrier(Object* value) {
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+void Code::WipeOutHeader() {
+ WRITE_FIELD(this, kRelocationInfoOffset, NULL);
+ WRITE_FIELD(this, kHandlerTableOffset, NULL);
+ WRITE_FIELD(this, kDeoptimizationDataOffset, NULL);
+ // Do not wipe out e.g. a minor key.
+ if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
+ }
}
Object* Code::type_feedback_info() {
ASSERT(kind() == FUNCTION);
- return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+ return raw_type_feedback_info();
}
void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
ASSERT(kind() == FUNCTION);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ set_raw_type_feedback_info(value, mode);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
value, mode);
}
@@ -5344,13 +5703,13 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
Object* Code::next_code_link() {
CHECK(kind() == OPTIMIZED_FUNCTION);
- return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+ return raw_type_feedback_info();
}
void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
CHECK(kind() == OPTIMIZED_FUNCTION);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ set_raw_type_feedback_info(value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
value, mode);
}
@@ -5359,8 +5718,7 @@ void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
int Code::stub_info() {
ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
kind() == BINARY_OP_IC || kind() == LOAD_IC);
- Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset);
- return Smi::cast(value)->value();
+ return Smi::cast(raw_type_feedback_info())->value();
}
@@ -5373,7 +5731,7 @@ void Code::set_stub_info(int value) {
kind() == KEYED_LOAD_IC ||
kind() == STORE_IC ||
kind() == KEYED_STORE_IC);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, Smi::FromInt(value));
+ set_raw_type_feedback_info(Smi::FromInt(value));
}
@@ -5450,6 +5808,16 @@ void JSArrayBuffer::set_is_external(bool value) {
}
+bool JSArrayBuffer::should_be_freed() {
+ return BooleanBit::get(flag(), kShouldBeFreed);
+}
+
+
+void JSArrayBuffer::set_should_be_freed(bool value) {
+ set_flag(BooleanBit::set(flag(), kShouldBeFreed, value));
+}
+
+
ACCESSORS(JSArrayBuffer, weak_next, Object, kWeakNextOffset)
ACCESSORS(JSArrayBuffer, weak_first_view, Object, kWeakFirstViewOffset)
@@ -5593,29 +5961,25 @@ bool JSObject::HasExternalArrayElements() {
}
-#define EXTERNAL_ELEMENTS_CHECK(name, type) \
-bool JSObject::HasExternal##name##Elements() { \
- HeapObject* array = elements(); \
- ASSERT(array != NULL); \
- if (!array->IsHeapObject()) \
- return false; \
- return array->map()->instance_type() == type; \
+#define EXTERNAL_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
+bool JSObject::HasExternal##Type##Elements() { \
+ HeapObject* array = elements(); \
+ ASSERT(array != NULL); \
+ if (!array->IsHeapObject()) \
+ return false; \
+ return array->map()->instance_type() == EXTERNAL_##TYPE##_ARRAY_TYPE; \
}
+TYPED_ARRAYS(EXTERNAL_ELEMENTS_CHECK)
+
+#undef EXTERNAL_ELEMENTS_CHECK
-EXTERNAL_ELEMENTS_CHECK(Byte, EXTERNAL_BYTE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedByte, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Short, EXTERNAL_SHORT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedShort,
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Int, EXTERNAL_INT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(UnsignedInt,
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Float,
- EXTERNAL_FLOAT_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Double,
- EXTERNAL_DOUBLE_ARRAY_TYPE)
-EXTERNAL_ELEMENTS_CHECK(Pixel, EXTERNAL_PIXEL_ARRAY_TYPE)
+
+bool JSObject::HasFixedTypedArrayElements() {
+ HeapObject* array = elements();
+ ASSERT(array != NULL);
+ return array->IsFixedTypedArrayBase();
+}
bool JSObject::HasNamedInterceptor() {
@@ -5833,23 +6197,27 @@ PropertyAttributes JSReceiver::GetElementAttribute(uint32_t index) {
}
-// TODO(504): this may be useful in other places too where JSGlobalProxy
-// is used.
-Object* JSObject::BypassGlobalProxy() {
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return GetHeap()->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- return proto;
- }
- return this;
+bool JSGlobalObject::IsDetached() {
+ return JSGlobalProxy::cast(global_receiver())->IsDetachedFrom(this);
}
-MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
+bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) {
+ return GetPrototype() != global;
+}
+
+
+Handle<Object> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) {
+ return object->IsJSProxy()
+ ? JSProxy::GetOrCreateIdentityHash(Handle<JSProxy>::cast(object))
+ : JSObject::GetOrCreateIdentityHash(Handle<JSObject>::cast(object));
+}
+
+
+Object* JSReceiver::GetIdentityHash() {
return IsJSProxy()
- ? JSProxy::cast(this)->GetIdentityHash(flag)
- : JSObject::cast(this)->GetIdentityHash(flag);
+ ? JSProxy::cast(this)->GetIdentityHash()
+ : JSObject::cast(this)->GetIdentityHash();
}
@@ -5925,7 +6293,7 @@ void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
Object* function_template = expected_receiver_type();
if (!function_template->IsFunctionTemplateInfo()) return true;
- return receiver->IsInstanceOf(FunctionTemplateInfo::cast(function_template));
+ return FunctionTemplateInfo::cast(function_template)->IsTemplateFor(receiver);
}
@@ -6049,16 +6417,14 @@ bool ObjectHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
template <int entrysize>
uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) {
- MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- return Smi::cast(maybe_hash->ToObjectChecked())->value();
+ return Smi::cast(key->GetHash())->value();
}
template <int entrysize>
uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
Object* other) {
- MaybeObject* maybe_hash = other->GetHash(OMIT_CREATION);
- return Smi::cast(maybe_hash->ToObjectChecked())->value();
+ return Smi::cast(other->GetHash())->value();
}
@@ -6172,43 +6538,28 @@ MaybeObject* ConstantPoolArray::Copy() {
}
-void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
- set(1 + index * 2, Smi::FromInt(id.ToInt()));
-}
-
-
-TypeFeedbackId TypeFeedbackCells::AstId(int index) {
- return TypeFeedbackId(Smi::cast(get(1 + index * 2))->value());
-}
-
-
-void TypeFeedbackCells::SetCell(int index, Cell* cell) {
- set(index * 2, cell);
-}
-
-
-Cell* TypeFeedbackCells::GetCell(int index) {
- return Cell::cast(get(index * 2));
+Handle<Object> TypeFeedbackInfo::UninitializedSentinel(Isolate* isolate) {
+ return isolate->factory()->the_hole_value();
}
-Handle<Object> TypeFeedbackCells::UninitializedSentinel(Isolate* isolate) {
- return isolate->factory()->the_hole_value();
+Handle<Object> TypeFeedbackInfo::PremonomorphicSentinel(Isolate* isolate) {
+ return isolate->factory()->null_value();
}
-Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
+Handle<Object> TypeFeedbackInfo::MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->undefined_value();
}
-Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate,
+Handle<Object> TypeFeedbackInfo::MonomorphicArraySentinel(Isolate* isolate,
ElementsKind elements_kind) {
return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
}
-Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
+Object* TypeFeedbackInfo::RawUninitializedSentinel(Heap* heap) {
return heap->the_hole_value();
}
@@ -6291,8 +6642,8 @@ bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
}
-ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
- kTypeFeedbackCellsOffset)
+ACCESSORS(TypeFeedbackInfo, feedback_vector, FixedArray,
+ kFeedbackVectorOffset)
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
@@ -6403,7 +6754,6 @@ void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
#undef READ_BYTE_FIELD
#undef WRITE_BYTE_FIELD
-
} } // namespace v8::internal
#endif // V8_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 60c1ef4c38..e9fb83258a 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -107,35 +107,23 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpacePrint(out);
break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- ExternalByteArray::cast(this)->ExternalByteArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- ExternalUnsignedByteArray::cast(this)
- ->ExternalUnsignedByteArrayPrint(out);
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- ExternalShortArray::cast(this)->ExternalShortArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- ExternalUnsignedShortArray::cast(this)
- ->ExternalUnsignedShortArrayPrint(out);
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- ExternalIntArray::cast(this)->ExternalIntArrayPrint(out);
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayPrint(out);
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- ExternalFloatArray::cast(this)->ExternalFloatArrayPrint(out);
+
+#define PRINT_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ External##Type##Array::cast(this)->External##Type##ArrayPrint(out); \
break;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- ExternalDoubleArray::cast(this)->ExternalDoubleArrayPrint(out);
+
+ TYPED_ARRAYS(PRINT_EXTERNAL_ARRAY)
+#undef PRINT_EXTERNAL_ARRAY
+
+#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ case Fixed##Type##Array::kInstanceType: \
+ Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(out); \
break;
+
+ TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
+#undef PRINT_FIXED_TYPED_ARRAY
+
case FILLER_TYPE:
PrintF(out, "filler");
break;
@@ -241,48 +229,19 @@ void FreeSpace::FreeSpacePrint(FILE* out) {
}
-void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
- PrintF(out, "external pixel array");
-}
-
-
-void ExternalByteArray::ExternalByteArrayPrint(FILE* out) {
- PrintF(out, "external byte array");
-}
-
-
-void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint(FILE* out) {
- PrintF(out, "external unsigned byte array");
-}
-
-
-void ExternalShortArray::ExternalShortArrayPrint(FILE* out) {
- PrintF(out, "external short array");
-}
-
-
-void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint(FILE* out) {
- PrintF(out, "external unsigned short array");
-}
-
-
-void ExternalIntArray::ExternalIntArrayPrint(FILE* out) {
- PrintF(out, "external int array");
-}
-
+#define EXTERNAL_ARRAY_PRINTER(Type, type, TYPE, ctype, size) \
+ void External##Type##Array::External##Type##ArrayPrint(FILE* out) { \
+ PrintF(out, "external " #type " array"); \
+ }
-void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint(FILE* out) {
- PrintF(out, "external unsigned int array");
-}
+TYPED_ARRAYS(EXTERNAL_ARRAY_PRINTER)
+#undef EXTERNAL_ARRAY_PRINTER
-void ExternalFloatArray::ExternalFloatArrayPrint(FILE* out) {
- PrintF(out, "external float array");
-}
-
-void ExternalDoubleArray::ExternalDoubleArrayPrint(FILE* out) {
- PrintF(out, "external double array");
+template <class Traits>
+void FixedTypedArray<Traits>::FixedTypedArrayPrint(FILE* out) {
+ PrintF(out, "fixed %s", Traits::Designator());
}
@@ -324,6 +283,24 @@ void JSObject::PrintProperties(FILE* out) {
}
+template<class T>
+static void DoPrintElements(FILE *out, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ PrintF(out, " %d: %d\n", i, p->get_scalar(i));
+ }
+}
+
+
+template<class T>
+static void DoPrintDoubleElements(FILE* out, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ PrintF(out, " %d: %f\n", i, p->get_scalar(i));
+ }
+}
+
+
void JSObject::PrintElements(FILE* out) {
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
@@ -357,72 +334,47 @@ void JSObject::PrintElements(FILE* out) {
}
break;
}
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* p = ExternalPixelArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, p->get_scalar(i));
- }
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- ExternalByteArray* p = ExternalByteArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- ExternalUnsignedByteArray* p =
- ExternalUnsignedByteArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- ExternalShortArray* p = ExternalShortArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- ExternalUnsignedShortArray* p =
- ExternalUnsignedShortArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_INT_ELEMENTS: {
- ExternalIntArray* p = ExternalIntArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- ExternalUnsignedIntArray* p =
- ExternalUnsignedIntArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
- }
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalFloatArray* p = ExternalFloatArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %f\n", i, p->get_scalar(i));
- }
- break;
+
+
+#define PRINT_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintElements<Type>(out, elements()); \
+ break; \
}
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %f\n", i, p->get_scalar(i));
- }
- break;
+
+#define PRINT_DOUBLE_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintDoubleElements<Type>(out, elements()); \
+ break; \
}
+
+ PRINT_ELEMENTS(EXTERNAL_UINT8_CLAMPED_ELEMENTS, ExternalUint8ClampedArray)
+ PRINT_ELEMENTS(EXTERNAL_INT8_ELEMENTS, ExternalInt8Array)
+ PRINT_ELEMENTS(EXTERNAL_UINT8_ELEMENTS,
+ ExternalUint8Array)
+ PRINT_ELEMENTS(EXTERNAL_INT16_ELEMENTS, ExternalInt16Array)
+ PRINT_ELEMENTS(EXTERNAL_UINT16_ELEMENTS,
+ ExternalUint16Array)
+ PRINT_ELEMENTS(EXTERNAL_INT32_ELEMENTS, ExternalInt32Array)
+ PRINT_ELEMENTS(EXTERNAL_UINT32_ELEMENTS,
+ ExternalUint32Array)
+ PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array)
+ PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array)
+
+
+ PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
+ PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray)
+ PRINT_ELEMENTS(INT8_ELEMENTS, FixedInt8Array)
+ PRINT_ELEMENTS(UINT16_ELEMENTS, FixedUint16Array)
+ PRINT_ELEMENTS(INT16_ELEMENTS, FixedInt16Array)
+ PRINT_ELEMENTS(UINT32_ELEMENTS, FixedUint32Array)
+ PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array)
+ PRINT_DOUBLE_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
+ PRINT_DOUBLE_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
+
+#undef PRINT_DOUBLE_ELEMENTS
+#undef PRINT_ELEMENTS
+
case DICTIONARY_ELEMENTS:
elements()->Print(out);
break;
@@ -523,6 +475,7 @@ void Symbol::SymbolPrint(FILE* out) {
PrintF(out, " - hash: %d\n", Hash());
PrintF(out, " - name: ");
name()->ShortPrint();
+ PrintF(out, " - private: %d\n", is_private());
PrintF(out, "\n");
}
@@ -555,6 +508,11 @@ void Map::MapPrint(FILE* out) {
if (is_access_check_needed()) {
PrintF(out, " - access_check_needed\n");
}
+ if (is_frozen()) {
+ PrintF(out, " - frozen\n");
+ } else if (!is_extensible()) {
+ PrintF(out, " - sealed\n");
+ }
PrintF(out, " - back pointer: ");
GetBackPointer()->ShortPrint(out);
PrintF(out, "\n - instance descriptors %s#%i: ",
@@ -597,8 +555,8 @@ void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
PrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n",
ic_total_count(), ic_with_type_info_count());
- PrintF(out, " - type_feedback_cells: ");
- type_feedback_cells()->FixedArrayPrint(out);
+ PrintF(out, " - feedback_vector: ");
+ feedback_vector()->FixedArrayPrint(out);
}
@@ -666,8 +624,6 @@ void JSMessageObject::JSMessageObjectPrint(FILE* out) {
PrintF(out, "\n - end_position: %d", end_position());
PrintF(out, "\n - script: ");
script()->ShortPrint(out);
- PrintF(out, "\n - stack_trace: ");
- stack_trace()->ShortPrint(out);
PrintF(out, "\n - stack_frames: ");
stack_frames()->ShortPrint(out);
PrintF(out, "\n");
@@ -830,7 +786,7 @@ void JSTypedArray::JSTypedArrayPrint(FILE* out) {
byte_length()->ShortPrint(out);
PrintF(out, "\n - length = ");
length()->ShortPrint(out);
- PrintF("\n");
+ PrintF(out, "\n");
PrintElements(out);
}
@@ -844,7 +800,7 @@ void JSDataView::JSDataViewPrint(FILE* out) {
byte_offset()->ShortPrint(out);
PrintF(out, "\n - byte_length = ");
byte_length()->ShortPrint(out);
- PrintF("\n");
+ PrintF(out, "\n");
}
@@ -861,8 +817,13 @@ void JSFunction::JSFunctionPrint(FILE* out) {
shared()->name()->Print(out);
PrintF(out, "\n - context = ");
context()->ShortPrint(out);
- PrintF(out, "\n - literals = ");
- literals()->ShortPrint(out);
+ if (shared()->bound()) {
+ PrintF(out, "\n - bindings = ");
+ function_bindings()->ShortPrint(out);
+ } else {
+ PrintF(out, "\n - literals = ");
+ literals()->ShortPrint(out);
+ }
PrintF(out, "\n - code = ");
code()->ShortPrint(out);
PrintF(out, "\n");
@@ -892,7 +853,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
source->ToCString(DISALLOW_NULLS,
FAST_STRING_TRAVERSAL,
start, length, NULL);
- PrintF(out, "%s", *source_string);
+ PrintF(out, "%s", source_string.get());
}
// Script files are often large, hard to read.
// PrintF(out, "\n - script =");
@@ -1124,18 +1085,19 @@ void AllocationSite::AllocationSitePrint(FILE* out) {
dependent_code()->ShortPrint(out);
PrintF(out, "\n - nested site: ");
nested_site()->ShortPrint(out);
+ PrintF(out, "\n - memento found count: ");
+ Smi::FromInt(memento_found_count())->ShortPrint(out);
+ PrintF(out, "\n - memento create count: ");
+ Smi::FromInt(memento_create_count())->ShortPrint(out);
+ PrintF(out, "\n - pretenure decision: ");
+ Smi::FromInt(pretenure_decision())->ShortPrint(out);
PrintF(out, "\n - transition_info: ");
- if (transition_info()->IsCell()) {
- Cell* cell = Cell::cast(transition_info());
- Object* cell_contents = cell->value();
- if (cell_contents->IsSmi()) {
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(cell_contents)->value());
- PrintF(out, "Array allocation with ElementsKind ");
- PrintElementsKind(out, kind);
- PrintF(out, "\n");
- return;
- }
+ if (transition_info()->IsSmi()) {
+ ElementsKind kind = GetElementsKind();
+ PrintF(out, "Array allocation with ElementsKind ");
+ PrintElementsKind(out, kind);
+ PrintF(out, "\n");
+ return;
} else if (transition_info()->IsJSArray()) {
PrintF(out, "Array literal ");
transition_info()->ShortPrint(out);
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 93b7cb96ad..9c3378357d 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -60,6 +60,8 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
int>::Visit);
table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+ table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray);
+ table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray);
table_.Register(kVisitNativeContext,
&FixedBodyVisitor<StaticVisitor,
@@ -185,14 +187,15 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
+ table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit);
+
table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
table_.Register(kVisitNativeContext, &VisitNativeContext);
- table_.Register(kVisitAllocationSite,
- &FixedBodyVisitor<StaticVisitor,
- AllocationSite::BodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitAllocationSite, &VisitAllocationSite);
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
@@ -264,6 +267,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ // TODO(ulan): It could be better to record slots only for strongly embedded
+  // objects here and record slots for weakly embedded objects during clearing
+ // of non-live references in mark-compact.
if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), object)) {
StaticVisitor::MarkObject(heap, object);
}
@@ -275,7 +281,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCell(
Heap* heap, RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::CELL);
Cell* cell = rinfo->target_cell();
- StaticVisitor::MarkObject(heap, cell);
+ // No need to record slots because the cell space is not compacted during GC.
+ if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), cell)) {
+ StaticVisitor::MarkObject(heap, cell);
+ }
}
@@ -334,8 +343,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
for (int idx = Context::FIRST_WEAK_SLOT;
idx < Context::NATIVE_CONTEXT_SLOTS;
++idx) {
- Object** slot =
- HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+ Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
collector->RecordSlot(slot, slot, *slot);
}
}
@@ -389,12 +397,37 @@ void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ Object** slot =
+ HeapObject::RawField(object, AllocationSite::kDependentCodeOffset);
+ if (FLAG_collect_maps) {
+ // Mark allocation site dependent codes array but do not push it onto
+ // marking stack, this will make references from it weak. We will clean
+ // dead codes when we iterate over allocation sites in
+ // ClearNonLiveReferences.
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ } else {
+ StaticVisitor::VisitPointer(heap, slot);
+ }
+
+ StaticVisitor::VisitPointers(heap,
+ HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCode(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
Code* code = Code::cast(object);
if (FLAG_cleanup_code_caches_at_gc) {
- code->ClearTypeFeedbackCells(heap);
+ code->ClearTypeFeedbackInfo(heap);
}
if (FLAG_age_code && !Serializer::enabled()) {
code->MakeOlder(heap->mark_compact_collector()->marking_parity());
@@ -456,14 +489,17 @@ void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
- int first_ptr_offset = constant_pool->OffsetOfElementAt(
- constant_pool->first_ptr_index());
- int last_ptr_offset = constant_pool->OffsetOfElementAt(
- constant_pool->first_ptr_index() + constant_pool->count_of_ptr_entries());
- StaticVisitor::VisitPointers(
- heap,
- HeapObject::RawField(object, first_ptr_offset),
- HeapObject::RawField(object, last_ptr_offset));
+ if (constant_pool->count_of_ptr_entries() > 0) {
+ int first_ptr_offset = constant_pool->OffsetOfElementAt(
+ constant_pool->first_ptr_index());
+ int last_ptr_offset = constant_pool->OffsetOfElementAt(
+ constant_pool->first_ptr_index() +
+ constant_pool->count_of_ptr_entries() - 1);
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object, first_ptr_offset),
+ HeapObject::RawField(object, last_ptr_offset));
+ }
}
@@ -862,6 +898,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
IteratePointer(v, kHandlerTableOffset);
IteratePointer(v, kDeoptimizationDataOffset);
IteratePointer(v, kTypeFeedbackInfoOffset);
+ IteratePointer(v, kConstantPoolOffset);
RelocIterator it(this, mode_mask);
Isolate* isolate = this->GetIsolate();
@@ -895,6 +932,10 @@ void Code::CodeIterateBody(Heap* heap) {
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
+ StaticVisitor::VisitPointer(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kConstantPoolOffset));
+
RelocIterator it(this, mode_mask);
for (; !it.done(); it.next()) {
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index 5ced2cf7a3..16c51676b5 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -171,18 +171,27 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
return kVisitJSFunction;
case HEAP_NUMBER_TYPE:
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- case EXTERNAL_BYTE_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- case EXTERNAL_SHORT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- case EXTERNAL_INT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
+#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE:
+
+ TYPED_ARRAYS(EXTERNAL_ARRAY_CASE)
return GetVisitorIdForSize(kVisitDataObject,
kVisitDataObjectGeneric,
instance_size);
+#undef EXTERNAL_ARRAY_CASE
+
+ case FIXED_UINT8_ARRAY_TYPE:
+ case FIXED_INT8_ARRAY_TYPE:
+ case FIXED_UINT16_ARRAY_TYPE:
+ case FIXED_INT16_ARRAY_TYPE:
+ case FIXED_UINT32_ARRAY_TYPE:
+ case FIXED_INT32_ARRAY_TYPE:
+ case FIXED_FLOAT32_ARRAY_TYPE:
+ case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
+ return kVisitFixedTypedArray;
+
+ case FIXED_FLOAT64_ARRAY_TYPE:
+ return kVisitFixedFloat64Array;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE:
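The EXTERNAL_ARRAY_CASE and later TYPED_ARRAY_CASE macros rely on the X-macro idiom: TYPED_ARRAYS is a list macro that invokes its argument once per typed-array type, so a single definition expands into a case label for every type and new types only need to be added to the list. A minimal, self-contained sketch of the idiom follows; the type list and visitor names are invented for illustration and are not V8's actual TYPED_ARRAYS table.

#include <cstdio>

// Hypothetical list macro: invokes V once per (Type, TYPE) pair.
#define MY_TYPED_ARRAYS(V) \
  V(Uint8, UINT8)          \
  V(Int32, INT32)          \
  V(Float64, FLOAT64)

enum InstanceType {
#define DECLARE_TYPE(Type, TYPE) FIXED_##TYPE##_ARRAY_TYPE,
  MY_TYPED_ARRAYS(DECLARE_TYPE)
#undef DECLARE_TYPE
  OTHER_TYPE
};

const char* VisitorFor(InstanceType type) {
  switch (type) {
    // One case label per entry in the list, all generated by the macro.
#define TYPED_ARRAY_CASE(Type, TYPE) case FIXED_##TYPE##_ARRAY_TYPE:
    MY_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
      return "kVisitFixedTypedArray";
    default:
      return "kVisitOther";
  }
}

int main() {
  std::printf("%s\n", VisitorFor(FIXED_INT32_ARRAY_TYPE));  // kVisitFixedTypedArray
  return 0;
}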
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index 60e6f67471..41e5fd6fd3 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -47,13 +47,15 @@ namespace internal {
class StaticVisitorBase : public AllStatic {
public:
#define VISITOR_ID_LIST(V) \
- V(SeqOneByteString) \
+ V(SeqOneByteString) \
V(SeqTwoByteString) \
V(ShortcutCandidate) \
V(ByteArray) \
V(FreeSpace) \
V(FixedArray) \
V(FixedDoubleArray) \
+ V(FixedTypedArray) \
+ V(FixedFloat64Array) \
V(ConstantPoolArray) \
V(NativeContext) \
V(AllocationSite) \
@@ -142,7 +144,7 @@ class StaticVisitorBase : public AllStatic {
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
@@ -322,6 +324,10 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return FixedDoubleArray::SizeFor(length);
}
+ INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) {
+ return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+ }
+
INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
return JSObjectVisitor::Visit(map, object);
}
@@ -399,6 +405,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
+ INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index cf51024c86..3156edc142 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -33,6 +33,7 @@
#include "arguments.h"
#include "bootstrapper.h"
#include "codegen.h"
+#include "code-stubs.h"
#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
@@ -44,7 +45,6 @@
#include "isolate-inl.h"
#include "log.h"
#include "objects-inl.h"
-#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
@@ -119,6 +119,17 @@ bool Object::BooleanValue() {
}
+bool Object::IsCallable() {
+ Object* fun = this;
+ while (fun->IsJSFunctionProxy()) {
+ fun = JSFunctionProxy::cast(fun)->call_trap();
+ }
+ return fun->IsJSFunction() ||
+ (fun->IsHeapObject() &&
+ HeapObject::cast(fun)->map()->has_instance_call_handler());
+}
+
+
void Object::Lookup(Name* name, LookupResult* result) {
Object* holder = NULL;
if (IsJSReceiver()) {
@@ -203,6 +214,31 @@ bool Object::ToUint32(uint32_t* value) {
}
+bool FunctionTemplateInfo::IsTemplateFor(Object* object) {
+ if (!object->IsHeapObject()) return false;
+ return IsTemplateFor(HeapObject::cast(object)->map());
+}
+
+
+bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
+ // There is a constraint on the object; check.
+ if (!map->IsJSObjectMap()) return false;
+ // Fetch the constructor function of the object.
+ Object* cons_obj = map->constructor();
+ if (!cons_obj->IsJSFunction()) return false;
+ JSFunction* fun = JSFunction::cast(cons_obj);
+ // Iterate through the chain of inheriting function templates to
+ // see if the required one occurs.
+ for (Object* type = fun->shared()->function_data();
+ type->IsFunctionTemplateInfo();
+ type = FunctionTemplateInfo::cast(type)->parent_template()) {
+ if (type == this) return true;
+ }
+ // Didn't find the required type in the inheritance chain.
+ return false;
+}
+
+
template<typename To>
static inline To* CheckedCast(void *from) {
uintptr_t temp = reinterpret_cast<uintptr_t>(from);
@@ -651,7 +687,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
}
-Object* JSObject::GetNormalizedProperty(LookupResult* result) {
+Object* JSObject::GetNormalizedProperty(const LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
if (IsGlobalObject()) {
@@ -663,7 +699,7 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
void JSObject::SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
+ const LookupResult* result,
Handle<Object> value) {
ASSERT(!object->HasFastProperties());
NameDictionary* property_dictionary = object->property_dictionary();
@@ -696,7 +732,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<NameDictionary> property_dictionary(object->property_dictionary());
if (!name->IsUniqueName()) {
- name = object->GetIsolate()->factory()->InternalizedStringFromString(
+ name = object->GetIsolate()->factory()->InternalizeString(
Handle<String>::cast(name));
}
@@ -1028,7 +1064,13 @@ Object* Object::GetPrototype(Isolate* isolate) {
}
-MaybeObject* Object::GetHash(CreationFlag flag) {
+Map* Object::GetMarkerMap(Isolate* isolate) {
+ if (IsSmi()) return isolate->heap()->heap_number_map();
+ return HeapObject::cast(this)->map();
+}
+
+
+Object* Object::GetHash() {
// The object is either a number, a name, an odd-ball,
// a real JS object, or a Harmony proxy.
if (IsNumber()) {
@@ -1043,12 +1085,20 @@ MaybeObject* Object::GetHash(CreationFlag flag) {
uint32_t hash = Oddball::cast(this)->to_string()->Hash();
return Smi::FromInt(hash);
}
- if (IsJSReceiver()) {
- return JSReceiver::cast(this)->GetIdentityHash(flag);
- }
- UNREACHABLE();
- return Smi::FromInt(0);
+ ASSERT(IsJSReceiver());
+ return JSReceiver::cast(this)->GetIdentityHash();
+}
+
+
+Handle<Object> Object::GetOrCreateHash(Handle<Object> object,
+ Isolate* isolate) {
+ Handle<Object> hash(object->GetHash(), isolate);
+ if (hash->IsSmi())
+ return hash;
+
+ ASSERT(object->IsJSReceiver());
+ return JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver>::cast(object));
}
@@ -1223,27 +1273,37 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool is_ascii = this->IsOneByteRepresentation();
bool is_internalized = this->IsInternalizedString();
- // Morph the object to an external string by adjusting the map and
- // reinitializing the fields.
- if (size >= ExternalString::kSize) {
+ // Morph the string to an external string by replacing the map and
+ // reinitializing the fields. This won't work if
+ // - the space the existing string occupies is too small for a regular
+ // external string.
+ // - the existing string is in old pointer space and the backing store of
+ // the external string is not aligned. The GC cannot deal with fields
+ // containing an unaligned address that points to outside of V8's heap.
+ // In either case we resort to a short external string instead, omitting
+ // the field caching the address of the backing store. When we encounter
+ // short external strings in generated code, we need to bail out to runtime.
+ if (size < ExternalString::kSize ||
+ (!IsAligned(reinterpret_cast<intptr_t>(resource->data()), kPointerSize) &&
+ heap->old_pointer_space()->Contains(this))) {
this->set_map_no_write_barrier(
is_internalized
? (is_ascii
- ? heap->external_internalized_string_with_one_byte_data_map()
- : heap->external_internalized_string_map())
+ ? heap->
+ short_external_internalized_string_with_one_byte_data_map()
+ : heap->short_external_internalized_string_map())
: (is_ascii
- ? heap->external_string_with_one_byte_data_map()
- : heap->external_string_map()));
+ ? heap->short_external_string_with_one_byte_data_map()
+ : heap->short_external_string_map()));
} else {
this->set_map_no_write_barrier(
is_internalized
- ? (is_ascii
- ? heap->
- short_external_internalized_string_with_one_byte_data_map()
- : heap->short_external_internalized_string_map())
- : (is_ascii
- ? heap->short_external_string_with_one_byte_data_map()
- : heap->short_external_string_map()));
+ ? (is_ascii
+ ? heap->external_internalized_string_with_one_byte_data_map()
+ : heap->external_internalized_string_map())
+ : (is_ascii
+ ? heap->external_string_with_one_byte_data_map()
+ : heap->external_string_map()));
}
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_resource(resource);
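The new condition above chooses between the regular and the short external-string map from two inputs: whether the existing object is large enough to hold the field caching the data pointer, and whether an unaligned backing-store pointer would land in a space the GC scans for tagged words. A standalone sketch of that predicate, assuming only that IsAligned tests a power-of-two alignment; the function and parameter names are illustrative, not V8 API.

#include <cstdint>

namespace sketch {

// Assumed semantics of IsAligned: alignment is a power of two.
inline bool IsAligned(intptr_t value, intptr_t alignment) {
  return (value & (alignment - 1)) == 0;
}

// True when the string must fall back to the short external representation,
// mirroring the condition in String::MakeExternal above.
bool MustUseShortExternalString(int object_size,
                                int regular_external_size,  // ExternalString::kSize
                                const void* backing_store,  // resource->data()
                                bool in_old_pointer_space,
                                intptr_t pointer_size) {    // kPointerSize
  bool too_small = object_size < regular_external_size;
  bool unaligned_in_pointer_space =
      !IsAligned(reinterpret_cast<intptr_t>(backing_store), pointer_size) &&
      in_old_pointer_space;
  return too_small || unaligned_in_pointer_space;
}

}  // namespace sketch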
@@ -1284,16 +1344,26 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
}
bool is_internalized = this->IsInternalizedString();
- // Morph the object to an external string by adjusting the map and
- // reinitializing the fields. Use short version if space is limited.
- if (size >= ExternalString::kSize) {
- this->set_map_no_write_barrier(
- is_internalized ? heap->external_ascii_internalized_string_map()
- : heap->external_ascii_string_map());
- } else {
+ // Morph the string to an external string by replacing the map and
+ // reinitializing the fields. This won't work if
+ // - the space the existing string occupies is too small for a regular
+ // external string.
+ // - the existing string is in old pointer space and the backing store of
+ // the external string is not aligned. The GC cannot deal with fields
+ // containing an unaligned address that points to outside of V8's heap.
+ // In either case we resort to a short external string instead, omitting
+ // the field caching the address of the backing store. When we encounter
+ // short external strings in generated code, we need to bail out to runtime.
+ if (size < ExternalString::kSize ||
+ (!IsAligned(reinterpret_cast<intptr_t>(resource->data()), kPointerSize) &&
+ heap->old_pointer_space()->Contains(this))) {
this->set_map_no_write_barrier(
is_internalized ? heap->short_external_ascii_internalized_string_map()
: heap->short_external_ascii_string_map());
+ } else {
+ this->set_map_no_write_barrier(
+ is_internalized ? heap->external_ascii_internalized_string_map()
+ : heap->external_ascii_string_map());
}
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_resource(resource);
@@ -1591,48 +1661,25 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
case FREE_SPACE_TYPE:
accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
break;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- accumulator->Add("<ExternalPixelArray[%u]>",
- ExternalPixelArray::cast(this)->length());
- break;
- case EXTERNAL_BYTE_ARRAY_TYPE:
- accumulator->Add("<ExternalByteArray[%u]>",
- ExternalByteArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedByteArray[%u]>",
- ExternalUnsignedByteArray::cast(this)->length());
- break;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- accumulator->Add("<ExternalShortArray[%u]>",
- ExternalShortArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedShortArray[%u]>",
- ExternalUnsignedShortArray::cast(this)->length());
- break;
- case EXTERNAL_INT_ARRAY_TYPE:
- accumulator->Add("<ExternalIntArray[%u]>",
- ExternalIntArray::cast(this)->length());
- break;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- accumulator->Add("<ExternalUnsignedIntArray[%u]>",
- ExternalUnsignedIntArray::cast(this)->length());
- break;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- accumulator->Add("<ExternalFloatArray[%u]>",
- ExternalFloatArray::cast(this)->length());
- break;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- accumulator->Add("<ExternalDoubleArray[%u]>",
- ExternalDoubleArray::cast(this)->length());
+#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ accumulator->Add("<External" #Type "Array[%u]>", \
+ External##Type##Array::cast(this)->length()); \
+ break; \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ accumulator->Add("<Fixed" #Type "Array[%u]>", \
+ Fixed##Type##Array::cast(this)->length()); \
break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_SHORT_PRINT)
+#undef TYPED_ARRAY_SHORT_PRINT
+
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(this);
SmartArrayPointer<char> debug_name =
shared->DebugName()->ToCString();
if (debug_name[0] != 0) {
- accumulator->Add("<SharedFunctionInfo %s>", *debug_name);
+ accumulator->Add("<SharedFunctionInfo %s>", debug_name.get());
} else {
accumulator->Add("<SharedFunctionInfo>");
}
@@ -1803,20 +1850,21 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case SYMBOL_TYPE:
Symbol::BodyDescriptor::IterateBody(this, v);
break;
+
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
case FREE_SPACE_TYPE:
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- case EXTERNAL_BYTE_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- case EXTERNAL_SHORT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- case EXTERNAL_INT_ARRAY_TYPE:
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
break;
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
break;
@@ -2104,7 +2152,7 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
if (!name->IsUniqueName()) {
- name = isolate->factory()->InternalizedStringFromString(
+ name = isolate->factory()->InternalizeString(
Handle<String>::cast(name));
}
@@ -2123,8 +2171,7 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
if (object->HasFastProperties()) {
// Ensure the descriptor array does not get too big.
- if (object->map()->NumberOfOwnDescriptors() <
- DescriptorArray::kMaxNumberOfDescriptors) {
+ if (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors) {
// TODO(verwaest): Support other constants.
// if (mode == ALLOW_AS_CONSTANT &&
// !value->IsTheHole() &&
@@ -2149,7 +2196,7 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
object->map()->is_observed() &&
*name != isolate->heap()->hidden_string()) {
Handle<Object> old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(object, "new", name, old_value);
+ EnqueueChangeRecord(object, "add", name, old_value);
}
return value;
@@ -2167,31 +2214,18 @@ void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate);
}
Handle<Object> args[] = { type, object, name, old_value };
+ int argc = name.is_null() ? 2 : old_value->IsTheHole() ? 3 : 4;
bool threw;
+
Execution::Call(isolate,
Handle<JSFunction>(isolate->observers_notify_change()),
isolate->factory()->undefined_value(),
- old_value->IsTheHole() ? 3 : 4, args,
+ argc, args,
&threw);
ASSERT(!threw);
}
-void JSObject::DeliverChangeRecords(Isolate* isolate) {
- ASSERT(isolate->observer_delivery_pending());
- bool threw = false;
- Execution::Call(
- isolate,
- isolate->observers_deliver_changes(),
- isolate->factory()->undefined_value(),
- 0,
- NULL,
- &threw);
- ASSERT(!threw);
- isolate->set_observer_delivery_pending(false);
-}
-
-
Handle<Object> JSObject::SetPropertyPostInterceptor(
Handle<JSObject> object,
Handle<Name> name,
@@ -2544,7 +2578,7 @@ void Map::DeprecateTarget(Name* key, DescriptorArray* new_descriptors) {
DescriptorArray* to_replace = instance_descriptors();
Map* current = this;
while (current->instance_descriptors() == to_replace) {
- current->SetEnumLength(Map::kInvalidEnumCache);
+ current->SetEnumLength(kInvalidEnumCacheSentinel);
current->set_instance_descriptors(new_descriptors);
Object* next = current->GetBackPointer();
if (next->IsUndefined()) break;
@@ -2744,7 +2778,6 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
Handle<Map> new_map = split_map;
for (; descriptor < descriptors; descriptor++) {
new_map = Map::CopyInstallDescriptors(new_map, descriptor, new_descriptors);
- new_map->set_migration_target(true);
}
new_map->set_owns_descriptors(true);
@@ -2767,31 +2800,44 @@ Handle<Map> Map::GeneralizeAllFieldRepresentations(
}
-Map* Map::CurrentMapForDeprecated() {
- DisallowHeapAllocation no_allocation;
- if (!is_deprecated()) return this;
+Handle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
+ Handle<Map> proto_map(map);
+ while (proto_map->prototype()->IsJSObject()) {
+ Handle<JSObject> holder(JSObject::cast(proto_map->prototype()));
+ if (holder->map()->is_deprecated()) {
+ JSObject::TryMigrateInstance(holder);
+ }
+ proto_map = Handle<Map>(holder->map());
+ }
+ return CurrentMapForDeprecatedInternal(map);
+}
- DescriptorArray* old_descriptors = instance_descriptors();
- int descriptors = NumberOfOwnDescriptors();
- Map* root_map = FindRootMap();
+Handle<Map> Map::CurrentMapForDeprecatedInternal(Handle<Map> map) {
+ if (!map->is_deprecated()) return map;
+
+ DisallowHeapAllocation no_allocation;
+ DescriptorArray* old_descriptors = map->instance_descriptors();
+
+ int descriptors = map->NumberOfOwnDescriptors();
+ Map* root_map = map->FindRootMap();
// Check the state of the root map.
- if (!EquivalentToForTransition(root_map)) return NULL;
+ if (!map->EquivalentToForTransition(root_map)) return Handle<Map>();
int verbatim = root_map->NumberOfOwnDescriptors();
Map* updated = root_map->FindUpdatedMap(
verbatim, descriptors, old_descriptors);
- if (updated == NULL) return NULL;
+ if (updated == NULL) return Handle<Map>();
DescriptorArray* updated_descriptors = updated->instance_descriptors();
int valid = updated->NumberOfOwnDescriptors();
if (!updated_descriptors->IsMoreGeneralThan(
verbatim, valid, descriptors, old_descriptors)) {
- return NULL;
+ return Handle<Map>();
}
- return updated;
+ return handle(updated);
}
@@ -2956,52 +3002,44 @@ Handle<Object> JSReceiver::SetPropertyWithDefinedSetter(
}
-MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
+Handle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
+ Handle<JSObject> object,
uint32_t index,
- Object* value,
+ Handle<Object> value,
bool* found,
StrictModeFlag strict_mode) {
- Heap* heap = GetHeap();
- for (Object* pt = GetPrototype();
- pt != heap->null_value();
- pt = pt->GetPrototype(GetIsolate())) {
- if (pt->IsJSProxy()) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> proxy(JSProxy::cast(pt));
- Handle<JSObject> self(this, isolate);
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- Handle<Object> value_handle(value, isolate);
- Handle<Object> result = JSProxy::SetPropertyViaPrototypesWithHandler(
- proxy, self, name, value_handle, NONE, strict_mode, found);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
- }
- if (!JSObject::cast(pt)->HasDictionaryElements()) {
+ Isolate *isolate = object->GetIsolate();
+ for (Handle<Object> proto = handle(object->GetPrototype(), isolate);
+ !proto->IsNull();
+ proto = handle(proto->GetPrototype(isolate), isolate)) {
+ if (proto->IsJSProxy()) {
+ return JSProxy::SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy>::cast(proto),
+ object,
+ isolate->factory()->Uint32ToString(index), // name
+ value,
+ NONE,
+ strict_mode,
+ found);
+ }
+ Handle<JSObject> js_proto = Handle<JSObject>::cast(proto);
+ if (!js_proto->HasDictionaryElements()) {
continue;
}
- SeededNumberDictionary* dictionary =
- JSObject::cast(pt)->element_dictionary();
+ Handle<SeededNumberDictionary> dictionary(js_proto->element_dictionary());
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
*found = true;
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> self(this, isolate);
Handle<Object> structure(dictionary->ValueAt(entry), isolate);
- Handle<Object> value_handle(value, isolate);
- Handle<JSObject> holder(JSObject::cast(pt));
- Handle<Object> result = SetElementWithCallback(
- self, structure, index, value_handle, holder, strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ return SetElementWithCallback(object, structure, index, value, js_proto,
+ strict_mode);
}
}
}
*found = false;
- return heap->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
@@ -3097,7 +3135,7 @@ static int AppendUniqueCallbacks(NeanderArray* callbacks,
Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
if (entry->name()->IsUniqueName()) continue;
Handle<String> key =
- isolate->factory()->InternalizedStringFromString(
+ isolate->factory()->InternalizeString(
Handle<String>(String::cast(entry->name())));
entry->set_name(*key);
}
@@ -3755,7 +3793,7 @@ void JSProxy::Fix(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
// Save identity hash.
- Handle<Object> hash = JSProxy::GetIdentityHash(proxy, OMIT_CREATION);
+ Handle<Object> hash(proxy->GetIdentityHash(), isolate);
if (proxy->IsJSFunctionProxy()) {
isolate->factory()->BecomeJSFunction(proxy);
@@ -3767,7 +3805,8 @@ void JSProxy::Fix(Handle<JSProxy> proxy) {
// Inherit identity, if it was present.
if (hash->IsSmi()) {
- JSObject::SetIdentityHash(Handle<JSObject>::cast(proxy), Smi::cast(*hash));
+ JSObject::SetIdentityHash(Handle<JSObject>::cast(proxy),
+ Handle<Smi>::cast(hash));
}
}
@@ -3842,6 +3881,7 @@ void JSObject::MigrateInstance(Handle<JSObject> object) {
Handle<Map> original_map(object->map());
GeneralizeFieldRepresentation(
object, 0, Representation::None(), ALLOW_AS_CONSTANT);
+ object->map()->set_migration_target(true);
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, object->map());
}
@@ -3849,10 +3889,10 @@ void JSObject::MigrateInstance(Handle<JSObject> object) {
Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
- Map* new_map = object->map()->CurrentMapForDeprecated();
- if (new_map == NULL) return Handle<Object>();
Handle<Map> original_map(object->map());
- JSObject::MigrateToMap(object, handle(new_map));
+ Handle<Map> new_map = Map::CurrentMapForDeprecatedInternal(original_map);
+ if (new_map.is_null()) return Handle<Object>();
+ JSObject::MigrateToMap(object, new_map);
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, object->map());
}
@@ -4094,14 +4134,14 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
if (is_observed) {
if (lookup->IsTransition()) {
- EnqueueChangeRecord(object, "new", name, old_value);
+ EnqueueChangeRecord(object, "add", name, old_value);
} else {
LookupResult new_lookup(isolate);
object->LocalLookup(*name, &new_lookup, true);
if (new_lookup.IsDataProperty()) {
Handle<Object> new_value = Object::GetProperty(object, name);
if (!new_value->SameValue(*old_value)) {
- EnqueueChangeRecord(object, "updated", name, old_value);
+ EnqueueChangeRecord(object, "update", name, old_value);
}
}
}
@@ -4111,29 +4151,6 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
}
-MaybeObject* JSObject::SetLocalPropertyIgnoreAttributesTrampoline(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- ValueType value_type,
- StoreMode mode,
- ExtensibilityCheck extensibility_check) {
- // TODO(mstarzinger): The trampoline is a giant hack, don't use it anywhere
- // else or handlification people will start hating you for all eternity.
- HandleScope scope(GetIsolate());
- IdempotentPointerToHandleCodeTrampoline trampoline(GetIsolate());
- return trampoline.CallWithReturnValue(
- &JSObject::SetLocalPropertyIgnoreAttributes,
- Handle<JSObject>(this),
- Handle<Name>(key),
- Handle<Object>(value, GetIsolate()),
- attributes,
- value_type,
- mode,
- extensibility_check);
-}
-
-
// Set a real local property, even if it is READ_ONLY. If the property is not
// present, add it with attributes NONE. This code is an exact clone of
// SetProperty, with the check for IsReadOnly and the check for a
@@ -4186,9 +4203,12 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
// Check for accessor in prototype chain removed here in clone.
if (!lookup.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &lookup);
+ TransitionFlag flag = lookup.IsFound()
+ ? OMIT_TRANSITION : INSERT_TRANSITION;
// Neither properties nor transitions found.
return AddProperty(object, name, value, attributes, kNonStrictMode,
- MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode);
+ MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode, flag);
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
@@ -4234,9 +4254,9 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
if (is_observed) {
if (lookup.IsTransition()) {
- EnqueueChangeRecord(object, "new", name, old_value);
+ EnqueueChangeRecord(object, "add", name, old_value);
} else if (old_value->IsTheHole()) {
- EnqueueChangeRecord(object, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else {
LookupResult new_lookup(isolate);
object->LocalLookup(*name, &new_lookup, true);
@@ -4247,9 +4267,9 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
}
if (new_lookup.GetAttributes() != old_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(object, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else if (value_changed) {
- EnqueueChangeRecord(object, "updated", name, old_value);
+ EnqueueChangeRecord(object, "update", name, old_value);
}
}
}
@@ -4788,52 +4808,52 @@ Smi* JSReceiver::GenerateIdentityHash() {
}
-void JSObject::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->SetHiddenProperty(
- object->GetHeap()->identity_hash_string(), hash));
+void JSObject::SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash) {
+ Isolate* isolate = object->GetIsolate();
+ SetHiddenProperty(object, isolate->factory()->identity_hash_string(), hash);
}
-int JSObject::GetIdentityHash(Handle<JSObject> object) {
- CALL_AND_RETRY_OR_DIE(object->GetIsolate(),
- object->GetIdentityHash(ALLOW_CREATION),
- return Smi::cast(__object__)->value(),
- return 0);
+Object* JSObject::GetIdentityHash() {
+ Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string());
+ return stored_value->IsSmi() ? stored_value : GetHeap()->undefined_value();
}
-MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
- Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string());
- if (stored_value->IsSmi()) return stored_value;
+Handle<Object> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
+ Handle<Object> hash(object->GetIdentityHash(), object->GetIsolate());
+ if (hash->IsSmi())
+ return hash;
- // Do not generate permanent identity hash code if not requested.
- if (flag == OMIT_CREATION) return GetHeap()->undefined_value();
+ Isolate* isolate = object->GetIsolate();
+
+ hash = handle(object->GenerateIdentityHash(), isolate);
+ Handle<Object> result = SetHiddenProperty(object,
+ isolate->factory()->identity_hash_string(), hash);
- Smi* hash = GenerateIdentityHash();
- MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_string(),
- hash);
- if (result->IsFailure()) return result;
- if (result->ToObjectUnchecked()->IsUndefined()) {
+ if (result->IsUndefined()) {
// Trying to get hash of detached proxy.
- return Smi::FromInt(0);
+ return handle(Smi::FromInt(0), isolate);
}
+
return hash;
}
-Handle<Object> JSProxy::GetIdentityHash(Handle<JSProxy> proxy,
- CreationFlag flag) {
- CALL_HEAP_FUNCTION(proxy->GetIsolate(), proxy->GetIdentityHash(flag), Object);
+Object* JSProxy::GetIdentityHash() {
+ return this->hash();
}
-MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
- Object* hash = this->hash();
- if (!hash->IsSmi() && flag == ALLOW_CREATION) {
- hash = GenerateIdentityHash();
- set_hash(hash);
- }
+Handle<Object> JSProxy::GetOrCreateIdentityHash(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+
+ Handle<Object> hash(proxy->GetIdentityHash(), isolate);
+ if (hash->IsSmi())
+ return hash;
+
+ hash = handle(proxy->GenerateIdentityHash(), isolate);
+ proxy->set_hash(*hash);
return hash;
}
@@ -4849,9 +4869,7 @@ Object* JSObject::GetHiddenProperty(Name* key) {
return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
}
ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- Object* inline_value = hidden_lookup->ToObjectUnchecked();
+ Object* inline_value = GetHiddenPropertiesHashTable();
if (inline_value->IsSmi()) {
// Handle inline-stored identity hash.
@@ -4870,53 +4888,45 @@ Object* JSObject::GetHiddenProperty(Name* key) {
}
-Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
+Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->SetHiddenProperty(*key, *value),
- Object);
-}
-
+ Isolate* isolate = object->GetIsolate();
-MaybeObject* JSObject::SetHiddenProperty(Name* key, Object* value) {
ASSERT(key->IsUniqueName());
- if (IsJSGlobalProxy()) {
+ if (object->IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
+ Handle<Object> proxy_parent(object->GetPrototype(), isolate);
// If the proxy is detached, return undefined.
- if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+ if (proxy_parent->IsNull()) return isolate->factory()->undefined_value();
ASSERT(proxy_parent->IsJSGlobalObject());
- return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
+ return SetHiddenProperty(Handle<JSObject>::cast(proxy_parent), key, value);
}
- ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- Object* inline_value = hidden_lookup->ToObjectUnchecked();
+ ASSERT(!object->IsJSGlobalProxy());
+
+ Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
// If there is no backing store yet, store the identity hash inline.
if (value->IsSmi() &&
- key == GetHeap()->identity_hash_string() &&
+ *key == *isolate->factory()->identity_hash_string() &&
(inline_value->IsUndefined() || inline_value->IsSmi())) {
- return SetHiddenPropertiesHashTable(value);
+ return JSObject::SetHiddenPropertiesHashTable(object, value);
}
- hidden_lookup = GetHiddenPropertiesHashTable(CREATE_NEW_IF_ABSENT);
- ObjectHashTable* hashtable;
- if (!hidden_lookup->To(&hashtable)) return hidden_lookup;
+ Handle<ObjectHashTable> hashtable =
+ GetOrCreateHiddenPropertiesHashtable(object);
// If it was found, check if the key is already in the dictionary.
- MaybeObject* insert_result = hashtable->Put(key, value);
- ObjectHashTable* new_table;
- if (!insert_result->To(&new_table)) return insert_result;
- if (new_table != hashtable) {
+ Handle<ObjectHashTable> new_table = ObjectHashTable::Put(hashtable, key,
+ value);
+ if (*new_table != *hashtable) {
// If adding the key expanded the dictionary (i.e., Add returned a new
// dictionary), store it back to the object.
- MaybeObject* store_result = SetHiddenPropertiesHashTable(new_table);
- if (store_result->IsFailure()) return store_result;
+ SetHiddenPropertiesHashTable(object, new_table);
}
+
// Return this to mark success.
- return this;
+ return object;
}
@@ -4931,16 +4941,14 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
return DeleteHiddenProperty(Handle<JSObject>::cast(proto), key);
}
- MaybeObject* hidden_lookup =
- object->GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- Object* inline_value = hidden_lookup->ToObjectUnchecked();
+ Object* inline_value = object->GetHiddenPropertiesHashTable();
// We never delete (inline-stored) identity hashes.
- ASSERT(*key != isolate->heap()->identity_hash_string());
+ ASSERT(*key != *isolate->factory()->identity_hash_string());
if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
- PutIntoObjectHashTable(hashtable, key, isolate->factory()->the_hole_value());
+ ObjectHashTable::Put(hashtable, key, isolate->factory()->the_hole_value());
}
@@ -4951,10 +4959,8 @@ bool JSObject::HasHiddenProperties() {
}
-MaybeObject* JSObject::GetHiddenPropertiesHashTable(
- InitializeHiddenProperties init_option) {
+Object* JSObject::GetHiddenPropertiesHashTable() {
ASSERT(!IsJSGlobalProxy());
- Object* inline_value;
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden string. Since the
@@ -4966,93 +4972,97 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- MaybeObject* maybe_value = this->FastPropertyAt(
- descriptors->GetDetails(sorted_index).representation(),
+ ASSERT(descriptors->GetDetails(sorted_index).representation().
+ IsCompatibleForLoad(Representation::Tagged()));
+ return this->RawFastPropertyAt(
descriptors->GetFieldIndex(sorted_index));
- if (!maybe_value->To(&inline_value)) return maybe_value;
} else {
- inline_value = GetHeap()->undefined_value();
+ return GetHeap()->undefined_value();
}
} else {
- inline_value = GetHeap()->undefined_value();
+ return GetHeap()->undefined_value();
}
} else {
PropertyAttributes attributes;
// You can't install a getter on a property indexed by the hidden string,
// so we can be sure that GetLocalPropertyPostInterceptor returns a real
// object.
- inline_value =
- GetLocalPropertyPostInterceptor(this,
- GetHeap()->hidden_string(),
- &attributes)->ToObjectUnchecked();
+ return GetLocalPropertyPostInterceptor(this,
+ GetHeap()->hidden_string(),
+ &attributes)->ToObjectUnchecked();
}
+}
- if (init_option == ONLY_RETURN_INLINE_VALUE ||
- inline_value->IsHashTable()) {
- return inline_value;
- }
+Handle<ObjectHashTable> JSObject::GetOrCreateHiddenPropertiesHashtable(
+ Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
- ObjectHashTable* hashtable;
static const int kInitialCapacity = 4;
- MaybeObject* maybe_obj =
- ObjectHashTable::Allocate(GetHeap(),
- kInitialCapacity,
- ObjectHashTable::USE_CUSTOM_MINIMUM_CAPACITY);
- if (!maybe_obj->To<ObjectHashTable>(&hashtable)) return maybe_obj;
+ Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
+ if (inline_value->IsHashTable()) {
+ return Handle<ObjectHashTable>::cast(inline_value);
+ }
+
+ Handle<ObjectHashTable> hashtable = isolate->factory()->NewObjectHashTable(
+ kInitialCapacity,
+ USE_CUSTOM_MINIMUM_CAPACITY);
if (inline_value->IsSmi()) {
// We were storing the identity hash inline and now allocated an actual
// dictionary. Put the identity hash into the new dictionary.
- MaybeObject* insert_result =
- hashtable->Put(GetHeap()->identity_hash_string(), inline_value);
- ObjectHashTable* new_table;
- if (!insert_result->To(&new_table)) return insert_result;
- // We expect no resizing for the first insert.
- ASSERT_EQ(hashtable, new_table);
+ hashtable = ObjectHashTable::Put(hashtable,
+ isolate->factory()->identity_hash_string(),
+ inline_value);
}
- MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
- GetHeap()->hidden_string(),
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ object,
+ isolate->factory()->hidden_string(),
hashtable,
DONT_ENUM,
OPTIMAL_REPRESENTATION,
ALLOW_AS_CONSTANT,
OMIT_EXTENSIBILITY_CHECK);
- if (store_result->IsFailure()) return store_result;
+
return hashtable;
}
-MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
- ASSERT(!IsJSGlobalProxy());
+Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
+ Handle<Object> value) {
+ ASSERT(!object->IsJSGlobalProxy());
+
+ Isolate* isolate = object->GetIsolate();
+
// We can store the identity hash inline iff there is no backing store
// for hidden properties yet.
- ASSERT(HasHiddenProperties() != value->IsSmi());
- if (HasFastProperties()) {
+ ASSERT(object->HasHiddenProperties() != value->IsSmi());
+ if (object->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden string. Since the
// hidden strings hash code is zero (and no other name has hash
// code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = this->map()->instance_descriptors();
+ DescriptorArray* descriptors = object->map()->instance_descriptors();
if (descriptors->number_of_descriptors() > 0) {
int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
- sorted_index < map()->NumberOfOwnDescriptors()) {
+ if (descriptors->GetKey(sorted_index) == isolate->heap()->hidden_string()
+ && sorted_index < object->map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index), value);
- return this;
+ object->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
+ *value);
+ return object;
}
}
}
- MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
- GetHeap()->hidden_string(),
- value,
- DONT_ENUM,
- OPTIMAL_REPRESENTATION,
- ALLOW_AS_CONSTANT,
- OMIT_EXTENSIBILITY_CHECK);
- if (store_result->IsFailure()) return store_result;
- return this;
+
+ SetLocalPropertyIgnoreAttributes(object,
+ isolate->factory()->hidden_string(),
+ value,
+ DONT_ENUM,
+ OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ OMIT_EXTENSIBILITY_CHECK);
+ return object;
}
@@ -5205,7 +5215,7 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
if (should_enqueue_change_record && !HasLocalElement(object, index)) {
Handle<String> name = factory->Uint32ToString(index);
- EnqueueChangeRecord(object, "deleted", name, old_value);
+ EnqueueChangeRecord(object, "delete", name, old_value);
}
return result;
@@ -5281,7 +5291,7 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
}
if (is_observed && !HasLocalProperty(object, name)) {
- EnqueueChangeRecord(object, "deleted", name, old_value);
+ EnqueueChangeRecord(object, "delete", name, old_value);
}
return result;
@@ -5357,19 +5367,18 @@ bool JSObject::ReferencesObject(Object* obj) {
// Check if the object is among the indexed properties.
ElementsKind kind = GetElementsKind();
switch (kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ // Raw pixels and external arrays do not reference other
+ // objects.
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- // Raw pixels and external arrays do not reference other
- // objects.
break;
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -5443,6 +5452,9 @@ bool JSObject::ReferencesObject(Object* obj) {
Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
+
+ if (!object->map()->is_extensible()) return object;
+
if (object->IsAccessCheckNeeded() &&
!isolate->MayNamedAccess(*object,
isolate->heap()->undefined_value(),
@@ -5485,6 +5497,11 @@ Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
new_map->set_is_extensible(false);
object->set_map(*new_map);
ASSERT(!object->map()->is_extensible());
+
+ if (FLAG_harmony_observation && object->map()->is_observed()) {
+ EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
+ isolate->factory()->the_hole_value());
+ }
return object;
}
@@ -5513,6 +5530,7 @@ static void FreezeDictionary(Dictionary* dictionary) {
Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
// Freezing non-strict arguments should be handled elsewhere.
ASSERT(!object->HasNonStrictArgumentsElements());
+ ASSERT(!object->map()->is_observed());
if (object->map()->is_frozen()) return object;
@@ -5627,12 +5645,6 @@ void JSObject::SetObserved(Handle<JSObject> object) {
if (object->map()->is_observed())
return;
- if (!object->HasExternalArrayElements()) {
- // Go to dictionary mode, so that we don't skip map checks.
- NormalizeElements(object);
- ASSERT(!object->HasFastElements());
- }
-
LookupResult result(isolate);
object->map()->LookupTransition(*object,
isolate->heap()->observed_symbol(),
@@ -5646,20 +5658,12 @@ void JSObject::SetObserved(Handle<JSObject> object) {
new_map = Map::CopyForObserved(handle(object->map()));
} else {
new_map = Map::Copy(handle(object->map()));
- new_map->set_is_observed(true);
+ new_map->set_is_observed();
}
object->set_map(*new_map);
}
-Handle<JSObject> JSObject::Copy(Handle<JSObject> object,
- Handle<AllocationSite> site) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- isolate->heap()->CopyJSObject(*object, *site), JSObject);
-}
-
-
Handle<JSObject> JSObject::Copy(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
@@ -5667,256 +5671,230 @@ Handle<JSObject> JSObject::Copy(Handle<JSObject> object) {
}
+template<class ContextObject>
class JSObjectWalkVisitor {
public:
- explicit JSObjectWalkVisitor(AllocationSiteContext* site_context) :
- site_context_(site_context) {}
- virtual ~JSObjectWalkVisitor() {}
-
- Handle<JSObject> Visit(Handle<JSObject> object) {
- return StructureWalk(object);
- }
+ JSObjectWalkVisitor(ContextObject* site_context, bool copying,
+ JSObject::DeepCopyHints hints)
+ : site_context_(site_context),
+ copying_(copying),
+ hints_(hints) {}
- virtual bool is_copying() = 0;
-
- protected:
Handle<JSObject> StructureWalk(Handle<JSObject> object);
- // The returned handle will be used for the object in all subsequent usages.
- // This allows VisitObject to make a copy of the object if desired.
- virtual Handle<JSObject> VisitObject(Handle<JSObject> object) = 0;
- virtual Handle<JSObject> VisitElementOrProperty(Handle<JSObject> object,
- Handle<JSObject> value) = 0;
-
- AllocationSiteContext* site_context() { return site_context_; }
-
- private:
- AllocationSiteContext* site_context_;
-};
-
-
-class JSObjectCopyVisitor: public JSObjectWalkVisitor {
- public:
- explicit JSObjectCopyVisitor(AllocationSiteContext* site_context)
- : JSObjectWalkVisitor(site_context) {}
-
- virtual bool is_copying() V8_OVERRIDE { return true; }
-
- // The returned handle will be used for the object in all
- // subsequent usages. This allows VisitObject to make a copy
- // of the object if desired.
- virtual Handle<JSObject> VisitObject(Handle<JSObject> object) V8_OVERRIDE {
- // Only create a memento if
- // 1) we have a JSArray, and
- // 2) the elements kind is palatable
- // 3) allow_mementos is true
- Handle<JSObject> copy;
- if (site_context()->activated() &&
- AllocationSite::CanTrack(object->map()->instance_type()) &&
- AllocationSite::GetMode(object->GetElementsKind()) ==
- TRACK_ALLOCATION_SITE) {
- copy = JSObject::Copy(object, site_context()->current());
- } else {
- copy = JSObject::Copy(object);
- }
-
- return copy;
- }
-
- virtual Handle<JSObject> VisitElementOrProperty(
- Handle<JSObject> object,
- Handle<JSObject> value) V8_OVERRIDE {
+ protected:
+ inline Handle<JSObject> VisitElementOrProperty(Handle<JSObject> object,
+ Handle<JSObject> value) {
Handle<AllocationSite> current_site = site_context()->EnterNewScope();
Handle<JSObject> copy_of_value = StructureWalk(value);
site_context()->ExitScope(current_site, value);
return copy_of_value;
}
-};
+ inline ContextObject* site_context() { return site_context_; }
+ inline Isolate* isolate() { return site_context()->isolate(); }
-class JSObjectCreateAllocationSitesVisitor: public JSObjectWalkVisitor {
- public:
- explicit JSObjectCreateAllocationSitesVisitor(
- AllocationSiteContext* site_context)
- : JSObjectWalkVisitor(site_context) {}
+ inline bool copying() const { return copying_; }
- virtual bool is_copying() V8_OVERRIDE { return false; }
+ private:
+ ContextObject* site_context_;
+ const bool copying_;
+ const JSObject::DeepCopyHints hints_;
+};
- // The returned handle will be used for the object in all
- // subsequent usages. This allows VisitObject to make a copy
- // of the object if desired.
- virtual Handle<JSObject> VisitObject(Handle<JSObject> object) V8_OVERRIDE {
- return object;
- }
- virtual Handle<JSObject> VisitElementOrProperty(
- Handle<JSObject> object,
- Handle<JSObject> value) V8_OVERRIDE {
- Handle<AllocationSite> current_site = site_context()->EnterNewScope();
- value = StructureWalk(value);
- site_context()->ExitScope(current_site, value);
- return value;
- }
-};
+template <class ContextObject>
+Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
+ Handle<JSObject> object) {
+ Isolate* isolate = this->isolate();
+ bool copying = this->copying();
+ bool shallow = hints_ == JSObject::kObjectIsShallowArray;
+ if (!shallow) {
+ StackLimitCheck check(isolate);
-Handle<JSObject> JSObjectWalkVisitor::StructureWalk(Handle<JSObject> object) {
- bool copying = is_copying();
- Isolate* isolate = object->GetIsolate();
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) {
- isolate->StackOverflow();
- return Handle<JSObject>::null();
+ if (check.HasOverflowed()) {
+ isolate->StackOverflow();
+ return Handle<JSObject>::null();
+ }
}
if (object->map()->is_deprecated()) {
JSObject::MigrateInstance(object);
}
- Handle<JSObject> copy = VisitObject(object);
+ Handle<JSObject> copy;
+ if (copying) {
+ Handle<AllocationSite> site_to_pass;
+ if (site_context()->ShouldCreateMemento(object)) {
+ site_to_pass = site_context()->current();
+ }
+ CALL_AND_RETRY_OR_DIE(isolate,
+ isolate->heap()->CopyJSObject(*object,
+ site_to_pass.is_null() ? NULL : *site_to_pass),
+ { copy = Handle<JSObject>(JSObject::cast(__object__),
+ isolate);
+ break;
+ },
+ return Handle<JSObject>());
+ } else {
+ copy = object;
+ }
+
ASSERT(copying || copy.is_identical_to(object));
- HandleScope scope(isolate);
+ ElementsKind kind = copy->GetElementsKind();
+ if (copying && IsFastSmiOrObjectElementsKind(kind) &&
+ FixedArray::cast(copy->elements())->map() ==
+ isolate->heap()->fixed_cow_array_map()) {
+ isolate->counters()->cow_arrays_created_runtime()->Increment();
+ }
- // Deep copy local properties.
- if (copy->HasFastProperties()) {
- Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
- int limit = copy->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
- int index = descriptors->GetFieldIndex(i);
- Handle<Object> value(object->RawFastPropertyAt(index), isolate);
- if (value->IsJSObject()) {
- value = VisitElementOrProperty(copy, Handle<JSObject>::cast(value));
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>());
- } else {
- Representation representation = details.representation();
- value = NewStorageFor(isolate, value, representation);
- }
- if (copying) {
- copy->FastPropertyAtPut(index, *value);
- }
- }
- } else {
- Handle<FixedArray> names =
- isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties());
- copy->GetLocalPropertyNames(*names, 0);
- for (int i = 0; i < names->length(); i++) {
- ASSERT(names->get(i)->IsString());
- Handle<String> key_string(String::cast(names->get(i)));
- PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(*key_string);
- // Only deep copy fields from the object literal expression.
- // In particular, don't try to copy the length attribute of
- // an array.
- if (attributes != NONE) continue;
- Handle<Object> value(
- copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(),
- isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> result = VisitElementOrProperty(
- copy, Handle<JSObject>::cast(value));
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (!shallow) {
+ HandleScope scope(isolate);
+
+ // Deep copy local properties.
+ if (copy->HasFastProperties()) {
+ Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
+ int limit = copy->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ int index = descriptors->GetFieldIndex(i);
+ Handle<Object> value(object->RawFastPropertyAt(index), isolate);
+ if (value->IsJSObject()) {
+ value = VisitElementOrProperty(copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>());
+ } else {
+ Representation representation = details.representation();
+ value = NewStorageFor(isolate, value, representation);
+ }
if (copying) {
- // Creating object copy for literals. No strict mode needed.
- CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty(
- copy, key_string, result, NONE, kNonStrictMode));
+ copy->FastPropertyAtPut(index, *value);
+ }
+ }
+ } else {
+ Handle<FixedArray> names =
+ isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties());
+ copy->GetLocalPropertyNames(*names, 0);
+ for (int i = 0; i < names->length(); i++) {
+ ASSERT(names->get(i)->IsString());
+ Handle<String> key_string(String::cast(names->get(i)));
+ PropertyAttributes attributes =
+ copy->GetLocalPropertyAttribute(*key_string);
+ // Only deep copy fields from the object literal expression.
+ // In particular, don't try to copy the length attribute of
+ // an array.
+ if (attributes != NONE) continue;
+ Handle<Object> value(
+ copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(),
+ isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ // Creating object copy for literals. No strict mode needed.
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty(
+ copy, key_string, result, NONE, kNonStrictMode));
+ }
}
}
}
- }
- // Deep copy local elements.
- // Pixel elements cannot be created using an object literal.
- ASSERT(!copy->HasExternalArrayElements());
- switch (copy->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
- if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
+ // Deep copy local elements.
+ // Pixel elements cannot be created using an object literal.
+ ASSERT(!copy->HasExternalArrayElements());
+ switch (kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
+ if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
#ifdef DEBUG
- for (int i = 0; i < elements->length(); i++) {
- ASSERT(!elements->get(i)->IsJSObject());
- }
+ for (int i = 0; i < elements->length(); i++) {
+ ASSERT(!elements->get(i)->IsJSObject());
+ }
#endif
- } else {
- for (int i = 0; i < elements->length(); i++) {
- Handle<Object> value(elements->get(i), isolate);
- ASSERT(value->IsSmi() ||
- value->IsTheHole() ||
- (IsFastObjectElementsKind(copy->GetElementsKind())));
- if (value->IsJSObject()) {
- Handle<JSObject> result = VisitElementOrProperty(
- copy, Handle<JSObject>::cast(value));
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
- if (copying) {
- elements->set(i, *result);
+ } else {
+ for (int i = 0; i < elements->length(); i++) {
+ Handle<Object> value(elements->get(i), isolate);
+ ASSERT(value->IsSmi() ||
+ value->IsTheHole() ||
+ (IsFastObjectElementsKind(copy->GetElementsKind())));
+ if (value->IsJSObject()) {
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ elements->set(i, *result);
+ }
}
}
}
+ break;
}
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> element_dictionary(
- copy->element_dictionary());
- int capacity = element_dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = element_dictionary->KeyAt(i);
- if (element_dictionary->IsKey(k)) {
- Handle<Object> value(element_dictionary->ValueAt(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> result = VisitElementOrProperty(
- copy, Handle<JSObject>::cast(value));
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
- if (copying) {
- element_dictionary->ValueAtPut(i, *result);
+ case DICTIONARY_ELEMENTS: {
+ Handle<SeededNumberDictionary> element_dictionary(
+ copy->element_dictionary());
+ int capacity = element_dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = element_dictionary->KeyAt(i);
+ if (element_dictionary->IsKey(k)) {
+ Handle<Object> value(element_dictionary->ValueAt(i), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ element_dictionary->ValueAtPut(i, *result);
+ }
}
}
}
+ break;
}
- break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
+
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ // No contained objects, nothing to do.
+ break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- // No contained objects, nothing to do.
- break;
}
+
return copy;
}
-Handle<JSObject> JSObject::DeepWalk(Handle<JSObject> object,
- AllocationSiteContext* site_context) {
- JSObjectCreateAllocationSitesVisitor v(site_context);
- Handle<JSObject> result = v.Visit(object);
- ASSERT(!v.is_copying() &&
- (result.is_null() || result.is_identical_to(object)));
+Handle<JSObject> JSObject::DeepWalk(
+ Handle<JSObject> object,
+ AllocationSiteCreationContext* site_context) {
+ JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, false,
+ kNoHints);
+ Handle<JSObject> result = v.StructureWalk(object);
+ ASSERT(result.is_null() || result.is_identical_to(object));
return result;
}
Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object,
- AllocationSiteContext* site_context) {
- JSObjectCopyVisitor v(site_context);
- Handle<JSObject> copy = v.Visit(object);
- ASSERT(v.is_copying() && !copy.is_identical_to(object));
+ AllocationSiteUsageContext* site_context,
+ DeepCopyHints hints) {
+ JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints);
+ Handle<JSObject> copy = v.StructureWalk(object);
+ ASSERT(!copy.is_identical_to(object));
return copy;
}
@@ -5934,7 +5912,7 @@ bool JSReceiver::IsSimpleEnum() {
if (!o->IsJSObject()) return false;
JSObject* curr = JSObject::cast(o);
int enum_length = curr->map()->EnumLength();
- if (enum_length == Map::kInvalidEnumCache) return false;
+ if (enum_length == kInvalidEnumCacheSentinel) return false;
ASSERT(!curr->HasNamedInterceptor());
ASSERT(!curr->HasIndexedInterceptor());
ASSERT(!curr->IsAccessCheckNeeded());
@@ -5945,6 +5923,24 @@ bool JSReceiver::IsSimpleEnum() {
}
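+// Returns true if |key| is excluded by the attribute |filter|.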
+static bool FilterKey(Object* key, PropertyAttributes filter) {
+ if ((filter & SYMBOLIC) && key->IsSymbol()) {
+ return true;
+ }
+
+ if ((filter & PRIVATE_SYMBOL) &&
+ key->IsSymbol() && Symbol::cast(key)->is_private()) {
+ return true;
+ }
+
+ if ((filter & STRING) && !key->IsSymbol()) {
+ return true;
+ }
+
+ return false;
+}
+
+
int Map::NumberOfDescribedProperties(DescriptorFlag which,
PropertyAttributes filter) {
int result = 0;
@@ -5954,7 +5950,7 @@ int Map::NumberOfDescribedProperties(DescriptorFlag which,
: NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- ((filter & SYMBOLIC) == 0 || !descs->GetKey(i)->IsSymbol())) {
+ !FilterKey(descs->GetKey(i), filter)) {
result++;
}
}
@@ -6101,17 +6097,16 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
// Ignore getters and setters on pixel and external array elements.
return;
+
case DICTIONARY_ELEMENTS:
if (UpdateGetterSetterInDictionary(object->element_dictionary(),
index,
@@ -6188,8 +6183,7 @@ void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
bool only_attribute_changes = getter->IsNull() && setter->IsNull();
if (object->HasFastProperties() && !only_attribute_changes &&
access_control == v8::DEFAULT &&
- (object->map()->NumberOfOwnDescriptors() <
- DescriptorArray::kMaxNumberOfDescriptors)) {
+ (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors)) {
bool getterOk = getter->IsNull() ||
DefineFastAccessor(object, name, ACCESSOR_GETTER, getter, attributes);
bool setterOk = !getterOk || setter->IsNull() ||
@@ -6230,6 +6224,31 @@ bool JSObject::CanSetCallback(Name* name) {
}
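+// Returns true if the receiver has fast elements but some object in its
+// prototype chain has dictionary elements (proxies are treated as such).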
+bool Map::DictionaryElementsInPrototypeChainOnly() {
+ Heap* heap = GetHeap();
+
+ if (IsDictionaryElementsKind(elements_kind())) {
+ return false;
+ }
+
+ for (Object* prototype = this->prototype();
+ prototype != heap->null_value();
+ prototype = prototype->GetPrototype(GetIsolate())) {
+ if (prototype->IsJSProxy()) {
+ // Be conservative, don't walk into proxies.
+ return true;
+ }
+
+ if (IsDictionaryElementsKind(
+ JSObject::cast(prototype)->map()->elements_kind())) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
void JSObject::SetElementCallback(Handle<JSObject> object,
uint32_t index,
Handle<Object> structure,
@@ -6238,10 +6257,10 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
// Normalize elements to make this operation simple.
+ bool had_dictionary_elements = object->HasDictionaryElements();
Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
ASSERT(object->HasDictionaryElements() ||
object->HasDictionaryArgumentsElements());
-
// Update the dictionary with the new CALLBACKS property.
dictionary = SeededNumberDictionary::Set(dictionary, index, structure,
details);
@@ -6261,6 +6280,11 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
parameter_map->set(1, *dictionary);
} else {
object->set_elements(*dictionary);
+
+ if (!had_dictionary_elements) {
+ // KeyedStoreICs (at least the non-generic ones) need a reset.
+ heap->ClearAllICsByKind(Code::KEYED_STORE_IC);
+ }
}
}
@@ -6360,7 +6384,7 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
}
if (is_observed) {
- const char* type = preexists ? "reconfigured" : "new";
+ const char* type = preexists ? "reconfigure" : "add";
EnqueueChangeRecord(object, type, name, old_value);
}
}
@@ -6530,18 +6554,17 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
// Ignore getters and setters on pixel and external array
// elements.
return factory->undefined_value();
+
case DICTIONARY_ELEMENTS:
break;
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -6676,9 +6699,12 @@ MaybeObject* Map::RawCopy(int instance_size) {
int new_bit_field3 = bit_field3();
new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
- new_bit_field3 = EnumLengthBits::update(new_bit_field3, kInvalidEnumCache);
+ new_bit_field3 = EnumLengthBits::update(new_bit_field3,
+ kInvalidEnumCacheSentinel);
new_bit_field3 = Deprecated::update(new_bit_field3, false);
- new_bit_field3 = IsUnstable::update(new_bit_field3, false);
+ if (!is_dictionary_map()) {
+ new_bit_field3 = IsUnstable::update(new_bit_field3, false);
+ }
result->set_bit_field3(new_bit_field3);
return result;
}
@@ -6951,7 +6977,7 @@ Handle<Map> Map::CopyForObserved(Handle<Map> map) {
map->set_transitions(*transitions);
- new_map->set_is_observed(true);
+ new_map->set_is_observed();
if (map->owns_descriptors()) {
new_map->InitializeDescriptors(map->instance_descriptors());
@@ -7830,6 +7856,14 @@ MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
}
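+// Right-trims this array in place so that it holds |new_length| elements.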
+void FixedArray::Shrink(int new_length) {
+ ASSERT(0 <= new_length && new_length <= length());
+ if (new_length < length()) {
+ RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), this, length() - new_length);
+ }
+}
+
+
MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_result =
@@ -8319,11 +8353,6 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
}
-const uc16* String::GetTwoByteData() {
- return GetTwoByteData(0);
-}
-
-
const uc16* String::GetTwoByteData(unsigned start) {
ASSERT(!IsOneByteRepresentationUnderneath());
switch (StringShape(this).representation_tag()) {
@@ -9180,37 +9209,6 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
}
-AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object,
- bool in_GC) {
- // Currently, AllocationMemento objects are only allocated immediately
- // after JSArrays in NewSpace, and detecting whether a JSArray has one
- // involves carefully checking the object immediately after the JSArray
- // (if there is one) to see if it's an AllocationMemento.
- if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
- Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
- object->Size();
- Address top;
- if (in_GC) {
- top = object->GetHeap()->new_space()->FromSpacePageHigh();
- } else {
- top = object->GetHeap()->NewSpaceTop();
- }
- if ((ptr_end + AllocationMemento::kSize) <= top) {
- // There is room in newspace for allocation info. Do we have some?
- Map** possible_allocation_memento_map =
- reinterpret_cast<Map**>(ptr_end);
- if (*possible_allocation_memento_map ==
- object->GetHeap()->allocation_memento_map()) {
- AllocationMemento* memento = AllocationMemento::cast(
- reinterpret_cast<Object*>(ptr_end + kHeapObjectTag));
- return memento;
- }
- }
- }
- return NULL;
-}
-
-
uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
// For array indexes mix the length into the hash as an array index could
// be zero.
@@ -9291,14 +9289,6 @@ uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
}
-MaybeObject* String::SubString(int start, int end, PretenureFlag pretenure) {
- Heap* heap = GetHeap();
- if (start == 0 && end == length()) return this;
- MaybeObject* result = heap->AllocateSubString(this, start, end, pretenure);
- return result;
-}
-
-
void String::PrintOn(FILE* file) {
int length = this->length();
for (int i = 0; i < length; i++) {
@@ -9309,7 +9299,7 @@ void String::PrintOn(FILE* file) {
static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
- if (live_enum == Map::kInvalidEnumCache) {
+ if (live_enum == kInvalidEnumCacheSentinel) {
live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
}
if (live_enum == 0) return descriptors->ClearEnumCache();
@@ -9458,12 +9448,14 @@ bool Map::EquivalentToForNormalization(Map* other,
void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
- int first_ptr_offset = OffsetOfElementAt(first_ptr_index());
- int last_ptr_offset =
- OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries());
- v->VisitPointers(
- HeapObject::RawField(this, first_ptr_offset),
- HeapObject::RawField(this, last_ptr_offset));
+ if (count_of_ptr_entries() > 0) {
+ int first_ptr_offset = OffsetOfElementAt(first_ptr_index());
+ int last_ptr_offset =
+ OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries() - 1);
+ v->VisitPointers(
+ HeapObject::RawField(this, first_ptr_offset),
+ HeapObject::RawField(this, last_ptr_offset));
+ }
}
@@ -9476,115 +9468,99 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
}
-void JSFunction::MarkForLazyRecompilation() {
+void JSFunction::MarkForOptimization() {
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
ASSERT(!shared()->is_generator());
set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
+ GetIsolate()->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
}
-void JSFunction::MarkForConcurrentRecompilation() {
+void JSFunction::MarkForConcurrentOptimization() {
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(!shared()->is_generator());
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
PrintName();
PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kConcurrentRecompile));
+ GetIsolate()->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
// No write barrier required, since the builtin is part of the root set.
}
-void JSFunction::MarkInRecompileQueue() {
+void JSFunction::MarkInOptimizationQueue() {
// We can only arrive here via the concurrent-recompilation builtin. If
// break points were set, the code would point to the lazy-compile builtin.
ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
- ASSERT(IsMarkedForConcurrentRecompilation() && !IsOptimized());
+ ASSERT(IsMarkedForConcurrentOptimization() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queueing ");
PrintName();
PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kInRecompileQueue));
+ GetIsolate()->builtins()->builtin(Builtins::kInOptimizationQueue));
// No write barrier required, since the builtin is part of the root set.
}
-static bool CompileLazyHelper(CompilationInfo* info,
- ClearExceptionFlag flag) {
- // Compile the source information to a code object.
- ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
- ASSERT(!info->isolate()->has_pending_exception());
- bool result = Compiler::CompileLazy(info);
- ASSERT(result != info->isolate()->has_pending_exception());
- if (!result && flag == CLEAR_EXCEPTION) {
- info->isolate()->clear_pending_exception();
- }
- return result;
-}
-
-
-bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag) {
- ASSERT(shared->allows_lazy_compilation_without_context());
- CompilationInfoWithZone info(shared);
- return CompileLazyHelper(&info, flag);
-}
-
-
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
- Handle<FixedArray> literals) {
+ Handle<FixedArray> literals,
+ BailoutId osr_ast_id) {
CALL_HEAP_FUNCTION_VOID(
shared->GetIsolate(),
- shared->AddToOptimizedCodeMap(*native_context, *code, *literals));
+ shared->AddToOptimizedCodeMap(
+ *native_context, *code, *literals, osr_ast_id));
}
MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
Code* code,
- FixedArray* literals) {
+ FixedArray* literals,
+ BailoutId osr_ast_id) {
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(native_context->IsNativeContext());
- STATIC_ASSERT(kEntryLength == 3);
+ STATIC_ASSERT(kEntryLength == 4);
Heap* heap = GetHeap();
FixedArray* new_code_map;
Object* value = optimized_code_map();
+ Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
if (value->IsSmi()) {
// No optimized code map.
ASSERT_EQ(0, Smi::cast(value)->value());
    // Create 4 entries per context {context, code, literals, osr ast id}.
MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
if (!maybe->To(&new_code_map)) return maybe;
- new_code_map->set(kEntriesStart + 0, native_context);
- new_code_map->set(kEntriesStart + 1, code);
- new_code_map->set(kEntriesStart + 2, literals);
+ new_code_map->set(kEntriesStart + kContextOffset, native_context);
+ new_code_map->set(kEntriesStart + kCachedCodeOffset, code);
+ new_code_map->set(kEntriesStart + kLiteralsOffset, literals);
+ new_code_map->set(kEntriesStart + kOsrAstIdOffset, osr_ast_id_smi);
} else {
// Copy old map and append one new entry.
FixedArray* old_code_map = FixedArray::cast(value);
- ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context));
+ ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context, osr_ast_id));
int old_length = old_code_map->length();
int new_length = old_length + kEntryLength;
MaybeObject* maybe = old_code_map->CopySize(new_length);
if (!maybe->To(&new_code_map)) return maybe;
- new_code_map->set(old_length + 0, native_context);
- new_code_map->set(old_length + 1, code);
- new_code_map->set(old_length + 2, literals);
+ new_code_map->set(old_length + kContextOffset, native_context);
+ new_code_map->set(old_length + kCachedCodeOffset, code);
+ new_code_map->set(old_length + kLiteralsOffset, literals);
+ new_code_map->set(old_length + kOsrAstIdOffset, osr_ast_id_smi);
// Zap the old map for the sake of the heap verifier.
if (Heap::ShouldZapGarbage()) {
Object** data = old_code_map->data_start();
@@ -9593,11 +9569,12 @@ MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
}
#ifdef DEBUG
for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
- ASSERT(new_code_map->get(i)->IsNativeContext());
- ASSERT(new_code_map->get(i + 1)->IsCode());
- ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
+ ASSERT(new_code_map->get(i + kContextOffset)->IsNativeContext());
+ ASSERT(new_code_map->get(i + kCachedCodeOffset)->IsCode());
+ ASSERT(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() ==
Code::OPTIMIZED_FUNCTION);
- ASSERT(new_code_map->get(i + 2)->IsFixedArray());
+ ASSERT(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
+ ASSERT(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
}
#endif
set_optimized_code_map(new_code_map);
@@ -9605,19 +9582,24 @@ MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
}
-void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
- int index) {
+FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) {
ASSERT(index > kEntriesStart);
FixedArray* code_map = FixedArray::cast(optimized_code_map());
if (!bound()) {
FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
- ASSERT(cached_literals != NULL);
- function->set_literals(cached_literals);
+ ASSERT_NE(NULL, cached_literals);
+ return cached_literals;
}
+ return NULL;
+}
+
+
+Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) {
+ ASSERT(index > kEntriesStart);
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
Code* code = Code::cast(code_map->get(index));
- ASSERT(code != NULL);
- ASSERT(function->context()->native_context() == code_map->get(index - 1));
- function->ReplaceCode(code);
+ ASSERT_NE(NULL, code);
+ return code;
}
@@ -9656,9 +9638,14 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
}
}
while (i < (code_map->length() - kEntryLength)) {
- code_map->set(i, code_map->get(i + kEntryLength));
- code_map->set(i + 1, code_map->get(i + 1 + kEntryLength));
- code_map->set(i + 2, code_map->get(i + 2 + kEntryLength));
+ code_map->set(i + kContextOffset,
+ code_map->get(i + kContextOffset + kEntryLength));
+ code_map->set(i + kCachedCodeOffset,
+ code_map->get(i + kCachedCodeOffset + kEntryLength));
+ code_map->set(i + kLiteralsOffset,
+ code_map->get(i + kLiteralsOffset + kEntryLength));
+ code_map->set(i + kOsrAstIdOffset,
+ code_map->get(i + kOsrAstIdOffset + kEntryLength));
i += kEntryLength;
}
if (removed_entry) {
@@ -9683,64 +9670,6 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
}
-bool JSFunction::CompileLazy(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
- bool result = true;
- if (function->shared()->is_compiled()) {
- function->ReplaceCode(function->shared()->code());
- } else {
- ASSERT(function->shared()->allows_lazy_compilation());
- CompilationInfoWithZone info(function);
- result = CompileLazyHelper(&info, flag);
- ASSERT(!result || function->is_compiled());
- }
- return result;
-}
-
-
-Handle<Code> JSFunction::CompileOsr(Handle<JSFunction> function,
- BailoutId osr_ast_id,
- ClearExceptionFlag flag) {
- CompilationInfoWithZone info(function);
- info.SetOptimizing(osr_ast_id);
- if (CompileLazyHelper(&info, flag)) {
- // TODO(titzer): don't install the OSR code.
- // ASSERT(function->code() != *info.code());
- return info.code();
- } else {
- return Handle<Code>::null();
- }
-}
-
-
-bool JSFunction::CompileOptimized(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
- CompilationInfoWithZone info(function);
- info.SetOptimizing(BailoutId::None());
- return CompileLazyHelper(&info, flag);
-}
-
-
-bool JSFunction::EnsureCompiled(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
- return function->is_compiled() || CompileLazy(function, flag);
-}
-
-
-bool JSFunction::IsInlineable() {
- if (IsBuiltin()) return false;
- SharedFunctionInfo* shared_info = shared();
- // Check that the function has a script associated with it.
- if (!shared_info->script()->IsScript()) return false;
- if (shared_info->optimization_disabled()) return false;
- Code* code = shared_info->code();
- if (code->kind() == Code::OPTIMIZED_FUNCTION) return true;
- // If we never ran this (unlikely) then lets try to optimize it.
- if (code->kind() != Code::FUNCTION) return true;
- return code->optimizable();
-}
-
-
void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
if (object->IsGlobalObject()) return;
@@ -9884,6 +9813,48 @@ void JSFunction::RemovePrototype() {
}
+void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
+ if (function->has_initial_map()) return;
+ Isolate* isolate = function->GetIsolate();
+
+ // First create a new map with the size and number of in-object properties
+ // suggested by the function.
+ InstanceType instance_type;
+ int instance_size;
+ int in_object_properties;
+ if (function->shared()->is_generator()) {
+ instance_type = JS_GENERATOR_OBJECT_TYPE;
+ instance_size = JSGeneratorObject::kSize;
+ in_object_properties = 0;
+ } else {
+ instance_type = JS_OBJECT_TYPE;
+ instance_size = function->shared()->CalculateInstanceSize();
+ in_object_properties = function->shared()->CalculateInObjectProperties();
+ }
+ Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size);
+
+ // Fetch or allocate prototype.
+ Handle<Object> prototype;
+ if (function->has_instance_prototype()) {
+ prototype = handle(function->instance_prototype(), isolate);
+ } else {
+ prototype = isolate->factory()->NewFunctionPrototype(function);
+ }
+ map->set_inobject_properties(in_object_properties);
+ map->set_unused_property_fields(in_object_properties);
+ map->set_prototype(*prototype);
+ ASSERT(map->has_fast_object_elements());
+
+ if (!function->shared()->is_generator()) {
+ function->shared()->StartInobjectSlackTracking(*map);
+ }
+
+ // Finally link initial map and constructor function.
+ function->set_initial_map(*map);
+ map->set_constructor(*function);
+}
+
+
void JSFunction::SetInstanceClassName(String* name) {
shared()->set_instance_class_name(name);
}
@@ -9891,7 +9862,7 @@ void JSFunction::SetInstanceClassName(String* name) {
void JSFunction::PrintName(FILE* out) {
SmartArrayPointer<char> name = shared()->DebugName()->ToCString();
- PrintF(out, "%s", *name);
+ PrintF(out, "%s", name.get());
}
@@ -9913,11 +9884,18 @@ bool JSFunction::PassesFilter(const char* raw_filter) {
Vector<const char> filter = CStrVector(raw_filter);
if (filter.length() == 0) return name->length() == 0;
if (filter[0] == '-') {
+ // Negative filter.
if (filter.length() == 1) {
return (name->length() != 0);
- } else if (!name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
- return true;
+ } else if (name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ return false;
+ }
+ if (filter[filter.length() - 1] == '*' &&
+ name->IsUtf8EqualTo(filter.SubVector(1, filter.length() - 1), true)) {
+ return false;
}
+ return true;
+
} else if (name->IsUtf8EqualTo(filter)) {
return true;
}
@@ -9967,6 +9945,16 @@ Handle<Object> SharedFunctionInfo::GetSourceCode() {
}
+bool SharedFunctionInfo::IsInlineable() {
+ // Check that the function has a script associated with it.
+ if (!script()->IsScript()) return false;
+ if (optimization_disabled()) return false;
+ // If we never ran this (unlikely) then lets try to optimize it.
+ if (code()->kind() != Code::FUNCTION) return true;
+ return code()->optimizable();
+}
+
+
int SharedFunctionInfo::SourceSize() {
return end_position() - start_position();
}
@@ -10226,16 +10214,19 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
}
-int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
+int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
+ BailoutId osr_ast_id) {
ASSERT(native_context->IsNativeContext());
if (!FLAG_cache_optimized_code) return -1;
Object* value = optimized_code_map();
if (!value->IsSmi()) {
FixedArray* optimized_code_map = FixedArray::cast(value);
int length = optimized_code_map->length();
+ Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
for (int i = kEntriesStart; i < length; i += kEntryLength) {
- if (optimized_code_map->get(i) == native_context) {
- return i + 1;
+ if (optimized_code_map->get(i + kContextOffset) == native_context &&
+ optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+ return i + kCachedCodeOffset;
}
}
if (FLAG_trace_opt) {
@@ -10317,13 +10308,14 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- VisitPointer(rinfo->target_object_address());
+ Object* p = rinfo->target_object();
+ VisitPointer(&p);
}
void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
- Address* p = rinfo->target_reference_address();
- VisitExternalReference(p);
+ Address p = rinfo->target_reference();
+ VisitExternalReference(&p);
}
@@ -10334,11 +10326,15 @@ void Code::InvalidateRelocation() {
void Code::InvalidateEmbeddedObjects() {
Object* undefined = GetHeap()->undefined_value();
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ Cell* undefined_cell = GetHeap()->undefined_cell();
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+ } else if (mode == RelocInfo::CELL) {
+ it.rinfo()->set_target_cell(undefined_cell, SKIP_WRITE_BARRIER);
}
}
}
@@ -10483,6 +10479,12 @@ Object* Code::FindNthObject(int n, Map* match_map) {
}
+AllocationSite* Code::FindFirstAllocationSite() {
+ Object* result = FindNthObject(1, GetHeap()->allocation_site_map());
+ return (result != NULL) ? AllocationSite::cast(result) : NULL;
+}
+
+
Map* Code::FindFirstMap() {
Object* result = FindNthObject(1, GetHeap()->meta_map());
return (result != NULL) ? Map::cast(result) : NULL;
@@ -10492,7 +10494,7 @@ Map* Code::FindFirstMap() {
void Code::ReplaceNthObject(int n,
Map* match_map,
Object* replace_with) {
- ASSERT(is_inline_cache_stub());
+ ASSERT(is_inline_cache_stub() || is_handler());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
@@ -10518,7 +10520,22 @@ void Code::FindAllMaps(MapHandleList* maps) {
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Object* object = info->target_object();
- if (object->IsMap()) maps->Add(Handle<Map>(Map::cast(object)));
+ if (object->IsMap()) maps->Add(handle(Map::cast(object)));
+ }
+}
+
+
+void Code::FindAllTypes(TypeHandleList* types) {
+ ASSERT(is_inline_cache_stub());
+ DisallowHeapAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsMap()) {
+ Handle<Map> map(Map::cast(object));
+ types->Add(IC::MapToType<HeapType>(map, map->GetIsolate()));
+ }
}
}
@@ -10589,32 +10606,43 @@ void Code::ReplaceNthCell(int n, Cell* replace_with) {
void Code::ClearInlineCaches() {
+ ClearInlineCaches(NULL);
+}
+
+
+void Code::ClearInlineCaches(Code::Kind kind) {
+ ClearInlineCaches(&kind);
+}
+
+
+void Code::ClearInlineCaches(Code::Kind* kind) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET_CONTEXT);
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
- IC::Clear(this->GetIsolate(), info->pc());
+ if (kind == NULL || *kind == target->kind()) {
+ IC::Clear(this->GetIsolate(), info->pc());
+ }
}
}
}
-void Code::ClearTypeFeedbackCells(Heap* heap) {
+void Code::ClearTypeFeedbackInfo(Heap* heap) {
if (kind() != FUNCTION) return;
Object* raw_info = type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackCells* type_feedback_cells =
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
- for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- Cell* cell = type_feedback_cells->GetCell(i);
- // Don't clear AllocationSites
- Object* value = cell->value();
- if (value == NULL || !value->IsAllocationSite()) {
- cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+ FixedArray* feedback_vector =
+ TypeFeedbackInfo::cast(raw_info)->feedback_vector();
+ for (int i = 0; i < feedback_vector->length(); i++) {
+ Object* obj = feedback_vector->get(i);
+ if (!obj->IsAllocationSite()) {
+ // TODO(mvstanton): Can't I avoid a write barrier for this sentinel?
+ feedback_vector->set(i,
+ TypeFeedbackInfo::RawUninitializedSentinel(heap));
}
}
}
@@ -10632,6 +10660,18 @@ BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
}
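+// Returns the pc offset of the back edge recorded for |ast_id|.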
+uint32_t Code::TranslateAstIdToPcOffset(BailoutId ast_id) {
+ DisallowHeapAllocation no_gc;
+ ASSERT(kind() == FUNCTION);
+ BackEdgeTable back_edges(this, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (back_edges.ast_id(i) == ast_id) return back_edges.pc_offset(i);
+ }
+ UNREACHABLE(); // We expect to find the back edge.
+ return 0;
+}
+
+
void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY);
}
@@ -10643,12 +10683,25 @@ void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) {
}
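+// Maps the sentinel ages set by the "executed once"/"never executed" stubs
+// to the ages used when deciding whether code is old.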
+static Code::Age EffectiveAge(Code::Age age) {
+ if (age == Code::kNotExecutedCodeAge) {
+    // Treat code that has never been executed as old immediately.
+ age = Code::kIsOldCodeAge;
+ } else if (age == Code::kExecutedOnceCodeAge) {
+ // Pre-age code that has only been executed once.
+ age = Code::kPreAgedCodeAge;
+ }
+ return age;
+}
+
+
void Code::MakeOlder(MarkingParity current_parity) {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
Age age;
MarkingParity code_parity;
GetCodeAgeAndParity(sequence, &age, &code_parity);
+ age = EffectiveAge(age);
if (age != kLastCodeAge && code_parity != current_parity) {
PatchPlatformCodeAge(GetIsolate(),
sequence,
@@ -10660,8 +10713,7 @@ void Code::MakeOlder(MarkingParity current_parity) {
bool Code::IsOld() {
- Age age = GetAge();
- return age >= kIsOldCodeAge;
+ return GetAge() >= kIsOldCodeAge;
}
@@ -10676,9 +10728,14 @@ byte* Code::FindCodeAgeSequence() {
Code::Age Code::GetAge() {
+ return EffectiveAge(GetRawAge());
+}
+
+
+Code::Age Code::GetRawAge() {
byte* sequence = FindCodeAgeSequence();
if (sequence == NULL) {
- return Code::kNoAgeCodeAge;
+ return kNoAgeCodeAge;
}
Age age;
MarkingParity parity;
@@ -10709,15 +10766,13 @@ void Code::GetCodeAgeAndParity(Code* code, Age* age,
#undef HANDLE_CODE_AGE
stub = *builtins->MarkCodeAsExecutedOnce();
if (code == stub) {
- // Treat that's never been executed as old immediatly.
- *age = kIsOldCodeAge;
+ *age = kNotExecutedCodeAge;
*parity = NO_MARKING_PARITY;
return;
}
stub = *builtins->MarkCodeAsExecutedTwice();
if (code == stub) {
- // Pre-age code that has only been executed once.
- *age = kPreAgedCodeAge;
+ *age = kExecutedOnceCodeAge;
*parity = NO_MARKING_PARITY;
return;
}
@@ -10753,7 +10808,7 @@ Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
}
-void Code::PrintDeoptLocation(int bailout_id) {
+void Code::PrintDeoptLocation(FILE* out, int bailout_id) {
const char* last_comment = NULL;
int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
| RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
@@ -10767,7 +10822,7 @@ void Code::PrintDeoptLocation(int bailout_id) {
(bailout_id == Deoptimizer::GetDeoptimizationId(
GetIsolate(), info->target_address(), Deoptimizer::SOFT))) {
CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
- PrintF(" %s\n", last_comment);
+ PrintF(out, " %s\n", last_comment);
return;
}
}
@@ -10969,10 +11024,10 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
this->DeoptPoints());
if (this->DeoptPoints() == 0) return;
- PrintF("%6s %8s %s\n", "ast id", "pc", "state");
+ PrintF(out, "%6s %8s %s\n", "ast id", "pc", "state");
for (int i = 0; i < this->DeoptPoints(); i++) {
int pc_and_state = this->PcAndState(i)->value();
- PrintF("%6d %8d %s\n",
+ PrintF(out, "%6d %8d %s\n",
this->AstId(i).ToInt(),
FullCodeGenerator::PcField::decode(pc_and_state),
FullCodeGenerator::State2String(
@@ -11000,12 +11055,7 @@ const char* Code::ICState2String(InlineCacheState state) {
const char* Code::StubType2String(StubType type) {
switch (type) {
case NORMAL: return "NORMAL";
- case FIELD: return "FIELD";
- case CONSTANT: return "CONSTANT";
- case CALLBACKS: return "CALLBACKS";
- case INTERCEPTOR: return "INTERCEPTOR";
- case TRANSITION: return "TRANSITION";
- case NONEXISTENT: return "NONEXISTENT";
+ case FAST: return "FAST";
}
UNREACHABLE(); // keep the compiler happy
return NULL;
@@ -11016,11 +11066,6 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
PrintF(out, "extra_ic_state = ");
const char* name = NULL;
switch (kind) {
- case CALL_IC:
- if (extra == STRING_INDEX_OUT_OF_BOUNDS) {
- name = "STRING_INDEX_OUT_OF_BOUNDS";
- }
- break;
case STORE_IC:
case KEYED_STORE_IC:
if (extra == kStrictMode) {
@@ -11040,16 +11085,16 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "kind = %s\n", Kind2String(kind()));
+ if (has_major_key()) {
+ PrintF(out, "major_key = %s\n",
+ CodeStub::MajorName(CodeStub::GetMajorKey(this), true));
+ }
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
- PrintExtraICState(out, kind(), needs_extended_extra_ic_state(kind()) ?
- extended_extra_ic_state() : extra_ic_state());
+ PrintExtraICState(out, kind(), extra_ic_state());
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", StubType2String(type()));
}
- if (is_call_stub() || is_keyed_call_stub()) {
- PrintF(out, "argc = %d\n", arguments_count());
- }
if (is_compare_ic_stub()) {
ASSERT(major_key() == CodeStub::CompareIC);
CompareIC::State left_state, right_state, handler_state;
@@ -11083,7 +11128,7 @@ void Code::Disassemble(const char* name, FILE* out) {
DeoptimizationInputData::cast(this->deoptimization_data());
data->DeoptimizationInputDataPrint(out);
}
- PrintF("\n");
+ PrintF(out, "\n");
if (is_crankshafted()) {
SafepointTable table(this);
@@ -11091,7 +11136,7 @@ void Code::Disassemble(const char* name, FILE* out) {
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
PrintF(out, "%p %4d ", (instruction_start() + pc_offset), pc_offset);
- table.PrintEntry(i);
+ table.PrintEntry(i, out);
PrintF(out, " (sp -> fp)");
SafepointEntry entry = table.GetEntry(i);
if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
@@ -11132,7 +11177,7 @@ void Code::Disassemble(const char* name, FILE* out) {
#endif
}
- PrintF("RelocInfo (size = %d)\n", relocation_size());
+ PrintF(out, "RelocInfo (size = %d)\n", relocation_size());
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Print(GetIsolate(), out);
}
@@ -11141,6 +11186,18 @@ void Code::Disassemble(const char* name, FILE* out) {
#endif // ENABLE_DISASSEMBLER
+Handle<FixedArray> JSObject::SetFastElementsCapacityAndLength(
+ Handle<JSObject> object,
+ int capacity,
+ int length,
+ SetFastElementsCapacitySmiMode smi_mode) {
+ CALL_HEAP_FUNCTION(
+ object->GetIsolate(),
+ object->SetFastElementsCapacityAndLength(capacity, length, smi_mode),
+ FixedArray);
+}
+
+
MaybeObject* JSObject::SetFastElementsCapacityAndLength(
int capacity,
int length,
@@ -11148,7 +11205,6 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
- ASSERT(!map()->is_observed());
// Allocate a new fast elements backing store.
FixedArray* new_elements;
@@ -11220,20 +11276,30 @@ bool Code::IsWeakEmbeddedObject(Kind kind, Object* object) {
FLAG_weak_embedded_maps_in_optimized_code;
}
- if (object->IsJSObject()) {
+ if (object->IsJSObject() ||
+ (object->IsCell() && Cell::cast(object)->value()->IsJSObject())) {
return FLAG_weak_embedded_objects_in_optimized_code;
}
return false;
}
+
+void JSObject::SetFastDoubleElementsCapacityAndLength(Handle<JSObject> object,
+ int capacity,
+ int length) {
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->SetFastDoubleElementsCapacityAndLength(capacity, length));
+}
+
+
MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
int capacity,
int length) {
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
- ASSERT(!map()->is_observed());
FixedArrayBase* elems;
{ MaybeObject* maybe_obj =
@@ -11382,10 +11448,6 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
if (!new_length_handle->ToArrayIndex(&new_length))
return Failure::InternalError();
- // Observed arrays should always be in dictionary mode;
- // if they were in fast mode, the below is slower than necessary
- // as it iterates over the array backing store multiple times.
- ASSERT(self->HasDictionaryElements());
static const PropertyAttributes kNoAttrFilter = NONE;
int num_elements = self->NumberOfLocalElements(kNoAttrFilter);
if (num_elements > 0) {
@@ -11396,6 +11458,8 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
}
} else {
// For sparse arrays, only iterate over existing elements.
+ // TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over
+ // the to-be-removed indices twice.
Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
self->GetLocalElementKeys(*keys, kNoAttrFilter);
while (num_elements-- > 0) {
@@ -11418,11 +11482,11 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
for (int i = 0; i < indices.length(); ++i) {
JSObject::EnqueueChangeRecord(
- self, "deleted", isolate->factory()->Uint32ToString(indices[i]),
+ self, "delete", isolate->factory()->Uint32ToString(indices[i]),
old_values[i]);
}
JSObject::EnqueueChangeRecord(
- self, "updated", isolate->factory()->length_string(),
+ self, "update", isolate->factory()->length_string(),
old_length_handle);
EndPerformSplice(self);
@@ -11564,6 +11628,9 @@ DependentCode* DependentCode::ForObject(Handle<HeapObject> object,
AllowDeferredHandleDereference dependencies_are_safe;
if (group == DependentCode::kPropertyCellChangedGroup) {
return Handle<PropertyCell>::cast(object)->dependent_code();
+ } else if (group == DependentCode::kAllocationSiteTenuringChangedGroup ||
+ group == DependentCode::kAllocationSiteTransitionChangedGroup) {
+ return Handle<AllocationSite>::cast(object)->dependent_code();
}
return Handle<Map>::cast(object)->dependent_code();
}
@@ -11683,16 +11750,15 @@ bool DependentCode::Contains(DependencyGroup group, Code* code) {
}
-void DependentCode::DeoptimizeDependentCodeGroup(
+bool DependentCode::MarkCodeForDeoptimization(
Isolate* isolate,
DependentCode::DependencyGroup group) {
- ASSERT(AllowCodeDependencyChange::IsAllowed());
DisallowHeapAllocation no_allocation_scope;
DependentCode::GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
int code_entries = starts.number_of_entries();
- if (start == end) return;
+ if (start == end) return false;
// Mark all the code that needs to be deoptimized.
bool marked = false;
@@ -11718,6 +11784,16 @@ void DependentCode::DeoptimizeDependentCodeGroup(
clear_at(i);
}
set_number_of_entries(group, 0);
+ return marked;
+}
+
+
+void DependentCode::DeoptimizeDependentCodeGroup(
+ Isolate* isolate,
+ DependentCode::DependencyGroup group) {
+ ASSERT(AllowCodeDependencyChange::IsAllowed());
+ DisallowHeapAllocation no_allocation_scope;
+ bool marked = MarkCodeForDeoptimization(isolate, group);
if (marked) Deoptimizer::DeoptimizeMarkedCode(isolate);
}
@@ -11768,6 +11844,8 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
}
}
+ bool dictionary_elements_in_chain =
+ object->map()->DictionaryElementsInPrototypeChainOnly();
Handle<JSObject> real_receiver = object;
if (skip_hidden_prototypes) {
@@ -11800,6 +11878,14 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
ASSERT(new_map->prototype() == *value);
real_receiver->set_map(*new_map);
+ if (!dictionary_elements_in_chain &&
+ new_map->DictionaryElementsInPrototypeChainOnly()) {
+ // If the prototype chain didn't previously have element callbacks, then
+ // KeyedStoreICs need to be cleared to ensure any that involve this
+ // map go generic.
+ object->GetHeap()->ClearAllICsByKind(Code::KEYED_STORE_IC);
+ }
+
heap->ClearInstanceofCache();
ASSERT(size == object->Size());
return value;
@@ -11851,42 +11937,38 @@ AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) {
}
-MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetElementWithInterceptor(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSObject> this_handle(this);
- Handle<Object> value_handle(value, isolate);
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
v8::IndexedPropertySetterCallback setter =
v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
+ ApiIndexedPropertyAccess("interceptor-indexed-set", *object, index));
+ PropertyCallbackArguments args(isolate, interceptor->data(), *object,
+ *object);
v8::Handle<v8::Value> result =
- args.Call(setter, index, v8::Utils::ToLocal(value_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
- }
- MaybeObject* raw_result =
- this_handle->SetElementWithoutInterceptor(index,
- *value_handle,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ args.Call(setter, index, v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (!result.IsEmpty()) return value;
+ }
+
+ return SetElementWithoutInterceptor(object, index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
}
@@ -12034,41 +12116,39 @@ bool JSObject::HasDictionaryArgumentsElements() {
// Adding n elements in fast case is O(n*n).
// Note: revisit design to have dual undefined values to capture absent
// elements.
-MaybeObject* JSObject::SetFastElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype) {
- ASSERT(HasFastSmiOrObjectElements() ||
- HasFastArgumentsElements());
+Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode,
+ bool check_prototype) {
+ ASSERT(object->HasFastSmiOrObjectElements() ||
+ object->HasFastArgumentsElements());
+
+ Isolate* isolate = object->GetIsolate();
// Array optimizations rely on the prototype lookups of Array objects always
// returning undefined. If there is a store to the initial prototype object,
// make sure all of these optimizations are invalidated.
- Isolate* isolate(GetIsolate());
- if (isolate->is_initial_object_prototype(this) ||
- isolate->is_initial_array_prototype(this)) {
- HandleScope scope(GetIsolate());
- map()->dependent_code()->DeoptimizeDependentCodeGroup(
- GetIsolate(),
+ if (isolate->is_initial_object_prototype(*object) ||
+ isolate->is_initial_array_prototype(*object)) {
+ object->map()->dependent_code()->DeoptimizeDependentCodeGroup(isolate,
DependentCode::kElementsCantBeAddedGroup);
}
- FixedArray* backing_store = FixedArray::cast(elements());
- if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
- backing_store = FixedArray::cast(backing_store->get(1));
+ Handle<FixedArray> backing_store(FixedArray::cast(object->elements()));
+ if (backing_store->map() ==
+ isolate->heap()->non_strict_arguments_elements_map()) {
+ backing_store = handle(FixedArray::cast(backing_store->get(1)));
} else {
- MaybeObject* maybe = EnsureWritableFastElements();
- if (!maybe->To(&backing_store)) return maybe;
+ backing_store = EnsureWritableFastElements(object);
}
uint32_t capacity = static_cast<uint32_t>(backing_store->length());
if (check_prototype &&
(index >= capacity || backing_store->get(index)->IsTheHole())) {
bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
- value,
- &found,
- strict_mode);
+ Handle<Object> result = SetElementWithCallbackSetterInPrototypes(
+ object, index, value, &found, strict_mode);
if (found) return result;
}
@@ -12077,8 +12157,8 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
uint32_t array_length = 0;
bool must_update_array_length = false;
bool introduces_holes = true;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+ if (object->IsJSArray()) {
+ CHECK(Handle<JSArray>::cast(object)->length()->ToArrayIndex(&array_length));
introduces_holes = index > array_length;
if (index >= array_length) {
must_update_array_length = true;
@@ -12090,13 +12170,12 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
// If the array is growing, and it's not growth by a single element at the
// end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
if (introduces_holes &&
IsFastElementsKind(elements_kind) &&
!IsFastHoleyElementsKind(elements_kind)) {
ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
- if (maybe->IsFailure()) return maybe;
+ TransitionElementsKind(object, transitioned_kind);
}
// Check if the capacity of the backing store needs to be increased, or if
@@ -12106,94 +12185,80 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
if ((index - capacity) < kMaxGap) {
new_capacity = NewElementsCapacity(index + 1);
ASSERT(new_capacity > index);
- if (!ShouldConvertToSlowElements(new_capacity)) {
+ if (!object->ShouldConvertToSlowElements(new_capacity)) {
convert_to_slow = false;
}
}
if (convert_to_slow) {
- MaybeObject* result = NormalizeElements();
- if (result->IsFailure()) return result;
- return SetDictionaryElement(index, value, NONE, strict_mode,
+ NormalizeElements(object);
+ return SetDictionaryElement(object, index, value, NONE, strict_mode,
check_prototype);
}
}
// Convert to fast double elements if appropriate.
- if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
+ if (object->HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
// Consider fixing the boilerplate as well if we have one.
ElementsKind to_kind = IsHoleyElementsKind(elements_kind)
? FAST_HOLEY_DOUBLE_ELEMENTS
: FAST_DOUBLE_ELEMENTS;
- MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ UpdateAllocationSite(object, to_kind);
- MaybeObject* maybe =
- SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
- if (maybe->IsFailure()) return maybe;
- FixedDoubleArray::cast(elements())->set(index, value->Number());
- ValidateElements();
+ SetFastDoubleElementsCapacityAndLength(object, new_capacity, array_length);
+ FixedDoubleArray::cast(object->elements())->set(index, value->Number());
+ object->ValidateElements();
return value;
}
// Change elements kind from Smi-only to generic FAST if necessary.
- if (HasFastSmiElements() && !value->IsSmi()) {
- Map* new_map;
- ElementsKind kind = HasFastHoleyElements()
+ if (object->HasFastSmiElements() && !value->IsSmi()) {
+ ElementsKind kind = object->HasFastHoleyElements()
? FAST_HOLEY_ELEMENTS
: FAST_ELEMENTS;
- MaybeObject* maybe_failure = UpdateAllocationSite(kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
- kind);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- set_map(new_map);
+ UpdateAllocationSite(object, kind);
+ Handle<Map> new_map = GetElementsTransitionMap(object, kind);
+ object->set_map(*new_map);
+ ASSERT(IsFastObjectElementsKind(object->GetElementsKind()));
}
// Increase backing store capacity if that's been decided previously.
if (new_capacity != capacity) {
- FixedArray* new_elements;
SetFastElementsCapacitySmiMode smi_mode =
- value->IsSmi() && HasFastSmiElements()
+ value->IsSmi() && object->HasFastSmiElements()
? kAllowSmiElements
: kDontAllowSmiElements;
- { MaybeObject* maybe =
- SetFastElementsCapacityAndLength(new_capacity,
- array_length,
- smi_mode);
- if (!maybe->To(&new_elements)) return maybe;
- }
- new_elements->set(index, value);
- ValidateElements();
+ Handle<FixedArray> new_elements =
+ SetFastElementsCapacityAndLength(object, new_capacity, array_length,
+ smi_mode);
+ new_elements->set(index, *value);
+ object->ValidateElements();
return value;
}
// Finally, set the new element and length.
- ASSERT(elements()->IsFixedArray());
- backing_store->set(index, value);
+ ASSERT(object->elements()->IsFixedArray());
+ backing_store->set(index, *value);
if (must_update_array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(array_length));
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(array_length));
}
return value;
}
-MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw, isolate);
+Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode) {
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
+ Isolate* isolate = object->GetIsolate();
// Insert element in the dictionary.
- Handle<FixedArray> elements(FixedArray::cast(this->elements()));
+ Handle<FixedArray> elements(FixedArray::cast(object->elements()));
bool is_arguments =
- (elements->map() == heap->non_strict_arguments_elements_map());
+ (elements->map() == isolate->heap()->non_strict_arguments_elements_map());
Handle<SeededNumberDictionary> dictionary(is_arguments
? SeededNumberDictionary::cast(elements->get(1))
: SeededNumberDictionary::cast(*elements));
@@ -12203,10 +12268,8 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
Handle<Object> element(dictionary->ValueAt(entry), isolate);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
- Handle<Object> result = SetElementWithCallback(self, element, index,
- value, self, strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ return SetElementWithCallback(object, element, index, value, object,
+ strict_mode);
} else {
dictionary->UpdateMaxNumberKey(index);
// If a value has not been initialized we allow writing to it even if it
@@ -12218,21 +12281,22 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
if (strict_mode == kNonStrictMode) {
- return isolate->heap()->undefined_value();
+ return isolate->factory()->undefined_value();
} else {
- Handle<Object> holder(this, isolate);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { number, holder };
+ Handle<Object> args[2] = { number, object };
Handle<Object> error =
isolate->factory()->NewTypeError("strict_read_only_property",
HandleVector(args, 2));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
// Elements of the arguments object in slow mode might be slow aliases.
if (is_arguments && element->IsAliasedArgumentsEntry()) {
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*element);
- Context* context = Context::cast(elements->get(0));
+ Handle<AliasedArgumentsEntry> entry =
+ Handle<AliasedArgumentsEntry>::cast(element);
+ Handle<Context> context(Context::cast(elements->get(0)));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
context->set(context_index, *value);
@@ -12246,15 +12310,16 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
// Can cause GC!
if (check_prototype) {
bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(
- index, *value, &found, strict_mode);
+ Handle<Object> result = SetElementWithCallbackSetterInPrototypes(object,
+ index, value, &found, strict_mode);
if (found) return result;
}
+
// When we set the is_extensible flag to false we always force the
// element into dictionary mode (and force them to stay there).
- if (!self->map()->is_extensible()) {
+ if (!object->map()->is_extensible()) {
if (strict_mode == kNonStrictMode) {
- return isolate->heap()->undefined_value();
+ return isolate->factory()->undefined_value();
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> name = isolate->factory()->NumberToString(number);
@@ -12262,36 +12327,36 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
Handle<Object> error =
isolate->factory()->NewTypeError("object_not_extensible",
HandleVector(args, 1));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
- FixedArrayBase* new_dictionary;
+
PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
- MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
- if (!maybe->To(&new_dictionary)) return maybe;
- if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
+ Handle<SeededNumberDictionary> new_dictionary =
+ SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
+ details);
+ if (*dictionary != *new_dictionary) {
if (is_arguments) {
- elements->set(1, new_dictionary);
+ elements->set(1, *new_dictionary);
} else {
- self->set_elements(new_dictionary);
+ object->set_elements(*new_dictionary);
}
- dictionary =
- handle(SeededNumberDictionary::cast(new_dictionary), isolate);
+ dictionary = new_dictionary;
}
}
// Update the array length if this JSObject is an array.
- if (self->IsJSArray()) {
- MaybeObject* result =
- JSArray::cast(*self)->JSArrayUpdateLengthFromIndex(index, *value);
- if (result->IsFailure()) return result;
+ if (object->IsJSArray()) {
+ JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray>::cast(object), index,
+ value);
}
// Attempt to put this object back in fast case.
- if (self->ShouldConvertToFastElements()) {
+ if (object->ShouldConvertToFastElements()) {
uint32_t new_length = 0;
- if (self->IsJSArray()) {
- CHECK(JSArray::cast(*self)->length()->ToArrayIndex(&new_length));
+ if (object->IsJSArray()) {
+ CHECK(Handle<JSArray>::cast(object)->length()->ToArrayIndex(&new_length));
} else {
new_length = dictionary->max_number_key() + 1;
}
@@ -12300,47 +12365,47 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
: kDontAllowSmiElements;
bool has_smi_only_elements = false;
bool should_convert_to_fast_double_elements =
- self->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
+ object->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
if (has_smi_only_elements) {
smi_mode = kForceSmiElements;
}
- MaybeObject* result = should_convert_to_fast_double_elements
- ? self->SetFastDoubleElementsCapacityAndLength(new_length, new_length)
- : self->SetFastElementsCapacityAndLength(
- new_length, new_length, smi_mode);
- self->ValidateElements();
- if (result->IsFailure()) return result;
+
+ if (should_convert_to_fast_double_elements) {
+ SetFastDoubleElementsCapacityAndLength(object, new_length, new_length);
+ } else {
+ SetFastElementsCapacityAndLength(object, new_length, new_length,
+ smi_mode);
+ }
+ object->ValidateElements();
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object elements are fast case again:\n");
- Print();
+ object->Print();
}
#endif
}
- return *value;
+ return value;
}
-
-MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
+Handle<Object> JSObject::SetFastDoubleElement(
+ Handle<JSObject> object,
uint32_t index,
- Object* value,
+ Handle<Object> value,
StrictModeFlag strict_mode,
bool check_prototype) {
- ASSERT(HasFastDoubleElements());
+ ASSERT(object->HasFastDoubleElements());
- FixedArrayBase* base_elms = FixedArrayBase::cast(elements());
+ Handle<FixedArrayBase> base_elms(FixedArrayBase::cast(object->elements()));
uint32_t elms_length = static_cast<uint32_t>(base_elms->length());
// If storing to an element that isn't in the array, pass the store request
// up the prototype chain before storing in the receiver's elements.
if (check_prototype &&
(index >= elms_length ||
- FixedDoubleArray::cast(base_elms)->is_the_hole(index))) {
+ Handle<FixedDoubleArray>::cast(base_elms)->is_the_hole(index))) {
bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
- value,
- &found,
- strict_mode);
+ Handle<Object> result = SetElementWithCallbackSetterInPrototypes(object,
+ index, value, &found, strict_mode);
if (found) return result;
}
@@ -12349,48 +12414,47 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
bool value_is_smi = value->IsSmi();
bool introduces_holes = true;
uint32_t length = elms_length;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ if (object->IsJSArray()) {
+ CHECK(Handle<JSArray>::cast(object)->length()->ToArrayIndex(&length));
introduces_holes = index > length;
} else {
introduces_holes = index >= elms_length;
}
if (!value->IsNumber()) {
- MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
- elms_length,
- length,
- kDontAllowSmiElements);
- if (maybe_obj->IsFailure()) return maybe_obj;
- maybe_obj = SetFastElement(index, value, strict_mode, check_prototype);
- if (maybe_obj->IsFailure()) return maybe_obj;
- ValidateElements();
- return maybe_obj;
+ SetFastElementsCapacityAndLength(object, elms_length, length,
+ kDontAllowSmiElements);
+ Handle<Object> result = SetFastElement(object, index, value, strict_mode,
+ check_prototype);
+ RETURN_IF_EMPTY_HANDLE_VALUE(object->GetIsolate(), result,
+ Handle<Object>());
+ object->ValidateElements();
+ return result;
}
double double_value = value_is_smi
- ? static_cast<double>(Smi::cast(value)->value())
- : HeapNumber::cast(value)->value();
+ ? static_cast<double>(Handle<Smi>::cast(value)->value())
+ : Handle<HeapNumber>::cast(value)->value();
// If the array is growing, and it's not growth by a single element at the
// end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
if (introduces_holes && !IsFastHoleyElementsKind(elements_kind)) {
ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
- if (maybe->IsFailure()) return maybe;
+ TransitionElementsKind(object, transitioned_kind);
}
// Check whether there is extra space in the fixed array.
if (index < elms_length) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+ Handle<FixedDoubleArray> elms(FixedDoubleArray::cast(object->elements()));
elms->set(index, double_value);
- if (IsJSArray()) {
+ if (object->IsJSArray()) {
// Update the length of the array if needed.
uint32_t array_length = 0;
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+ CHECK(
+ Handle<JSArray>::cast(object)->length()->ToArrayIndex(&array_length));
if (index >= array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(index + 1));
}
}
return value;
@@ -12400,27 +12464,23 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
if ((index - elms_length) < kMaxGap) {
// Try allocating extra space.
int new_capacity = NewElementsCapacity(index+1);
- if (!ShouldConvertToSlowElements(new_capacity)) {
+ if (!object->ShouldConvertToSlowElements(new_capacity)) {
ASSERT(static_cast<uint32_t>(new_capacity) > index);
- MaybeObject* maybe_obj =
- SetFastDoubleElementsCapacityAndLength(new_capacity, index + 1);
- if (maybe_obj->IsFailure()) return maybe_obj;
- FixedDoubleArray::cast(elements())->set(index, double_value);
- ValidateElements();
+ SetFastDoubleElementsCapacityAndLength(object, new_capacity, index + 1);
+ FixedDoubleArray::cast(object->elements())->set(index, double_value);
+ object->ValidateElements();
return value;
}
}
// Otherwise default to slow case.
- ASSERT(HasFastDoubleElements());
- ASSERT(map()->has_fast_double_elements());
- ASSERT(elements()->IsFixedDoubleArray());
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ASSERT(HasDictionaryElements());
- return SetElement(index, value, NONE, strict_mode, check_prototype);
+ ASSERT(object->HasFastDoubleElements());
+ ASSERT(object->map()->has_fast_double_elements());
+ ASSERT(object->elements()->IsFixedDoubleArray());
+
+ NormalizeElements(object);
+ ASSERT(object->HasDictionaryElements());
+ return SetElement(object, index, value, NONE, strict_mode, check_prototype);
}
@@ -12443,260 +12503,231 @@ Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
Handle<Object> value,
StrictModeFlag strict_mode) {
ASSERT(!object->HasExternalArrayElements());
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetElement(index, *value, NONE, strict_mode, false),
- Object);
+ return JSObject::SetElement(object, index, value, NONE, strict_mode, false);
}
Handle<Object> JSObject::SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- PropertyAttributes attr,
+ PropertyAttributes attributes,
StrictModeFlag strict_mode,
+ bool check_prototype,
SetPropertyMode set_mode) {
+ Isolate* isolate = object->GetIsolate();
+
if (object->HasExternalArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number =
- Execution::ToNumber(object->GetIsolate(), value, &has_exception);
+ Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Handle<Object>();
value = number;
}
}
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetElement(index, *value, attr, strict_mode, true, set_mode),
- Object);
-}
-
-
-MaybeObject* JSObject::SetElement(uint32_t index,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- Isolate* isolate = GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return value_raw;
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetElement(index,
- value_raw,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
+ return SetElement(Handle<JSObject>::cast(proto), index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
}
// Don't allow element properties to be redefined for external arrays.
- if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
+ if (object->HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { handle(this, isolate), number };
+ Handle<Object> args[] = { object, number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// Normalize the elements to enable attributes on the property.
if ((attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_object = NormalizeElements();
- if (!maybe_object->To(&dictionary)) return maybe_object;
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
}
- if (!(FLAG_harmony_observation && map()->is_observed())) {
- return HasIndexedInterceptor()
- ? SetElementWithInterceptor(
- index, value_raw, attributes, strict_mode, check_prototype, set_mode)
- : SetElementWithoutInterceptor(
- index, value_raw, attributes, strict_mode, check_prototype, set_mode);
+ if (!(FLAG_harmony_observation && object->map()->is_observed())) {
+ return object->HasIndexedInterceptor()
+ ? SetElementWithInterceptor(object, index, value, attributes, strict_mode,
+ check_prototype,
+ set_mode)
+ : SetElementWithoutInterceptor(object, index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
}
- // From here on, everything has to be handlified.
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw, isolate);
- PropertyAttributes old_attributes = self->GetLocalElementAttribute(index);
+ PropertyAttributes old_attributes = object->GetLocalElementAttribute(index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
Handle<Object> old_length_handle;
Handle<Object> new_length_handle;
if (old_attributes != ABSENT) {
- if (self->GetLocalElementAccessorPair(index) == NULL)
- old_value = Object::GetElement(isolate, self, index);
- } else if (self->IsJSArray()) {
+ if (object->GetLocalElementAccessorPair(index) == NULL)
+ old_value = Object::GetElement(isolate, object, index);
+ } else if (object->IsJSArray()) {
// Store old array length in case adding an element grows the array.
- old_length_handle = handle(Handle<JSArray>::cast(self)->length(), isolate);
+ old_length_handle = handle(Handle<JSArray>::cast(object)->length(),
+ isolate);
}
// Check for lookup interceptor
- MaybeObject* result = self->HasIndexedInterceptor()
- ? self->SetElementWithInterceptor(
- index, *value, attributes, strict_mode, check_prototype, set_mode)
- : self->SetElementWithoutInterceptor(
- index, *value, attributes, strict_mode, check_prototype, set_mode);
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ Handle<Object> result = object->HasIndexedInterceptor()
+ ? SetElementWithInterceptor(object, index, value, attributes, strict_mode,
+ check_prototype,
+ set_mode)
+ : SetElementWithoutInterceptor(object, index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
Handle<String> name = isolate->factory()->Uint32ToString(index);
- PropertyAttributes new_attributes = self->GetLocalElementAttribute(index);
+ PropertyAttributes new_attributes = object->GetLocalElementAttribute(index);
if (old_attributes == ABSENT) {
- if (self->IsJSArray() &&
- !old_length_handle->SameValue(Handle<JSArray>::cast(self)->length())) {
- new_length_handle = handle(Handle<JSArray>::cast(self)->length(),
+ if (object->IsJSArray() &&
+ !old_length_handle->SameValue(
+ Handle<JSArray>::cast(object)->length())) {
+ new_length_handle = handle(Handle<JSArray>::cast(object)->length(),
isolate);
uint32_t old_length = 0;
uint32_t new_length = 0;
CHECK(old_length_handle->ToArrayIndex(&old_length));
CHECK(new_length_handle->ToArrayIndex(&new_length));
- BeginPerformSplice(Handle<JSArray>::cast(self));
- EnqueueChangeRecord(self, "new", name, old_value);
- EnqueueChangeRecord(self, "updated", isolate->factory()->length_string(),
+ BeginPerformSplice(Handle<JSArray>::cast(object));
+ EnqueueChangeRecord(object, "add", name, old_value);
+ EnqueueChangeRecord(object, "update", isolate->factory()->length_string(),
old_length_handle);
- EndPerformSplice(Handle<JSArray>::cast(self));
+ EndPerformSplice(Handle<JSArray>::cast(object));
Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
- EnqueueSpliceRecord(Handle<JSArray>::cast(self), old_length, deleted,
+ EnqueueSpliceRecord(Handle<JSArray>::cast(object), old_length, deleted,
new_length - old_length);
} else {
- EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(object, "add", name, old_value);
}
} else if (old_value->IsTheHole()) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else {
- Handle<Object> new_value = Object::GetElement(isolate, self, index);
+ Handle<Object> new_value = Object::GetElement(isolate, object, index);
bool value_changed = !old_value->SameValue(*new_value);
if (old_attributes != new_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else if (value_changed) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ EnqueueChangeRecord(object, "update", name, old_value);
}
}
- return *hresult;
+ return result;
}
-MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
- Object* value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- ASSERT(HasDictionaryElements() ||
- HasDictionaryArgumentsElements() ||
- (attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
- Isolate* isolate = GetIsolate();
+Handle<Object> JSObject::SetElementWithoutInterceptor(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode) {
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements() ||
+ (attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
+ Isolate* isolate = object->GetIsolate();
if (FLAG_trace_external_array_abuse &&
- IsExternalArrayElementsKind(GetElementsKind())) {
- CheckArrayAbuse(this, "external elements write", index);
+ IsExternalArrayElementsKind(object->GetElementsKind())) {
+ CheckArrayAbuse(*object, "external elements write", index);
}
if (FLAG_trace_js_array_abuse &&
- !IsExternalArrayElementsKind(GetElementsKind())) {
- if (IsJSArray()) {
- CheckArrayAbuse(this, "elements write", index, true);
+ !IsExternalArrayElementsKind(object->GetElementsKind())) {
+ if (object->IsJSArray()) {
+ CheckArrayAbuse(*object, "elements write", index, true);
}
}
- switch (GetElementsKind()) {
+ switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- return SetFastElement(index, value, strict_mode, check_prototype);
+ return SetFastElement(object, index, value, strict_mode, check_prototype);
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- return SetFastDoubleElement(index, value, strict_mode, check_prototype);
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- return pixels->SetValue(index, value);
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- ExternalByteArray* array = ExternalByteArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- ExternalUnsignedByteArray* array =
- ExternalUnsignedByteArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- ExternalShortArray* array = ExternalShortArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- ExternalUnsignedShortArray* array =
- ExternalUnsignedShortArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_INT_ELEMENTS: {
- ExternalIntArray* array = ExternalIntArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- ExternalUnsignedIntArray* array =
- ExternalUnsignedIntArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalFloatArray* array = ExternalFloatArray::cast(elements());
- return array->SetValue(index, value);
- }
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalDoubleArray* array = ExternalDoubleArray::cast(elements());
- return array->SetValue(index, value);
+ return SetFastDoubleElement(object, index, value, strict_mode,
+ check_prototype);
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: { \
+ Handle<External##Type##Array> array( \
+ External##Type##Array::cast(object->elements())); \
+ return External##Type##Array::SetValue(array, index, value); \
+ } \
+ case TYPE##_ELEMENTS: { \
+ Handle<Fixed##Type##Array> array( \
+ Fixed##Type##Array::cast(object->elements())); \
+ return Fixed##Type##Array::SetValue(array, index, value); \
}
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+
+#undef TYPED_ARRAY_CASE
+
case DICTIONARY_ELEMENTS:
- return SetDictionaryElement(index, value, attr, strict_mode,
- check_prototype, set_mode);
+ return SetDictionaryElement(object, index, value, attributes, strict_mode,
+ check_prototype,
+ set_mode);
case NON_STRICT_ARGUMENTS_ELEMENTS: {
- FixedArray* parameter_map = FixedArray::cast(elements());
+ Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
uint32_t length = parameter_map->length();
- Object* probe =
- (index < length - 2) ? parameter_map->get(index + 2) : NULL;
- if (probe != NULL && !probe->IsTheHole()) {
- Context* context = Context::cast(parameter_map->get(0));
- int context_index = Smi::cast(probe)->value();
+ Handle<Object> probe = index < length - 2 ?
+ Handle<Object>(parameter_map->get(index + 2), isolate) :
+ Handle<Object>();
+ if (!probe.is_null() && !probe->IsTheHole()) {
+ Handle<Context> context(Context::cast(parameter_map->get(0)));
+ int context_index = Handle<Smi>::cast(probe)->value();
ASSERT(!context->get(context_index)->IsTheHole());
- context->set(context_index, value);
+ context->set(context_index, *value);
// Redefining attributes of an aliased element destroys fast aliasing.
- if (set_mode == SET_PROPERTY || attr == NONE) return value;
+ if (set_mode == SET_PROPERTY || attributes == NONE) return value;
parameter_map->set_the_hole(index + 2);
// For elements that are still writable we re-establish slow aliasing.
- if ((attr & READ_ONLY) == 0) {
- MaybeObject* maybe_entry =
- isolate->heap()->AllocateAliasedArgumentsEntry(context_index);
- if (!maybe_entry->ToObject(&value)) return maybe_entry;
+ if ((attributes & READ_ONLY) == 0) {
+ value = Handle<Object>::cast(
+ isolate->factory()->NewAliasedArgumentsEntry(context_index));
}
}
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
if (arguments->IsDictionary()) {
- return SetDictionaryElement(index, value, attr, strict_mode,
- check_prototype, set_mode);
+ return SetDictionaryElement(object, index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
} else {
- return SetFastElement(index, value, strict_mode, check_prototype);
+ return SetFastElement(object, index, value, strict_mode,
+ check_prototype);
}
}
}
// All possible cases have been handled above. Add a return to avoid the
// complaints from the compiler.
UNREACHABLE();
- return isolate->heap()->null_value();
+ return isolate->factory()->null_value();
}
@@ -12707,10 +12738,27 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
}
+const double AllocationSite::kPretenureRatio = 0.60;
+
+
+void AllocationSite::ResetPretenureDecision() {
+ set_pretenure_decision(kUndecided);
+ set_memento_found_count(0);
+ set_memento_create_count(0);
+}
+
+
+PretenureFlag AllocationSite::GetPretenureMode() {
+ PretenureDecision mode = pretenure_decision();
+ // Zombie objects "decide" to be untenured.
+ return mode == kTenure ? TENURED : NOT_TENURED;
+}
+
+
bool AllocationSite::IsNestedSite() {
ASSERT(FLAG_trace_track_allocation_sites);
Object* current = GetHeap()->allocation_sites_list();
- while (current != NULL && current->IsAllocationSite()) {
+ while (current->IsAllocationSite()) {
AllocationSite* current_site = AllocationSite::cast(current);
if (current_site->nested_site() == this) {
return true;
@@ -12721,21 +12769,11 @@ bool AllocationSite::IsNestedSite() {
}
-MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
- if (!FLAG_track_allocation_sites || !IsJSArray()) {
- return this;
- }
-
- AllocationMemento* memento = AllocationMemento::FindForJSObject(this);
- if (memento == NULL || !memento->IsValid()) {
- return this;
- }
+MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
+ Isolate* isolate = GetIsolate();
- // Walk through to the Allocation Site
- AllocationSite* site = memento->GetAllocationSite();
- if (site->SitePointsToLiteral() &&
- site->transition_info()->IsJSArray()) {
- JSArray* transition_info = JSArray::cast(site->transition_info());
+ if (SitePointsToLiteral() && transition_info()->IsJSArray()) {
+ JSArray* transition_info = JSArray::cast(this->transition_info());
ElementsKind kind = transition_info->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
@@ -12746,9 +12784,9 @@ MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
// function, so we shouldn't make new instances of it very often.
uint32_t length = 0;
CHECK(transition_info->length()->ToArrayIndex(&length));
- if (length <= AllocationSite::kMaximumArrayBytesToPretransition) {
+ if (length <= kMaximumArrayBytesToPretransition) {
if (FLAG_trace_track_allocation_sites) {
- bool is_nested = site->IsNestedSite();
+ bool is_nested = IsNestedSite();
PrintF(
"AllocationSite: JSArray %p boilerplate %s updated %s->%s\n",
reinterpret_cast<void*>(this),
@@ -12756,11 +12794,14 @@ MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- return transition_info->TransitionElementsKind(to_kind);
+ MaybeObject* result = transition_info->TransitionElementsKind(to_kind);
+ if (result->IsFailure()) return result;
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
} else {
- ElementsKind kind = site->GetElementsKind();
+ ElementsKind kind = GetElementsKind();
// if kind is holey ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
@@ -12772,15 +12813,73 @@ MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- site->set_transition_info(Smi::FromInt(to_kind));
+ SetElementsKind(to_kind);
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
return this;
}
+// static
+void AllocationSite::AddDependentCompilationInfo(Handle<AllocationSite> site,
+ Reason reason,
+ CompilationInfo* info) {
+ DependentCode::DependencyGroup group = site->ToDependencyGroup(reason);
+ Handle<DependentCode> dep(site->dependent_code());
+ Handle<DependentCode> codes =
+ DependentCode::Insert(dep, group, info->object_wrapper());
+ if (*codes != site->dependent_code()) site->set_dependent_code(*codes);
+ info->dependencies(group)->Add(Handle<HeapObject>(*site), info->zone());
+}
+
+
+void JSObject::UpdateAllocationSite(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->UpdateAllocationSite(to_kind));
+}
+
+
+MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
+ if (!IsJSArray()) return this;
+
+ Heap* heap = GetHeap();
+ if (!heap->InNewSpace(this)) return this;
+
+ // Check if there is potentially a memento behind the object. If
+  // the last word of the memento is on another page we return
+  // immediately.
+ Address object_address = address();
+ Address memento_address = object_address + JSArray::kSize;
+ Address last_memento_word_address = memento_address + kPointerSize;
+ if (!NewSpacePage::OnSamePage(object_address,
+ last_memento_word_address)) {
+ return this;
+ }
+
+  // Either the object is the last object in the new space, or there is
+  // another object of at least word size (the header map word) following
+  // it, so it suffices to compare the memento address and top here.
+ Address top = heap->NewSpaceTop();
+ ASSERT(memento_address == top ||
+ memento_address + HeapObject::kHeaderSize <= top);
+ if (memento_address == top) return this;
+
+ HeapObject* candidate = HeapObject::FromAddress(memento_address);
+ if (candidate->map() != heap->allocation_memento_map()) return this;
+
+ AllocationMemento* memento = AllocationMemento::cast(candidate);
+ if (!memento->IsValid()) return this;
+
+ // Walk through to the Allocation Site
+ AllocationSite* site = memento->GetAllocationSite();
+ return site->DigestTransitionFeedback(to_kind);
+}
+
+
MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
- ASSERT(!map()->is_observed());
ElementsKind from_kind = map()->elements_kind();
if (IsFastHoleyElementsKind(from_kind)) {
@@ -12788,9 +12887,11 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
}
if (from_kind == to_kind) return this;
-
- MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
+  // Don't update the site if to_kind isn't fast.
+ if (IsFastElementsKind(to_kind)) {
+ MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
Isolate* isolate = GetIsolate();
if (elements() == isolate->heap()->empty_fixed_array() ||
@@ -12866,6 +12967,14 @@ bool Map::IsValidElementsTransition(ElementsKind from_kind,
}
+void JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION_VOID(array->GetIsolate(),
+ array->JSArrayUpdateLengthFromIndex(index, *value));
+}
+
+
MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
Object* value) {
uint32_t old_len = 0;
@@ -12971,8 +13080,7 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
}
break;
case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(FixedArray::cast(elements()));
+ SeededNumberDictionary* dictionary = element_dictionary();
*capacity = dictionary->Capacity();
*used = dictionary->NumberOfElements();
break;
@@ -12992,20 +13100,20 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
}
break;
}
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ {
// External arrays are considered 100% used.
- ExternalArray* external_array = ExternalArray::cast(elements());
+ FixedArrayBase* external_array = FixedArrayBase::cast(elements());
*capacity = external_array->length();
*used = external_array->length();
break;
+ }
}
}
@@ -13071,8 +13179,7 @@ bool JSObject::ShouldConvertToFastDoubleElements(
*has_smi_only_elements = false;
if (FLAG_unbox_double_arrays) {
ASSERT(HasDictionaryElements());
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(elements());
+ SeededNumberDictionary* dictionary = element_dictionary();
bool found_double = false;
for (int i = 0; i < dictionary->Capacity(); i++) {
Object* key = dictionary->KeyAt(i);
@@ -13294,7 +13401,7 @@ int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
if (filter == NONE) return map->NumberOfOwnDescriptors();
if (filter & DONT_ENUM) {
int result = map->EnumLength();
- if (result != Map::kInvalidEnumCache) return result;
+ if (result != kInvalidEnumCacheSentinel) return result;
}
return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
}
@@ -13426,7 +13533,7 @@ void JSObject::GetLocalPropertyNames(
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < real_size; i++) {
if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- ((filter & SYMBOLIC) == 0 || !descs->GetKey(i)->IsSymbol())) {
+ !FilterKey(descs->GetKey(i), filter)) {
storage->set(index++, descs->GetKey(i));
}
}
@@ -13496,26 +13603,15 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
- case EXTERNAL_PIXEL_ELEMENTS: {
- int length = ExternalPixelArray::cast(elements())->length();
- while (counter < length) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter));
- }
- counter++;
- }
- ASSERT(!storage || storage->length() >= counter);
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- int length = ExternalArray::cast(elements())->length();
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ {
+ int length = FixedArrayBase::cast(elements())->length();
while (counter < length) {
if (storage != NULL) {
storage->set(counter, Smi::FromInt(counter));
@@ -13525,6 +13621,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
+
case DICTIONARY_ELEMENTS: {
if (storage != NULL) {
element_dictionary()->CopyKeysTo(storage,
@@ -13757,142 +13854,67 @@ class RegExpKey : public HashTableKey {
};
-// Utf8StringKey carries a vector of chars as key.
-class Utf8StringKey : public HashTableKey {
- public:
- explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) { }
-
- bool IsMatch(Object* string) {
- return String::cast(string)->IsUtf8EqualTo(string_);
- }
-
- uint32_t Hash() {
- if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
- hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- MaybeObject* AsObject(Heap* heap) {
- if (hash_field_ == 0) Hash();
- return heap->AllocateInternalizedStringFromUtf8(string_,
- chars_,
- hash_field_);
- }
-
- Vector<const char> string_;
- uint32_t hash_field_;
- int chars_; // Caches the number of characters when computing the hash code.
- uint32_t seed_;
-};
-
-
-template <typename Char>
-class SequentialStringKey : public HashTableKey {
- public:
- explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) { }
-
- uint32_t Hash() {
- hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
- string_.length(),
- seed_);
-
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
-
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- Vector<const Char> string_;
- uint32_t hash_field_;
- uint32_t seed_;
-};
+MaybeObject* OneByteStringKey::AsObject(Heap* heap) {
+ if (hash_field_ == 0) Hash();
+ return heap->AllocateOneByteInternalizedString(string_, hash_field_);
+}
+MaybeObject* TwoByteStringKey::AsObject(Heap* heap) {
+ if (hash_field_ == 0) Hash();
+ return heap->AllocateTwoByteInternalizedString(string_, hash_field_);
+}
-class OneByteStringKey : public SequentialStringKey<uint8_t> {
- public:
- OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
- : SequentialStringKey<uint8_t>(str, seed) { }
- bool IsMatch(Object* string) {
- return String::cast(string)->IsOneByteEqualTo(string_);
- }
+template<>
+const uint8_t* SubStringKey<uint8_t>::GetChars() {
+ return string_->IsSeqOneByteString()
+ ? SeqOneByteString::cast(*string_)->GetChars()
+ : ExternalAsciiString::cast(*string_)->GetChars();
+}
- MaybeObject* AsObject(Heap* heap) {
- if (hash_field_ == 0) Hash();
- return heap->AllocateOneByteInternalizedString(string_, hash_field_);
- }
-};
+template<>
+const uint16_t* SubStringKey<uint16_t>::GetChars() {
+ return string_->IsSeqTwoByteString()
+ ? SeqTwoByteString::cast(*string_)->GetChars()
+ : ExternalTwoByteString::cast(*string_)->GetChars();
+}
-class SubStringOneByteStringKey : public HashTableKey {
- public:
- explicit SubStringOneByteStringKey(Handle<SeqOneByteString> string,
- int from,
- int length)
- : string_(string), from_(from), length_(length) { }
- uint32_t Hash() {
- ASSERT(length_ >= 0);
- ASSERT(from_ + length_ <= string_->length());
- uint8_t* chars = string_->GetChars() + from_;
- hash_field_ = StringHasher::HashSequentialString(
- chars, length_, string_->GetHeap()->HashSeed());
- uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
+template<>
+MaybeObject* SubStringKey<uint8_t>::AsObject(Heap* heap) {
+ if (hash_field_ == 0) Hash();
+ Vector<const uint8_t> chars(GetChars() + from_, length_);
+ return heap->AllocateOneByteInternalizedString(chars, hash_field_);
+}
- uint32_t HashForObject(Object* other) {
- return String::cast(other)->Hash();
- }
-
- bool IsMatch(Object* string) {
- Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
- return String::cast(string)->IsOneByteEqualTo(chars);
- }
+template<>
+MaybeObject* SubStringKey<uint16_t>::AsObject(
+ Heap* heap) {
+ if (hash_field_ == 0) Hash();
+ Vector<const uint16_t> chars(GetChars() + from_, length_);
+ return heap->AllocateTwoByteInternalizedString(chars, hash_field_);
+}
- MaybeObject* AsObject(Heap* heap) {
- if (hash_field_ == 0) Hash();
- Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
- return heap->AllocateOneByteInternalizedString(chars, hash_field_);
- }
- private:
- Handle<SeqOneByteString> string_;
- int from_;
- int length_;
- uint32_t hash_field_;
-};
+template<>
+bool SubStringKey<uint8_t>::IsMatch(Object* string) {
+ Vector<const uint8_t> chars(GetChars() + from_, length_);
+ return String::cast(string)->IsOneByteEqualTo(chars);
+}
-class TwoByteStringKey : public SequentialStringKey<uc16> {
- public:
- explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
- : SequentialStringKey<uc16>(str, seed) { }
+template<>
+bool SubStringKey<uint16_t>::IsMatch(Object* string) {
+ Vector<const uint16_t> chars(GetChars() + from_, length_);
+ return String::cast(string)->IsTwoByteEqualTo(chars);
+}
- bool IsMatch(Object* string) {
- return String::cast(string)->IsTwoByteEqualTo(string_);
- }
- MaybeObject* AsObject(Heap* heap) {
- if (hash_field_ == 0) Hash();
- return heap->AllocateTwoByteInternalizedString(string_, hash_field_);
- }
-};
+template class SubStringKey<uint8_t>;
+template class SubStringKey<uint16_t>;
// InternalizedStringKey carries a string/internalized-string object as key.
@@ -14450,7 +14472,7 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
return handle(Smi::FromInt(
ExternalArray::cast(object->elements())->length()), isolate);
} else if (!object->HasFastDoubleElements()) {
- JSObject::EnsureWritableFastElements(object);
+ EnsureWritableFastElements(object);
}
ASSERT(object->HasFastSmiOrObjectElements() ||
object->HasFastDoubleElements());
@@ -14547,24 +14569,13 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
ExternalArrayType JSTypedArray::type() {
switch (elements()->map()->instance_type()) {
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return kExternalByteArray;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return kExternalUnsignedByteArray;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return kExternalShortArray;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return kExternalUnsignedShortArray;
- case EXTERNAL_INT_ARRAY_TYPE:
- return kExternalIntArray;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return kExternalUnsignedIntArray;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return kExternalFloatArray;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return kExternalDoubleArray;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return kExternalPixelArray;
+#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ return kExternal##Type##Array;
+
+ TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
+#undef INSTANCE_TYPE_TO_ARRAY_TYPE
+
default:
return static_cast<ExternalArrayType>(-1);
}
@@ -14573,24 +14584,13 @@ ExternalArrayType JSTypedArray::type() {
size_t JSTypedArray::element_size() {
switch (elements()->map()->instance_type()) {
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return 1;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return 1;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return 2;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return 2;
- case EXTERNAL_INT_ARRAY_TYPE:
- return 4;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return 4;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return 4;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return 8;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return 1;
+#define INSTANCE_TYPE_TO_ELEMENT_SIZE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ return size;
+
+ TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENT_SIZE)
+#undef INSTANCE_TYPE_TO_ELEMENT_SIZE
+
default:
UNREACHABLE();
return 0;
@@ -14598,7 +14598,7 @@ size_t JSTypedArray::element_size() {
}
-Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
+Object* ExternalUint8ClampedArray::SetValue(uint32_t index, Object* value) {
uint8_t clamped_value = 0;
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
@@ -14633,6 +14633,14 @@ Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
}
+Handle<Object> ExternalUint8ClampedArray::SetValue(
+ Handle<ExternalUint8ClampedArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ return Handle<Object>(array->SetValue(index, *value), array->GetIsolate());
+}
+
+
template<typename ExternalArrayClass, typename ValueType>
static MaybeObject* ExternalArrayIntSetter(Heap* heap,
ExternalArrayClass* receiver,
@@ -14657,40 +14665,98 @@ static MaybeObject* ExternalArrayIntSetter(Heap* heap,
}
-MaybeObject* ExternalByteArray::SetValue(uint32_t index, Object* value) {
- return ExternalArrayIntSetter<ExternalByteArray, int8_t>
+Handle<Object> ExternalInt8Array::SetValue(Handle<ExternalInt8Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
+MaybeObject* ExternalInt8Array::SetValue(uint32_t index, Object* value) {
+ return ExternalArrayIntSetter<ExternalInt8Array, int8_t>
(GetHeap(), this, index, value);
}
-MaybeObject* ExternalUnsignedByteArray::SetValue(uint32_t index,
+Handle<Object> ExternalUint8Array::SetValue(
+ Handle<ExternalUint8Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
+MaybeObject* ExternalUint8Array::SetValue(uint32_t index,
Object* value) {
- return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
+ return ExternalArrayIntSetter<ExternalUint8Array, uint8_t>
(GetHeap(), this, index, value);
}
-MaybeObject* ExternalShortArray::SetValue(uint32_t index,
+Handle<Object> ExternalInt16Array::SetValue(
+ Handle<ExternalInt16Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
+MaybeObject* ExternalInt16Array::SetValue(uint32_t index,
Object* value) {
- return ExternalArrayIntSetter<ExternalShortArray, int16_t>
+ return ExternalArrayIntSetter<ExternalInt16Array, int16_t>
(GetHeap(), this, index, value);
}
-MaybeObject* ExternalUnsignedShortArray::SetValue(uint32_t index,
+Handle<Object> ExternalUint16Array::SetValue(
+ Handle<ExternalUint16Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
+MaybeObject* ExternalUint16Array::SetValue(uint32_t index,
Object* value) {
- return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
+ return ExternalArrayIntSetter<ExternalUint16Array, uint16_t>
(GetHeap(), this, index, value);
}
-MaybeObject* ExternalIntArray::SetValue(uint32_t index, Object* value) {
- return ExternalArrayIntSetter<ExternalIntArray, int32_t>
+Handle<Object> ExternalInt32Array::SetValue(Handle<ExternalInt32Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
+MaybeObject* ExternalInt32Array::SetValue(uint32_t index, Object* value) {
+ return ExternalArrayIntSetter<ExternalInt32Array, int32_t>
(GetHeap(), this, index, value);
}
-MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
+Handle<Object> ExternalUint32Array::SetValue(
+ Handle<ExternalUint32Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
+MaybeObject* ExternalUint32Array::SetValue(uint32_t index, Object* value) {
uint32_t cast_value = 0;
Heap* heap = GetHeap();
if (index < static_cast<uint32_t>(length())) {
@@ -14711,7 +14777,17 @@ MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
}
-MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
+Handle<Object> ExternalFloat32Array::SetValue(
+ Handle<ExternalFloat32Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
+MaybeObject* ExternalFloat32Array::SetValue(uint32_t index, Object* value) {
float cast_value = static_cast<float>(OS::nan_value());
Heap* heap = GetHeap();
if (index < static_cast<uint32_t>(length())) {
@@ -14732,7 +14808,17 @@ MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
}
-MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
+Handle<Object> ExternalFloat64Array::SetValue(
+ Handle<ExternalFloat64Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
+MaybeObject* ExternalFloat64Array::SetValue(uint32_t index, Object* value) {
double double_value = OS::nan_value();
Heap* heap = GetHeap();
if (index < static_cast<uint32_t>(length())) {
@@ -14880,37 +14966,6 @@ bool StringTable::LookupTwoCharsStringIfExists(uint16_t c1,
}
-MaybeObject* StringTable::LookupUtf8String(Vector<const char> str,
- Object** s) {
- Utf8StringKey key(str, GetHeap()->HashSeed());
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* StringTable::LookupOneByteString(Vector<const uint8_t> str,
- Object** s) {
- OneByteStringKey key(str, GetHeap()->HashSeed());
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* StringTable::LookupSubStringOneByteString(
- Handle<SeqOneByteString> str,
- int from,
- int length,
- Object** s) {
- SubStringOneByteStringKey key(str, from, length);
- return LookupKey(&key, s);
-}
-
-
-MaybeObject* StringTable::LookupTwoByteString(Vector<const uc16> str,
- Object** s) {
- TwoByteStringKey key(str, GetHeap()->HashSeed());
- return LookupKey(&key, s);
-}
-
-
MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) {
int entry = FindEntry(key);
@@ -15148,7 +15203,7 @@ MaybeObject* Dictionary<Shape, Key>::Allocate(Heap* heap,
HashTable<Shape, Key>::Allocate(
heap,
at_least_space_for,
- HashTable<Shape, Key>::USE_DEFAULT_MINIMUM_CAPACITY,
+ USE_DEFAULT_MINIMUM_CAPACITY,
pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -15353,6 +15408,15 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
}
}
+Handle<SeededNumberDictionary> SeededNumberDictionary::AddNumberEntry(
+ Handle<SeededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+ dictionary->AddNumberEntry(key, *value, details),
+ SeededNumberDictionary);
+}
MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key,
Object* value,
@@ -15443,7 +15507,7 @@ int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
if (HashTable<Shape, Key>::IsKey(k) &&
- ((filter & SYMBOLIC) == 0 || !k->IsSymbol())) {
+ !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
@@ -15576,7 +15640,7 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor(
// Make sure we preserve dictionary representation if there are too many
// descriptors.
int number_of_elements = NumberOfElements();
- if (number_of_elements > DescriptorArray::kMaxNumberOfDescriptors) return obj;
+ if (number_of_elements > kMaxNumberOfDescriptors) return obj;
if (number_of_elements != NextEnumerationIndex()) {
MaybeObject* maybe_result = GenerateNewEnumerationIndices();
@@ -15714,61 +15778,99 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor(
}
+Handle<ObjectHashSet> ObjectHashSet::EnsureCapacity(
+ Handle<ObjectHashSet> table,
+ int n,
+ Handle<Object> key,
+ PretenureFlag pretenure) {
+ Handle<HashTable<ObjectHashTableShape<1>, Object*> > table_base = table;
+ CALL_HEAP_FUNCTION(table_base->GetIsolate(),
+ table_base->EnsureCapacity(n, *key, pretenure),
+ ObjectHashSet);
+}
+
+
+Handle<ObjectHashSet> ObjectHashSet::Shrink(Handle<ObjectHashSet> table,
+ Handle<Object> key) {
+ Handle<HashTable<ObjectHashTableShape<1>, Object*> > table_base = table;
+ CALL_HEAP_FUNCTION(table_base->GetIsolate(),
+ table_base->Shrink(*key),
+ ObjectHashSet);
+}
+
+
bool ObjectHashSet::Contains(Object* key) {
ASSERT(IsKey(key));
// If the object does not have an identity hash, it was never used as a key.
- { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return false;
- }
+ Object* hash = key->GetHash();
+ if (hash->IsUndefined()) return false;
+
return (FindEntry(key) != kNotFound);
}
-MaybeObject* ObjectHashSet::Add(Object* key) {
- ASSERT(IsKey(key));
+Handle<ObjectHashSet> ObjectHashSet::Add(Handle<ObjectHashSet> table,
+ Handle<Object> key) {
+ ASSERT(table->IsKey(*key));
// Make sure the key object has an identity hash code.
- int hash;
- { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
- if (maybe_hash->IsFailure()) return maybe_hash;
- ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
- hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
- }
- int entry = FindEntry(key);
+ Handle<Object> object_hash = Object::GetOrCreateHash(key,
+ table->GetIsolate());
+
+ int entry = table->FindEntry(*key);
// Check whether key is already present.
- if (entry != kNotFound) return this;
+ if (entry != kNotFound) return table;
// Check whether the hash set should be extended and add entry.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ObjectHashSet* table = ObjectHashSet::cast(obj);
- entry = table->FindInsertionEntry(hash);
- table->set(EntryToIndex(entry), key);
- table->ElementAdded();
- return table;
+ Handle<ObjectHashSet> new_table =
+ ObjectHashSet::EnsureCapacity(table, 1, key);
+ entry = new_table->FindInsertionEntry(Smi::cast(*object_hash)->value());
+ new_table->set(EntryToIndex(entry), *key);
+ new_table->ElementAdded();
+ return new_table;
}
-MaybeObject* ObjectHashSet::Remove(Object* key) {
- ASSERT(IsKey(key));
+Handle<ObjectHashSet> ObjectHashSet::Remove(Handle<ObjectHashSet> table,
+ Handle<Object> key) {
+ ASSERT(table->IsKey(*key));
// If the object does not have an identity hash, it was never used as a key.
- { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return this;
- }
- int entry = FindEntry(key);
+ if (key->GetHash()->IsUndefined()) return table;
+
+ int entry = table->FindEntry(*key);
// Check whether key is actually present.
- if (entry == kNotFound) return this;
+ if (entry == kNotFound) return table;
// Remove entry and try to shrink this hash set.
- set_the_hole(EntryToIndex(entry));
- ElementRemoved();
- return Shrink(key);
+ table->set_the_hole(EntryToIndex(entry));
+ table->ElementRemoved();
+
+ return ObjectHashSet::Shrink(table, key);
+}
+
+
+Handle<ObjectHashTable> ObjectHashTable::EnsureCapacity(
+ Handle<ObjectHashTable> table,
+ int n,
+ Handle<Object> key,
+ PretenureFlag pretenure) {
+ Handle<HashTable<ObjectHashTableShape<2>, Object*> > table_base = table;
+ CALL_HEAP_FUNCTION(table_base->GetIsolate(),
+ table_base->EnsureCapacity(n, *key, pretenure),
+ ObjectHashTable);
+}
+
+
+Handle<ObjectHashTable> ObjectHashTable::Shrink(
+ Handle<ObjectHashTable> table, Handle<Object> key) {
+ Handle<HashTable<ObjectHashTableShape<2>, Object*> > table_base = table;
+ CALL_HEAP_FUNCTION(table_base->GetIsolate(),
+ table_base->Shrink(*key),
+ ObjectHashTable);
}
@@ -15776,10 +15878,9 @@ Object* ObjectHashTable::Lookup(Object* key) {
ASSERT(IsKey(key));
// If the object does not have an identity hash, it was never used as a key.
- { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
- return GetHeap()->the_hole_value();
- }
+ Object* hash = key->GetHash();
+ if (hash->IsUndefined()) {
+ return GetHeap()->the_hole_value();
}
int entry = FindEntry(key);
if (entry == kNotFound) return GetHeap()->the_hole_value();
@@ -15787,38 +15888,36 @@ Object* ObjectHashTable::Lookup(Object* key) {
}
-MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
- ASSERT(IsKey(key));
+Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ ASSERT(table->IsKey(*key));
+
+ Isolate* isolate = table->GetIsolate();
// Make sure the key object has an identity hash code.
- int hash;
- { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
- if (maybe_hash->IsFailure()) return maybe_hash;
- ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
- hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
- }
- int entry = FindEntry(key);
+ Handle<Object> hash = Object::GetOrCreateHash(key, isolate);
+
+ int entry = table->FindEntry(*key);
// Check whether to perform removal operation.
if (value->IsTheHole()) {
- if (entry == kNotFound) return this;
- RemoveEntry(entry);
- return Shrink(key);
+ if (entry == kNotFound) return table;
+ table->RemoveEntry(entry);
+ return Shrink(table, key);
}
// Key is already in table, just overwrite value.
if (entry != kNotFound) {
- set(EntryToIndex(entry) + 1, value);
- return this;
+ table->set(EntryToIndex(entry) + 1, *value);
+ return table;
}
// Check whether the hash table should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ObjectHashTable* table = ObjectHashTable::cast(obj);
- table->AddEntry(table->FindInsertionEntry(hash), key, value);
+ table = EnsureCapacity(table, 1, key);
+ table->AddEntry(table->FindInsertionEntry(Handle<Smi>::cast(hash)->value()),
+ *key,
+ *value);
return table;
}
@@ -16336,26 +16435,25 @@ void JSTypedArray::Neuter() {
}
-Type* PropertyCell::type() {
- return static_cast<Type*>(type_raw());
+HeapType* PropertyCell::type() {
+ return static_cast<HeapType*>(type_raw());
}
-void PropertyCell::set_type(Type* type, WriteBarrierMode ignored) {
+void PropertyCell::set_type(HeapType* type, WriteBarrierMode ignored) {
ASSERT(IsPropertyCell());
set_type_raw(type, ignored);
}
-Handle<Type> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
- Handle<Object> value) {
+Handle<HeapType> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
Isolate* isolate = cell->GetIsolate();
- Handle<Type> old_type(cell->type(), isolate);
+ Handle<HeapType> old_type(cell->type(), isolate);
// TODO(2803): Do not track ConsString as constant because they cannot be
// embedded into code.
- Handle<Type> new_type(value->IsConsString() || value->IsTheHole()
- ? Type::Any()
- : Type::Constant(value, isolate), isolate);
+ Handle<HeapType> new_type = value->IsConsString() || value->IsTheHole()
+ ? HeapType::Any(isolate) : HeapType::Constant(value, isolate);
if (new_type->Is(old_type)) {
return old_type;
@@ -16364,19 +16462,19 @@ Handle<Type> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
cell->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
- if (old_type->Is(Type::None()) || old_type->Is(Type::Undefined())) {
+ if (old_type->Is(HeapType::None()) || old_type->Is(HeapType::Undefined())) {
return new_type;
}
- return handle(Type::Any(), isolate);
+ return HeapType::Any(isolate);
}
void PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
Handle<Object> value) {
cell->set_value(*value);
- if (!Type::Any()->Is(cell->type())) {
- Handle<Type> new_type = UpdatedType(cell, value);
+ if (!HeapType::Any()->Is(cell->type())) {
+ Handle<HeapType> new_type = UpdatedType(cell, value);
cell->set_type(*new_type);
}
}
@@ -16393,14 +16491,6 @@ void PropertyCell::AddDependentCompilationInfo(CompilationInfo* info) {
}
-void PropertyCell::AddDependentCode(Handle<Code> code) {
- Handle<DependentCode> codes = DependentCode::Insert(
- Handle<DependentCode>(dependent_code()),
- DependentCode::kPropertyCellChangedGroup, code);
- if (*codes != dependent_code()) set_dependent_code(*codes);
-}
-
-
const char* GetBailoutReason(BailoutReason reason) {
ASSERT(reason < kLastErrorMessage);
#define ERROR_MESSAGES_TEXTS(C, T) T,
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index c4ff63544a..05ab695de8 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -37,7 +37,9 @@
#include "property-details.h"
#include "smart-pointers.h"
#include "unicode-inl.h"
-#if V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_A64
+#include "a64/constants-a64.h"
+#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
@@ -96,14 +98,14 @@
// - TransitionArray
// - FixedDoubleArray
// - ExternalArray
-// - ExternalPixelArray
-// - ExternalByteArray
-// - ExternalUnsignedByteArray
-// - ExternalShortArray
-// - ExternalUnsignedShortArray
-// - ExternalIntArray
-// - ExternalUnsignedIntArray
-// - ExternalFloatArray
+// - ExternalUint8ClampedArray
+// - ExternalInt8Array
+// - ExternalUint8Array
+// - ExternalInt16Array
+// - ExternalUint16Array
+// - ExternalInt32Array
+// - ExternalUint32Array
+// - ExternalFloat32Array
// - Name
// - String
// - SeqString
@@ -179,6 +181,12 @@ enum KeyedAccessStoreMode {
};
+enum ContextualMode {
+ NOT_CONTEXTUAL,
+ CONTEXTUAL
+};
+
+
static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
STANDARD_STORE;
STATIC_ASSERT(STANDARD_STORE == 0);
@@ -255,13 +263,6 @@ enum NormalizedMapSharingMode {
};
-// Indicates whether a get method should implicitly create the object looked up.
-enum CreationFlag {
- ALLOW_CREATION,
- OMIT_CREATION
-};
-
-
// Indicates whether transitions can be added to a source map or not.
enum TransitionFlag {
INSERT_TRANSITION,
@@ -304,10 +305,15 @@ enum MarkingParity {
EVEN_MARKING_PARITY
};
+// ICs store extra state in a Code object. The default extra state is
+// kNoExtraICState.
+typedef int ExtraICState;
+static const ExtraICState kNoExtraICState = 0;
+
// Instance size sentinel for objects of variable size.
const int kVariableSizeSentinel = 0;
-const int kStubMajorKeyBits = 6;
+const int kStubMajorKeyBits = 7;
const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// All Maps have a field instance_type containing a InstanceType.
@@ -359,6 +365,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
\
V(SYMBOL_TYPE) \
+ \
V(MAP_TYPE) \
V(CODE_TYPE) \
V(ODDBALL_TYPE) \
@@ -372,15 +379,26 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
/* Note: the order of these external array */ \
/* types is relied upon in */ \
/* Object::IsExternalArray(). */ \
- V(EXTERNAL_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_INT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT_ARRAY_TYPE) \
- V(EXTERNAL_DOUBLE_ARRAY_TYPE) \
- V(EXTERNAL_PIXEL_ARRAY_TYPE) \
+ V(EXTERNAL_INT8_ARRAY_TYPE) \
+ V(EXTERNAL_UINT8_ARRAY_TYPE) \
+ V(EXTERNAL_INT16_ARRAY_TYPE) \
+ V(EXTERNAL_UINT16_ARRAY_TYPE) \
+ V(EXTERNAL_INT32_ARRAY_TYPE) \
+ V(EXTERNAL_UINT32_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT32_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT64_ARRAY_TYPE) \
+ V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE) \
+ \
+ V(FIXED_INT8_ARRAY_TYPE) \
+ V(FIXED_UINT8_ARRAY_TYPE) \
+ V(FIXED_INT16_ARRAY_TYPE) \
+ V(FIXED_UINT16_ARRAY_TYPE) \
+ V(FIXED_INT32_ARRAY_TYPE) \
+ V(FIXED_UINT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT64_ARRAY_TYPE) \
+ V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
+ \
V(FILLER_TYPE) \
\
V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \
@@ -473,7 +491,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
ExternalAsciiString) \
V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \
ExternalTwoByteString::kSize, \
- external_string_with_one_bytei_data, \
+ external_string_with_one_byte_data, \
ExternalStringWithOneByteData) \
V(SHORT_EXTERNAL_STRING_TYPE, \
ExternalTwoByteString::kShortSize, \
@@ -691,7 +709,7 @@ enum InstanceType {
| kNotInternalizedTag,
// Non-string names
- SYMBOL_TYPE = kNotStringTag, // LAST_NAME_TYPE, FIRST_NONSTRING_TYPE
+ SYMBOL_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
// Objects allocated in their own spaces (never in new space).
MAP_TYPE,
@@ -706,17 +724,28 @@ enum InstanceType {
FOREIGN_TYPE,
BYTE_ARRAY_TYPE,
FREE_SPACE_TYPE,
- EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
- EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
- EXTERNAL_SHORT_ARRAY_TYPE,
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
- EXTERNAL_INT_ARRAY_TYPE,
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- EXTERNAL_FLOAT_ARRAY_TYPE,
- EXTERNAL_DOUBLE_ARRAY_TYPE,
- EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
+
+ EXTERNAL_INT8_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
+ EXTERNAL_UINT8_ARRAY_TYPE,
+ EXTERNAL_INT16_ARRAY_TYPE,
+ EXTERNAL_UINT16_ARRAY_TYPE,
+ EXTERNAL_INT32_ARRAY_TYPE,
+ EXTERNAL_UINT32_ARRAY_TYPE,
+ EXTERNAL_FLOAT32_ARRAY_TYPE,
+ EXTERNAL_FLOAT64_ARRAY_TYPE,
+ EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
+
+ FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
+ FIXED_UINT8_ARRAY_TYPE,
+ FIXED_INT16_ARRAY_TYPE,
+ FIXED_UINT16_ARRAY_TYPE,
+ FIXED_INT32_ARRAY_TYPE,
+ FIXED_UINT32_ARRAY_TYPE,
+ FIXED_FLOAT32_ARRAY_TYPE,
+ FIXED_FLOAT64_ARRAY_TYPE,
+ FIXED_UINT8_CLAMPED_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
+
FIXED_DOUBLE_ARRAY_TYPE,
- CONSTANT_POOL_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
@@ -747,6 +776,7 @@ enum InstanceType {
BREAK_POINT_INFO_TYPE,
FIXED_ARRAY_TYPE,
+ CONSTANT_POOL_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
JS_MESSAGE_OBJECT_TYPE,
@@ -790,8 +820,11 @@ enum InstanceType {
LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
// Boundaries for testing for an external array.
- FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
- LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
+ FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_INT8_ARRAY_TYPE,
+ LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE,
+ // Boundaries for testing for a fixed typed array.
+ FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
+ LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_UINT8_CLAMPED_ARRAY_TYPE,
// Boundary for promotion to old data space/old pointer space.
LAST_DATA_TYPE = FILLER_TYPE,
// Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
@@ -867,22 +900,19 @@ enum CompareResult {
class AccessorPair;
class AllocationSite;
-class AllocationSiteContext;
+class AllocationSiteCreationContext;
+class AllocationSiteUsageContext;
class DictionaryElementsAccessor;
class ElementsAccessor;
class Failure;
class FixedArrayBase;
+class GlobalObject;
class ObjectVisitor;
class StringStream;
-class Type;
-
-struct ValueInfo : public Malloced {
- ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
- InstanceType type;
- Object* ptr;
- const char* str;
- double number;
-};
+// We cannot just say "class HeapType;" if it is created from a template... =8-?
+template<class> class TypeImpl;
+struct HeapTypeConfig;
+typedef TypeImpl<HeapTypeConfig> HeapType;
// A template-ized version of the IsXXX functions.
@@ -978,15 +1008,25 @@ class MaybeObject BASE_EMBEDDED {
V(Symbol) \
\
V(ExternalArray) \
- V(ExternalByteArray) \
- V(ExternalUnsignedByteArray) \
- V(ExternalShortArray) \
- V(ExternalUnsignedShortArray) \
- V(ExternalIntArray) \
- V(ExternalUnsignedIntArray) \
- V(ExternalFloatArray) \
- V(ExternalDoubleArray) \
- V(ExternalPixelArray) \
+ V(ExternalInt8Array) \
+ V(ExternalUint8Array) \
+ V(ExternalInt16Array) \
+ V(ExternalUint16Array) \
+ V(ExternalInt32Array) \
+ V(ExternalUint32Array) \
+ V(ExternalFloat32Array) \
+ V(ExternalFloat64Array) \
+ V(ExternalUint8ClampedArray) \
+ V(FixedTypedArrayBase) \
+ V(FixedUint8Array) \
+ V(FixedInt8Array) \
+ V(FixedUint16Array) \
+ V(FixedInt16Array) \
+ V(FixedUint32Array) \
+ V(FixedInt32Array) \
+ V(FixedFloat32Array) \
+ V(FixedFloat64Array) \
+ V(FixedUint8ClampedArray) \
V(ByteArray) \
V(FreeSpace) \
V(JSReceiver) \
@@ -1000,7 +1040,6 @@ class MaybeObject BASE_EMBEDDED {
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(DependentCode) \
- V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
V(ConstantPoolArray) \
@@ -1057,110 +1096,126 @@ class MaybeObject BASE_EMBEDDED {
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
- V(kAlignmentMarkerExpected, "alignment marker expected") \
+ V(kAlignmentMarkerExpected, "Alignment marker expected") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kArgumentsObjectValueInATestContext, \
- "arguments object value in a test context") \
- V(kArrayBoilerplateCreationFailed, "array boilerplate creation failed") \
- V(kArrayIndexConstantValueTooBig, "array index constant value too big") \
- V(kAssignmentToArguments, "assignment to arguments") \
+ "Arguments object value in a test context") \
+ V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed") \
+ V(kArrayIndexConstantValueTooBig, "Array index constant value too big") \
+ V(kAssignmentToArguments, "Assignment to arguments") \
V(kAssignmentToLetVariableBeforeInitialization, \
- "assignment to let variable before initialization") \
- V(kAssignmentToLOOKUPVariable, "assignment to LOOKUP variable") \
+ "Assignment to let variable before initialization") \
+ V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable") \
V(kAssignmentToParameterFunctionUsesArgumentsObject, \
- "assignment to parameter, function uses arguments object") \
+ "Assignment to parameter, function uses arguments object") \
V(kAssignmentToParameterInArgumentsObject, \
- "assignment to parameter in arguments object") \
+ "Assignment to parameter in arguments object") \
V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache") \
V(kBadValueContextForArgumentsObjectValue, \
- "bad value context for arguments object value") \
+ "Bad value context for arguments object value") \
V(kBadValueContextForArgumentsValue, \
- "bad value context for arguments value") \
- V(kBailedOutDueToDependencyChange, "bailed out due to dependency change") \
- V(kBailoutWasNotPrepared, "bailout was not prepared") \
+ "Bad value context for arguments value") \
+ V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
+ V(kBailoutWasNotPrepared, "Bailout was not prepared") \
V(kBinaryStubGenerateFloatingPointCode, \
"BinaryStub_GenerateFloatingPointCode") \
V(kBothRegistersWereSmisInSelectNonSmi, \
"Both registers were smis in SelectNonSmi") \
V(kCallToAJavaScriptRuntimeFunction, \
- "call to a JavaScript runtime function") \
+ "Call to a JavaScript runtime function") \
V(kCannotTranslatePositionInChangedArea, \
"Cannot translate position in changed area") \
- V(kCodeGenerationFailed, "code generation failed") \
- V(kCodeObjectNotProperlyPatched, "code object not properly patched") \
- V(kCompoundAssignmentToLookupSlot, "compound assignment to lookup slot") \
- V(kContextAllocatedArguments, "context-allocated arguments") \
- V(kDebuggerIsActive, "debugger is active") \
+ V(kCodeGenerationFailed, "Code generation failed") \
+ V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
+ V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
+ V(kContextAllocatedArguments, "Context-allocated arguments") \
+ V(kCopyBuffersOverlap, "Copy buffers overlap") \
+ V(kCouldNotGenerateZero, "Could not generate +0.0") \
+ V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
+ V(kDebuggerIsActive, "Debugger is active") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
V(kDeclarationInWithContext, "Declaration in with context") \
V(kDefaultNaNModeNotSet, "Default NaN mode not set") \
- V(kDeleteWithGlobalVariable, "delete with global variable") \
- V(kDeleteWithNonGlobalVariable, "delete with non-global variable") \
+ V(kDeleteWithGlobalVariable, "Delete with global variable") \
+ V(kDeleteWithNonGlobalVariable, "Delete with non-global variable") \
V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
V(kDontDeleteCellsCannotContainTheHole, \
"DontDelete cells can't contain the hole") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
+ V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kEval, "eval") \
V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
- V(kExpectedAlignmentMarker, "expected alignment marker") \
+ V(kExpectedAlignmentMarker, "Expected alignment marker") \
+ V(kExpectedAllocationSite, "Expected allocation site") \
+ V(kExpectedFunctionObject, "Expected function object in register") \
+ V(kExpectedHeapNumber, "Expected HeapNumber") \
+ V(kExpectedNativeContext, "Expected native context") \
+ V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
+ V(kExpectedNonNullContext, "Expected non-null context") \
+ V(kExpectedPositiveZero, "Expected +0.0") \
V(kExpectedAllocationSiteInCell, \
"Expected AllocationSite in property cell") \
- V(kExpectedPropertyCellInRegisterA2, \
- "Expected property cell in register a2") \
- V(kExpectedPropertyCellInRegisterEbx, \
- "Expected property cell in register ebx") \
- V(kExpectedPropertyCellInRegisterRbx, \
- "Expected property cell in register rbx") \
+ V(kExpectedFixedArrayInFeedbackVector, \
+ "Expected fixed array in feedback vector") \
+ V(kExpectedFixedArrayInRegisterA2, \
+ "Expected fixed array in register a2") \
+ V(kExpectedFixedArrayInRegisterEbx, \
+ "Expected fixed array in register ebx") \
+ V(kExpectedFixedArrayInRegisterR2, \
+ "Expected fixed array in register r2") \
+ V(kExpectedFixedArrayInRegisterRbx, \
+ "Expected fixed array in register rbx") \
+ V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
V(kExpectingAlignmentForCopyBytes, \
"Expecting alignment for CopyBytes") \
V(kExportDeclaration, "Export declaration") \
V(kExternalStringExpectedButNotFound, \
- "external string expected, but not found") \
- V(kFailedBailedOutLastTime, "failed/bailed out last time") \
+ "External string expected, but not found") \
+ V(kFailedBailedOutLastTime, "Failed/bailed out last time") \
V(kForInStatementIsNotFastCase, "ForInStatement is not fast case") \
V(kForInStatementOptimizationIsDisabled, \
"ForInStatement optimization is disabled") \
V(kForInStatementWithNonLocalEachVariable, \
"ForInStatement with non-local each variable") \
V(kForOfStatement, "ForOfStatement") \
- V(kFrameIsExpectedToBeAligned, "frame is expected to be aligned") \
- V(kFunctionCallsEval, "function calls eval") \
- V(kFunctionIsAGenerator, "function is a generator") \
- V(kFunctionWithIllegalRedeclaration, "function with illegal redeclaration") \
+ V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned") \
+ V(kFunctionCallsEval, "Function calls eval") \
+ V(kFunctionIsAGenerator, "Function is a generator") \
+ V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGeneratorFailedToResume, "Generator failed to resume") \
- V(kGenerator, "generator") \
+ V(kGenerator, "Generator") \
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
+ V(kHydrogenFilter, "Optimization disabled by filter") \
V(kImportDeclaration, "Import declaration") \
V(kImproperObjectOnPrototypeChainForStore, \
- "improper object on prototype chain for store") \
+ "Improper object on prototype chain for store") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
- V(kInlinedRuntimeFunctionClassOf, "inlined runtime function: ClassOf") \
+ V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf") \
V(kInlinedRuntimeFunctionFastAsciiArrayJoin, \
- "inlined runtime function: FastAsciiArrayJoin") \
+ "Inlined runtime function: FastAsciiArrayJoin") \
V(kInlinedRuntimeFunctionGeneratorNext, \
- "inlined runtime function: GeneratorNext") \
+ "Inlined runtime function: GeneratorNext") \
V(kInlinedRuntimeFunctionGeneratorThrow, \
- "inlined runtime function: GeneratorThrow") \
+ "Inlined runtime function: GeneratorThrow") \
V(kInlinedRuntimeFunctionGetFromCache, \
- "inlined runtime function: GetFromCache") \
+ "Inlined runtime function: GetFromCache") \
V(kInlinedRuntimeFunctionIsNonNegativeSmi, \
- "inlined runtime function: IsNonNegativeSmi") \
- V(kInlinedRuntimeFunctionIsRegExpEquivalent, \
- "inlined runtime function: IsRegExpEquivalent") \
+ "Inlined runtime function: IsNonNegativeSmi") \
V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf, \
- "inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
- V(kInliningBailedOut, "inlining bailed out") \
+ "Inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
+ V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
- "input GPR is expected to have upper32 cleared") \
+ "Input GPR is expected to have upper32 cleared") \
+ V(kInputStringTooLong, "Input string too long") \
V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
"InstanceofStub unexpected call site cache (check)") \
V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
@@ -1174,10 +1229,11 @@ class MaybeObject BASE_EMBEDDED {
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
+ V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
- V(kInvalidLeftHandSideInAssignment, "invalid left-hand side in assignment") \
- V(kInvalidLhsInCompoundAssignment, "invalid lhs in compound assignment") \
- V(kInvalidLhsInCountOperation, "invalid lhs in count operation") \
+ V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
+ V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
+ V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
V(kInvalidMinLength, "Invalid min_length") \
V(kJSGlobalObjectNativeContextShouldBeANativeContext, \
"JSGlobalObject::native_context should be a native context") \
@@ -1186,14 +1242,17 @@ class MaybeObject BASE_EMBEDDED {
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
+ V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnA64, \
+ "LiveEdit frame dropping is not supported on a64") \
V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
"LiveEdit frame dropping is not supported on arm") \
V(kLiveEditFrameDroppingIsNotSupportedOnMips, \
"LiveEdit frame dropping is not supported on mips") \
V(kLiveEdit, "LiveEdit") \
V(kLookupVariableInCountOperation, \
- "lookup variable in count operation") \
+ "Lookup variable in count operation") \
V(kMapIsNoLongerInEax, "Map is no longer in eax") \
V(kModuleDeclaration, "Module declaration") \
V(kModuleLiteral, "Module literal") \
@@ -1202,25 +1261,27 @@ class MaybeObject BASE_EMBEDDED {
V(kModuleVariable, "Module variable") \
V(kModuleUrl, "Module url") \
V(kNativeFunctionLiteral, "Native function literal") \
- V(kNoCasesLeft, "no cases left") \
+ V(kNoCasesLeft, "No cases left") \
V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \
"No empty arrays here in EmitFastAsciiArrayJoin") \
V(kNonInitializerAssignmentToConst, \
- "non-initializer assignment to const") \
+ "Non-initializer assignment to const") \
V(kNonSmiIndex, "Non-smi index") \
V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
V(kNonSmiValue, "Non-smi value") \
+ V(kNonObject, "Non-object value") \
V(kNotEnoughVirtualRegistersForValues, \
- "not enough virtual registers for values") \
+ "Not enough virtual registers for values") \
V(kNotEnoughSpillSlotsForOsr, \
- "not enough spill slots for OSR") \
+ "Not enough spill slots for OSR") \
V(kNotEnoughVirtualRegistersRegalloc, \
- "not enough virtual registers (regalloc)") \
- V(kObjectFoundInSmiOnlyArray, "object found in smi-only array") \
+ "Not enough virtual registers (regalloc)") \
+ V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, \
"Object literal with complex property") \
V(kOddballInStringTableIsNotUndefinedOrTheHole, \
- "oddball in string table is not undefined or the hole") \
+ "Oddball in string table is not undefined or the hole") \
+ V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
@@ -1230,38 +1291,53 @@ class MaybeObject BASE_EMBEDDED {
V(kOperandIsNotAString, "Operand is not a string") \
V(kOperandIsNotSmi, "Operand is not smi") \
V(kOperandNotANumber, "Operand not a number") \
- V(kOptimizedTooManyTimes, "optimized too many times") \
+ V(kOptimizationDisabled, "Optimization is disabled") \
+ V(kOptimizedTooManyTimes, "Optimized too many times") \
V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
"Out of virtual registers while trying to allocate temp register") \
- V(kParseScopeError, "parse/scope error") \
- V(kPossibleDirectCallToEval, "possible direct call to eval") \
+ V(kParseScopeError, "Parse/scope error") \
+ V(kPossibleDirectCallToEval, "Possible direct call to eval") \
+ V(kPreconditionsWereNotMet, "Preconditions were not met") \
V(kPropertyAllocationCountFailed, "Property allocation count failed") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
- "reference to a variable which requires dynamic lookup") \
+ "Reference to a variable which requires dynamic lookup") \
V(kReferenceToGlobalLexicalVariable, \
- "reference to global lexical variable") \
- V(kReferenceToUninitializedVariable, "reference to uninitialized variable") \
+ "Reference to global lexical variable") \
+ V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
- V(kRegisterWasClobbered, "register was clobbered") \
+ V(kRegisterWasClobbered, "Register was clobbered") \
+ V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+ V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
+ V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
V(kScopedBlock, "ScopedBlock") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
- V(kStackFrameTypesMustMatch, "stack frame types must match") \
+ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
+ V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \
"SwitchStatement: mixed or non-literal switch labels") \
V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \
+ V(kTheCurrentStackPointerIsBelowCsp, \
+ "The current stack pointer is below csp") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
V(kTheInstructionToPatchShouldBeALoadFromPc, \
"The instruction to patch should be a load from pc") \
+ V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
+ "The instruction to patch should be a ldr literal") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \
"The instruction to patch should be an ori") \
- V(kTooManyParametersLocals, "too many parameters/locals") \
- V(kTooManyParameters, "too many parameters") \
+ V(kTheSourceAndDestinationAreTheSame, \
+ "The source and destination are the same") \
+ V(kTheStackWasCorruptedByMacroAssemblerCall, \
+ "The stack was corrupted by MacroAssembler::Call()") \
+ V(kTooManyParametersLocals, "Too many parameters/locals") \
+ V(kTooManyParameters, "Too many parameters") \
V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
+ V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
V(kToOperandIsDoubleRegisterUnimplemented, \
"ToOperand IsDoubleRegister unimplemented") \
V(kToOperandUnsupportedDoubleImmediate, \
@@ -1270,10 +1346,12 @@ class MaybeObject BASE_EMBEDDED {
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
+ V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
V(kUndoAllocationOfNonAllocatedMemory, \
"Undo allocation of non allocated memory") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
+ V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
@@ -1300,34 +1378,38 @@ class MaybeObject BASE_EMBEDDED {
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
+ V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
"Unexpected number of pre-allocated property fields") \
+ V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStringFunction, "Unexpected String function") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedStringWrapperInstanceSize, \
"Unexpected string wrapper instance size") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
+ V(kUnexpectedValue, "Unexpected value") \
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
+ V(kUnimplemented, "unimplemented") \
V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
- V(kUnknown, "unknown") \
+ V(kUnknown, "Unknown") \
V(kUnsupportedConstCompoundAssignment, \
- "unsupported const compound assignment") \
+ "Unsupported const compound assignment") \
V(kUnsupportedCountOperationWithConst, \
- "unsupported count operation with const") \
- V(kUnsupportedDoubleImmediate, "unsupported double immediate") \
- V(kUnsupportedLetCompoundAssignment, "unsupported let compound assignment") \
+ "Unsupported count operation with const") \
+ V(kUnsupportedDoubleImmediate, "Unsupported double immediate") \
+ V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
V(kUnsupportedLookupSlotInDeclaration, \
- "unsupported lookup slot in declaration") \
+ "Unsupported lookup slot in declaration") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const variable") \
- V(kUnsupportedTaggedImmediate, "unsupported tagged immediate") \
+ V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
V(kVariableResolvedToWithContext, "Variable resolved to with context") \
V(kWeShouldNotHaveAnEmptyLexicalContext, \
- "we should not have an empty lexical context") \
+ "We should not have an empty lexical context") \
V(kWithStatement, "WithStatement") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
@@ -1365,10 +1447,6 @@ class Object : public MaybeObject {
inline bool IsExternal();
inline bool IsAccessorInfo();
- // Returns true if this object is an instance of the specified
- // function template.
- inline bool IsInstanceOf(FunctionTemplateInfo* type);
-
inline bool IsStruct();
#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) inline bool Is##Name();
STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
@@ -1376,6 +1454,7 @@ class Object : public MaybeObject {
INLINE(bool IsSpecObject());
INLINE(bool IsSpecFunction());
+ bool IsCallable();
// Oddball testing.
INLINE(bool IsUndefined());
@@ -1508,11 +1587,19 @@ class Object : public MaybeObject {
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype(Isolate* isolate);
+ Map* GetMarkerMap(Isolate* isolate);
+
+ // Returns the permanent hash code associated with this object. May return
+ // undefined if not yet created.
+ Object* GetHash();
// Returns the permanent hash code associated with this object depending on
- // the actual object type. Might return a failure in case no hash was
- // created yet or GC was caused by creation.
- MUST_USE_RESULT MaybeObject* GetHash(CreationFlag flag);
+ // the actual object type. May create and store a hash code if needed and none
+ // exists.
+ // TODO(rafaelw): Remove isolate parameter when objects.cc is fully
+ // handlified.
+ static Handle<Object> GetOrCreateHash(Handle<Object> object,
+ Isolate* isolate);
// Checks whether this object has the same value as the given one. This
// function is implemented according to ES5, section 9.12 and can be used
@@ -2003,8 +2090,13 @@ class JSReceiver: public HeapObject {
inline Object* GetConstructor();
// Retrieves a permanent object identity hash code. The undefined value might
- // be returned in case no hash was created yet and OMIT_CREATION was used.
- inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+ // be returned in case no hash was created yet.
+ inline Object* GetIdentityHash();
+
+ // Retrieves a permanent object identity hash code. May create and store a
+ // hash code if needed and none exists.
+ inline static Handle<Object> GetOrCreateIdentityHash(
+ Handle<JSReceiver> object);
// Lookup a property. If found, the result is valid and has
// detailed information.
@@ -2036,6 +2128,9 @@ class JSReceiver: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
+// Forward declaration for JSObject::GetOrCreateHiddenPropertiesHashTable.
+class ObjectHashTable;
+
// The JSObject describes real heap allocated JavaScript objects with
// properties.
// Note that the map of JSObject changes during execution to enable inline
@@ -2090,22 +2185,24 @@ class JSObject: public JSReceiver {
inline bool HasFastHoleyElements();
inline bool HasNonStrictArgumentsElements();
inline bool HasDictionaryElements();
- inline bool HasExternalPixelElements();
+
+ inline bool HasExternalUint8ClampedElements();
inline bool HasExternalArrayElements();
- inline bool HasExternalByteElements();
- inline bool HasExternalUnsignedByteElements();
- inline bool HasExternalShortElements();
- inline bool HasExternalUnsignedShortElements();
- inline bool HasExternalIntElements();
- inline bool HasExternalUnsignedIntElements();
- inline bool HasExternalFloatElements();
- inline bool HasExternalDoubleElements();
+ inline bool HasExternalInt8Elements();
+ inline bool HasExternalUint8Elements();
+ inline bool HasExternalInt16Elements();
+ inline bool HasExternalUint16Elements();
+ inline bool HasExternalInt32Elements();
+ inline bool HasExternalUint32Elements();
+ inline bool HasExternalFloat32Elements();
+ inline bool HasExternalFloat64Elements();
+
+ inline bool HasFixedTypedArrayElements();
+
bool HasFastArgumentsElements();
bool HasDictionaryArgumentsElements();
inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
- inline bool ShouldTrackAllocationInfo();
-
inline void set_map_and_elements(
Map* map,
FixedArrayBase* value,
@@ -2185,23 +2282,14 @@ class JSObject: public JSReceiver {
// or returns an empty handle if such a map is not yet available.
static Handle<Object> TryMigrateInstance(Handle<JSObject> instance);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributesTrampoline(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
-
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- Object* GetNormalizedProperty(LookupResult* result);
+ Object* GetNormalizedProperty(const LookupResult* result);
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
static void SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
+ const LookupResult* result,
Handle<Object> value);
// Sets the property value in a normalized object given (key, value, details).
@@ -2272,10 +2360,6 @@ class JSObject: public JSReceiver {
// been modified since it was created. May give false positives.
bool IsDirty();
- // If the receiver is a JSGlobalProxy this method will return its prototype,
- // otherwise the result is the receiver itself.
- inline Object* BypassGlobalProxy();
-
// Accessors for hidden properties object.
//
// Hidden properties are not local properties of the object itself.
@@ -2287,11 +2371,9 @@ class JSObject: public JSReceiver {
// Sets a hidden property on this object. Returns this object if successful,
// undefined if called on a detached proxy.
- static Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+ static Handle<Object> SetHiddenProperty(Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value);
- // Returns a failure if a GC is required.
- MUST_USE_RESULT MaybeObject* SetHiddenProperty(Name* key, Object* value);
// Gets the value of a hidden property with the given key. Returns the hole
// if the property doesn't exist (or if called on a detached proxy),
// otherwise returns the value set for the key.
@@ -2303,8 +2385,7 @@ class JSObject: public JSReceiver {
// Returns true if the object has a property with the hidden string as name.
bool HasHiddenProperties();
- static int GetIdentityHash(Handle<JSObject> object);
- static void SetIdentityHash(Handle<JSObject> object, Smi* hash);
+ static void SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash);
inline void ValidateElements();
@@ -2349,24 +2430,10 @@ class JSObject: public JSReceiver {
AccessorPair* GetLocalPropertyAccessorPair(Name* name);
AccessorPair* GetLocalElementAccessorPair(uint32_t index);
- MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype);
-
- MUST_USE_RESULT MaybeObject* SetDictionaryElement(
- uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode = SET_PROPERTY);
-
- MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype = true);
+ static Handle<Object> SetFastElement(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode,
+ bool check_prototype);
static Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
@@ -2378,14 +2445,6 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode,
- SetPropertyMode set_mode = SET_PROPERTY);
-
- // A Failure object is returned if GC is needed.
- MUST_USE_RESULT MaybeObject* SetElement(
- uint32_t index,
- Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype = true,
@@ -2402,6 +2461,11 @@ class JSObject: public JSReceiver {
kDontAllowSmiElements
};
+ static Handle<FixedArray> SetFastElementsCapacityAndLength(
+ Handle<JSObject> object,
+ int capacity,
+ int length,
+ SetFastElementsCapacitySmiMode smi_mode);
// Replace the elements' backing store with fast elements of the given
// capacity. Update the length for JSArrays. Returns the new backing
// store.
@@ -2409,6 +2473,10 @@ class JSObject: public JSReceiver {
int capacity,
int length,
SetFastElementsCapacitySmiMode smi_mode);
+ static void SetFastDoubleElementsCapacityAndLength(
+ Handle<JSObject> object,
+ int capacity,
+ int length);
MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
int capacity,
int length);
@@ -2478,7 +2546,6 @@ class JSObject: public JSReceiver {
ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
// TODO(mstarzinger): Both public because of ConvertAnsSetLocalProperty().
static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
@@ -2548,13 +2615,17 @@ class JSObject: public JSReceiver {
static void SetObserved(Handle<JSObject> object);
// Copy object.
- static Handle<JSObject> Copy(Handle<JSObject> object,
- Handle<AllocationSite> site);
+ enum DeepCopyHints {
+ kNoHints = 0,
+ kObjectIsShallowArray = 1
+ };
+
static Handle<JSObject> Copy(Handle<JSObject> object);
static Handle<JSObject> DeepCopy(Handle<JSObject> object,
- AllocationSiteContext* site_context);
+ AllocationSiteUsageContext* site_context,
+ DeepCopyHints hints = kNoHints);
static Handle<JSObject> DeepWalk(Handle<JSObject> object,
- AllocationSiteContext* site_context);
+ AllocationSiteCreationContext* site_context);
// Casting.
static inline JSObject* cast(Object* obj);
@@ -2631,7 +2702,7 @@ class JSObject: public JSReceiver {
// don't want to be wasteful with long lived objects.
static const int kMaxUncheckedOldFastElementsLength = 500;
- // Note that Heap::MaxRegularSpaceAllocationSize() puts a limit on
+ // Note that Page::kMaxRegularHeapObjectSize puts a limit on
// permissible values (see the ASSERT in heap.cc).
static const int kInitialMaxFastElementArray = 100000;
@@ -2661,14 +2732,15 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> old_value);
- // Deliver change records to observers. May cause GC.
- static void DeliverChangeRecords(Isolate* isolate);
-
private:
friend class DictionaryElementsAccessor;
friend class JSReceiver;
friend class Object;
+ static void UpdateAllocationSite(Handle<JSObject> object,
+ ElementsKind to_kind);
+ MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
+
// Used from Object::GetProperty().
static Handle<Object> GetPropertyWithFailedAccessCheck(
Handle<JSObject> object,
@@ -2696,25 +2768,42 @@ class JSObject: public JSReceiver {
Handle<Object> value,
Handle<JSObject> holder,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
+ static Handle<Object> SetElementWithInterceptor(
+ Handle<JSObject> object,
uint32_t index,
- Object* value,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
+ static Handle<Object> SetElementWithoutInterceptor(
+ Handle<JSObject> object,
uint32_t index,
- Object* value,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
+ static Handle<Object> SetElementWithCallbackSetterInPrototypes(
+ Handle<JSObject> object,
uint32_t index,
- Object* value,
+ Handle<Object> value,
bool* found,
StrictModeFlag strict_mode);
+ static Handle<Object> SetDictionaryElement(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode = SET_PROPERTY);
+ static Handle<Object> SetFastDoubleElement(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode,
+ bool check_prototype = true);
// Searches the prototype chain for property 'name'. If it is found and
// has a setter, invoke it and set '*done' to true. If it is found and is
@@ -2858,23 +2947,25 @@ class JSObject: public JSReceiver {
Handle<Object> accessor,
PropertyAttributes attributes);
- enum InitializeHiddenProperties {
- CREATE_NEW_IF_ABSENT,
- ONLY_RETURN_INLINE_VALUE
- };
- // If create_if_absent is true, return the hash table backing store
- // for hidden properties. If there is no backing store, allocate one.
- // If create_if_absent is false, return the hash table backing store
- // or the inline stored identity hash, whatever is found.
- MUST_USE_RESULT MaybeObject* GetHiddenPropertiesHashTable(
- InitializeHiddenProperties init_option);
+ // Return the hash table backing store or the inline stored identity hash,
+ // whatever is found.
+ MUST_USE_RESULT Object* GetHiddenPropertiesHashTable();
+
+ // Return the hash table backing store for hidden properties. If there is no
+ // backing store, allocate one.
+ static Handle<ObjectHashTable> GetOrCreateHiddenPropertiesHashtable(
+ Handle<JSObject> object);
+
// Set the hidden property backing store to either a hash table or
// the inline-stored identity hash.
- MUST_USE_RESULT MaybeObject* SetHiddenPropertiesHashTable(
- Object* value);
+ static Handle<Object> SetHiddenPropertiesHashTable(
+ Handle<JSObject> object,
+ Handle<Object> value);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+ MUST_USE_RESULT Object* GetIdentityHash();
+
+ static Handle<Object> GetOrCreateIdentityHash(Handle<JSObject> object);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2926,6 +3017,9 @@ class FixedArray: public FixedArrayBase {
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
+ // Shrink length and insert filler objects.
+ void Shrink(int length);
+
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
MUST_USE_RESULT MaybeObject* CopySize(int new_length,
@@ -2946,6 +3040,11 @@ class FixedArray: public FixedArrayBase {
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
+ // Garbage collection support.
+ Object** RawFieldOfElementAt(int index) {
+ return HeapObject::RawField(this, OffsetOfElementAt(index));
+ }
+
// Casting.
static inline FixedArray* cast(Object* obj);
@@ -3371,10 +3470,6 @@ class DescriptorArray: public FixedArray {
bool IsEqualTo(DescriptorArray* other);
#endif
- // The maximum number of descriptors we want in a descriptor array (should
- // fit in a page).
- static const int kMaxNumberOfDescriptors = 1024 + 512;
-
// Returns the fixed array length required to hold number_of_descriptors
// descriptors.
static int LengthFor(int number_of_descriptors) {
@@ -3484,11 +3579,6 @@ class BaseShape {
template<typename Shape, typename Key>
class HashTable: public FixedArray {
public:
- enum MinimumCapacity {
- USE_DEFAULT_MINIMUM_CAPACITY,
- USE_CUSTOM_MINIMUM_CAPACITY
- };
-
// Wrapper methods
inline uint32_t Hash(Key key) {
if (Shape::UsesSeed) {
@@ -3599,6 +3689,9 @@ class HashTable: public FixedArray {
void Rehash(Key key);
protected:
+ friend class ObjectHashSet;
+ friend class ObjectHashTable;
+
// Find the entry at which to insert element with the given key that
// has the given hash value.
uint32_t FindInsertionEntry(uint32_t hash);
@@ -3714,21 +3807,8 @@ class StringTable: public HashTable<StringTableShape, HashTableKey*> {
// added. The return value is the string table which might have
// been enlarged. If the return value is not a failure, the string
// pointer *s is set to the string found.
- MUST_USE_RESULT MaybeObject* LookupUtf8String(
- Vector<const char> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupOneByteString(
- Vector<const uint8_t> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupSubStringOneByteString(
- Handle<SeqOneByteString> str,
- int from,
- int length,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupTwoByteString(
- Vector<const uc16> str,
- Object** s);
MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s);
+ MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s);
// Looks up a string that is equal to the given string and returns
// true if it is found, assigning the string to the given output
@@ -3740,8 +3820,6 @@ class StringTable: public HashTable<StringTableShape, HashTableKey*> {
static inline StringTable* cast(Object* obj);
private:
- MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s);
-
template <bool seq_ascii> friend class JsonParser;
DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
@@ -3976,6 +4054,11 @@ class SeededNumberDictionary
// Type specific at put (default NONE attributes is used when adding).
MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
+ MUST_USE_RESULT static Handle<SeededNumberDictionary> AddNumberEntry(
+ Handle<SeededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value,
+ PropertyDetails details);
MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key,
Object* value,
PropertyDetails details);
@@ -4062,11 +4145,23 @@ class ObjectHashSet: public HashTable<ObjectHashTableShape<1>, Object*> {
// Looks up whether the given key is part of this hash set.
bool Contains(Object* key);
+ static Handle<ObjectHashSet> EnsureCapacity(
+ Handle<ObjectHashSet> table,
+ int n,
+ Handle<Object> key,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Attempt to shrink hash table after removal of key.
+ static Handle<ObjectHashSet> Shrink(Handle<ObjectHashSet> table,
+ Handle<Object> key);
+
// Adds the given key to this hash set.
- MUST_USE_RESULT MaybeObject* Add(Object* key);
+ static Handle<ObjectHashSet> Add(Handle<ObjectHashSet> table,
+ Handle<Object> key);
// Removes the given key from this hash set.
- MUST_USE_RESULT MaybeObject* Remove(Object* key);
+ static Handle<ObjectHashSet> Remove(Handle<ObjectHashSet> table,
+ Handle<Object> key);
};
@@ -4079,13 +4174,25 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
return reinterpret_cast<ObjectHashTable*>(obj);
}
+ static Handle<ObjectHashTable> EnsureCapacity(
+ Handle<ObjectHashTable> table,
+ int n,
+ Handle<Object> key,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Attempt to shrink hash table after removal of key.
+ static Handle<ObjectHashTable> Shrink(Handle<ObjectHashTable> table,
+ Handle<Object> key);
+
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
Object* Lookup(Object* key);
// Adds (or overwrites) the value associated with the given key. Mapping a
// key to the hole value causes removal of the whole entry.
- MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
+ static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value);
private:
friend class MarkCompactCollector;
@@ -4500,6 +4607,20 @@ class FreeSpace: public HeapObject {
};
+// V has parameters (Type, type, TYPE, C type, element_size)
+#define TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t, 1) \
+ V(Int8, int8, INT8, int8_t, 1) \
+ V(Uint16, uint16, UINT16, uint16_t, 2) \
+ V(Int16, int16, INT16, int16_t, 2) \
+ V(Uint32, uint32, UINT32, uint32_t, 4) \
+ V(Int32, int32, INT32, int32_t, 4) \
+ V(Float32, float32, FLOAT32, float, 4) \
+ V(Float64, float64, FLOAT64, double, 8) \
+ V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
+
+
+
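Editor's note: TYPED_ARRAYS is an X-macro — callers pass their own V(...) and get one expansion per element type, exactly as the traits macro further down does. A hypothetical consumer (ELEMENT_SIZE_CASE and the helper function are illustrative, not part of this header) could map instance types back to element sizes like this:

    // Hypothetical X-macro consumer; not part of objects.h.
    #define ELEMENT_SIZE_CASE(Type, type, TYPE, ctype, size) \
      case EXTERNAL_##TYPE##_ARRAY_TYPE: return size;

    inline int ExternalArrayElementSize(InstanceType instance_type) {
      switch (instance_type) {
        TYPED_ARRAYS(ELEMENT_SIZE_CASE)
        default: return 0;  // not an external array
      }
    }
    #undef ELEMENT_SIZE_CASE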
// An ExternalArray represents a fixed-size array of primitive values
// which live outside the JavaScript heap. Its subclasses are used to
// implement the CanvasArray types being defined in the WebGL
@@ -4536,7 +4657,7 @@ class ExternalArray: public FixedArrayBase {
};
-// A ExternalPixelArray represents a fixed-size byte array with special
+// A ExternalUint8ClampedArray represents a fixed-size byte array with special
// semantics used for implementing the CanvasPixelArray object. Please see the
// specification at:
@@ -4544,9 +4665,9 @@ class ExternalArray: public FixedArrayBase {
// multipage/the-canvas-element.html#canvaspixelarray
// In particular, write access clamps the value written to 0 or 255 if the
// value written is outside this range.
-class ExternalPixelArray: public ExternalArray {
+class ExternalUint8ClampedArray: public ExternalArray {
public:
- inline uint8_t* external_pixel_pointer();
+ inline uint8_t* external_uint8_clamped_pointer();
// Setter and getter.
inline uint8_t get_scalar(int index);
@@ -4557,202 +4678,304 @@ class ExternalPixelArray: public ExternalArray {
// undefined and clamps the converted value between 0 and 255.
Object* SetValue(uint32_t index, Object* value);
+ static Handle<Object> SetValue(Handle<ExternalUint8ClampedArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
// Casting.
- static inline ExternalPixelArray* cast(Object* obj);
+ static inline ExternalUint8ClampedArray* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalPixelArray)
- DECLARE_VERIFIER(ExternalPixelArray)
+ DECLARE_PRINTER(ExternalUint8ClampedArray)
+ DECLARE_VERIFIER(ExternalUint8ClampedArray)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalPixelArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint8ClampedArray);
};
-class ExternalByteArray: public ExternalArray {
+class ExternalInt8Array: public ExternalArray {
public:
// Setter and getter.
inline int8_t get_scalar(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, int8_t value);
+ static Handle<Object> SetValue(Handle<ExternalInt8Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
// Casting.
- static inline ExternalByteArray* cast(Object* obj);
+ static inline ExternalInt8Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalByteArray)
- DECLARE_VERIFIER(ExternalByteArray)
+ DECLARE_PRINTER(ExternalInt8Array)
+ DECLARE_VERIFIER(ExternalInt8Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalByteArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt8Array);
};
-class ExternalUnsignedByteArray: public ExternalArray {
+class ExternalUint8Array: public ExternalArray {
public:
// Setter and getter.
inline uint8_t get_scalar(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, uint8_t value);
+ static Handle<Object> SetValue(Handle<ExternalUint8Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
// Casting.
- static inline ExternalUnsignedByteArray* cast(Object* obj);
+ static inline ExternalUint8Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalUnsignedByteArray)
- DECLARE_VERIFIER(ExternalUnsignedByteArray)
+ DECLARE_PRINTER(ExternalUint8Array)
+ DECLARE_VERIFIER(ExternalUint8Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedByteArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint8Array);
};
-class ExternalShortArray: public ExternalArray {
+class ExternalInt16Array: public ExternalArray {
public:
// Setter and getter.
inline int16_t get_scalar(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, int16_t value);
+ static Handle<Object> SetValue(Handle<ExternalInt16Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
// Casting.
- static inline ExternalShortArray* cast(Object* obj);
+ static inline ExternalInt16Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalShortArray)
- DECLARE_VERIFIER(ExternalShortArray)
+ DECLARE_PRINTER(ExternalInt16Array)
+ DECLARE_VERIFIER(ExternalInt16Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalShortArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt16Array);
};
-class ExternalUnsignedShortArray: public ExternalArray {
+class ExternalUint16Array: public ExternalArray {
public:
// Setter and getter.
inline uint16_t get_scalar(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, uint16_t value);
+ static Handle<Object> SetValue(Handle<ExternalUint16Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
// Casting.
- static inline ExternalUnsignedShortArray* cast(Object* obj);
+ static inline ExternalUint16Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalUnsignedShortArray)
- DECLARE_VERIFIER(ExternalUnsignedShortArray)
+ DECLARE_PRINTER(ExternalUint16Array)
+ DECLARE_VERIFIER(ExternalUint16Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedShortArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint16Array);
};
-class ExternalIntArray: public ExternalArray {
+class ExternalInt32Array: public ExternalArray {
public:
// Setter and getter.
inline int32_t get_scalar(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, int32_t value);
+ static Handle<Object> SetValue(Handle<ExternalInt32Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
// Casting.
- static inline ExternalIntArray* cast(Object* obj);
+ static inline ExternalInt32Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalIntArray)
- DECLARE_VERIFIER(ExternalIntArray)
+ DECLARE_PRINTER(ExternalInt32Array)
+ DECLARE_VERIFIER(ExternalInt32Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalIntArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt32Array);
};
-class ExternalUnsignedIntArray: public ExternalArray {
+class ExternalUint32Array: public ExternalArray {
public:
// Setter and getter.
inline uint32_t get_scalar(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, uint32_t value);
+ static Handle<Object> SetValue(Handle<ExternalUint32Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
// Casting.
- static inline ExternalUnsignedIntArray* cast(Object* obj);
+ static inline ExternalUint32Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalUnsignedIntArray)
- DECLARE_VERIFIER(ExternalUnsignedIntArray)
+ DECLARE_PRINTER(ExternalUint32Array)
+ DECLARE_VERIFIER(ExternalUint32Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedIntArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint32Array);
};
-class ExternalFloatArray: public ExternalArray {
+class ExternalFloat32Array: public ExternalArray {
public:
// Setter and getter.
inline float get_scalar(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, float value);
+ static Handle<Object> SetValue(Handle<ExternalFloat32Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
// Casting.
- static inline ExternalFloatArray* cast(Object* obj);
+ static inline ExternalFloat32Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalFloatArray)
- DECLARE_VERIFIER(ExternalFloatArray)
+ DECLARE_PRINTER(ExternalFloat32Array)
+ DECLARE_VERIFIER(ExternalFloat32Array)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloatArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat32Array);
};
-class ExternalDoubleArray: public ExternalArray {
+class ExternalFloat64Array: public ExternalArray {
public:
// Setter and getter.
inline double get_scalar(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, double value);
+ static Handle<Object> SetValue(Handle<ExternalFloat64Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
// Casting.
- static inline ExternalDoubleArray* cast(Object* obj);
+ static inline ExternalFloat64Array* cast(Object* obj);
// Dispatched behavior.
- DECLARE_PRINTER(ExternalDoubleArray)
- DECLARE_VERIFIER(ExternalDoubleArray)
+ DECLARE_PRINTER(ExternalFloat64Array)
+ DECLARE_VERIFIER(ExternalFloat64Array)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat64Array);
+};
+
+
+class FixedTypedArrayBase: public FixedArrayBase {
+ public:
+ // Casting:
+ static inline FixedTypedArrayBase* cast(Object* obj);
+
+ static const int kDataOffset = kHeaderSize;
+
+ inline int size();
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalDoubleArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
};
+template <class Traits>
+class FixedTypedArray: public FixedTypedArrayBase {
+ public:
+ typedef typename Traits::ElementType ElementType;
+ static const InstanceType kInstanceType = Traits::kInstanceType;
+
+ // Casting:
+ static inline FixedTypedArray<Traits>* cast(Object* obj);
+
+ static inline int ElementOffset(int index) {
+ return kDataOffset + index * sizeof(ElementType);
+ }
+
+ static inline int SizeFor(int length) {
+ return ElementOffset(length);
+ }
+
+ inline ElementType get_scalar(int index);
+ MUST_USE_RESULT inline MaybeObject* get(int index);
+ inline void set(int index, ElementType value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+
+ static Handle<Object> SetValue(Handle<FixedTypedArray<Traits> > array,
+ uint32_t index,
+ Handle<Object> value);
+
+ DECLARE_PRINTER(FixedTypedArray)
+ DECLARE_VERIFIER(FixedTypedArray)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray);
+};
+
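Editor's note: the layout helpers above are plain arithmetic over the element type. For example, assuming kDataOffset is the FixedArrayBase header size, a FixedInt32Array (defined via the traits macro just below) lays out as:

    // FixedTypedArray<Int32ArrayTraits>::ElementOffset(i) == kDataOffset + i * sizeof(int32_t)
    // FixedTypedArray<Int32ArrayTraits>::SizeFor(4)       == kDataOffset + 16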
+#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \
+ class Type##ArrayTraits { \
+ public: \
+ typedef elementType ElementType; \
+ static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
+ static const char* Designator() { return #type " array"; } \
+ static inline MaybeObject* ToObject(Heap* heap, elementType scalar); \
+ static elementType defaultValue() { return 0; } \
+ }; \
+ \
+ typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array;
+
+TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
+
+#undef FIXED_TYPED_ARRAY_TRAITS
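Editor's note: for reference, the traits macro applied to the Uint8 row of TYPED_ARRAYS expands to roughly the following (a hand-written approximation, not compiler output):

    // Approximate expansion of FIXED_TYPED_ARRAY_TRAITS(Uint8, uint8, UINT8, uint8_t, 1).
    class Uint8ArrayTraits {
     public:
      typedef uint8_t ElementType;
      static const InstanceType kInstanceType = FIXED_UINT8_ARRAY_TYPE;
      static const char* Designator() { return "uint8 array"; }
      static inline MaybeObject* ToObject(Heap* heap, uint8_t scalar);
      static uint8_t defaultValue() { return 0; }
    };

    typedef FixedTypedArray<Uint8ArrayTraits> FixedUint8Array;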
+
// DeoptimizationInputData is a fixed array used to hold the deoptimization
// data for code generated by the Hydrogen/Lithium compiler. It also
// contains information about functions that were inlined. If N different
@@ -4768,7 +4991,8 @@ class DeoptimizationInputData: public FixedArray {
static const int kLiteralArrayIndex = 2;
static const int kOsrAstIdIndex = 3;
static const int kOsrPcOffsetIndex = 4;
- static const int kFirstDeoptEntryIndex = 5;
+ static const int kOptimizationIdIndex = 5;
+ static const int kFirstDeoptEntryIndex = 6;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kAstIdRawOffset = 0;
@@ -4791,6 +5015,7 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+ DEFINE_ELEMENT_ACCESSORS(OptimizationId, Smi)
#undef DEFINE_ELEMENT_ACCESSORS
@@ -4886,49 +5111,6 @@ class DeoptimizationOutputData: public FixedArray {
// Forward declaration.
class Cell;
class PropertyCell;
-
-// TypeFeedbackCells is a fixed array used to hold the association between
-// cache cells and AST ids for code generated by the full compiler.
-// The format of the these objects is
-// [i * 2]: Global property cell of ith cache cell.
-// [i * 2 + 1]: Ast ID for ith cache cell.
-class TypeFeedbackCells: public FixedArray {
- public:
- int CellCount() { return length() / 2; }
- static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
-
- // Accessors for AST ids associated with cache values.
- inline TypeFeedbackId AstId(int index);
- inline void SetAstId(int index, TypeFeedbackId id);
-
- // Accessors for global property cells holding the cache values.
- inline Cell* GetCell(int index);
- inline void SetCell(int index, Cell* cell);
-
- // The object that indicates an uninitialized cache.
- static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
-
- // The object that indicates a megamorphic state.
- static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
-
- // The object that indicates a monomorphic state of Array with
- // ElementsKind
- static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind);
-
- // A raw version of the uninitialized sentinel that's safe to read during
- // garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Heap* heap);
-
- // Casting.
- static inline TypeFeedbackCells* cast(Object* obj);
-
- static const int kForInFastCaseMarker = 0;
- static const int kForInSlowCaseMarker = 1;
-};
-
-
-// Forward declaration.
class SafepointEntry;
class TypeFeedbackInfo;
@@ -4950,8 +5132,6 @@ class Code: public HeapObject {
#define IC_KIND_LIST(V) \
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
- V(CALL_IC) \
- V(KEYED_CALL_IC) \
V(STORE_IC) \
V(KEYED_STORE_IC) \
V(BINARY_OP_IC) \
@@ -4979,18 +5159,9 @@ class Code: public HeapObject {
// Types of stubs.
enum StubType {
NORMAL,
- FIELD,
- CONSTANT,
- CALLBACKS,
- INTERCEPTOR,
- TRANSITION,
- NONEXISTENT
+ FAST
};
- typedef int ExtraICState;
-
- static const ExtraICState kNoExtraICState = 0;
-
static const int kPrologueOffsetNotSet = -1;
#ifdef ENABLE_DISASSEMBLER
@@ -5016,13 +5187,15 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [type_feedback_info]: This field stores various things, depending on the
- // kind of the code object.
+ // [raw_type_feedback_info]: This field stores various things, depending on
+ // the kind of the code object.
// FUNCTION => type feedback information.
// STUB => various things, e.g. a SMI
// OPTIMIZED_FUNCTION => the next_code_link for optimized code list.
- DECL_ACCESSORS(type_feedback_info, Object)
- inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
+ DECL_ACCESSORS(raw_type_feedback_info, Object)
+ inline Object* type_feedback_info();
+ inline void set_type_feedback_info(
+ Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline int stub_info();
inline void set_stub_info(int info);
@@ -5056,24 +5229,10 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
- inline Kind handler_kind() {
- return static_cast<Kind>(arguments_count());
- }
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
- inline ExtraICState extended_extra_ic_state(); // Only valid for
- // non-call IC stubs.
- static bool needs_extended_extra_ic_state(Kind kind) {
- // TODO(danno): This is a bit of a hack right now since there are still
- // clients of this API that pass "extra" values in for argc. These clients
- // should be retrofitted to used ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
- kind == BINARY_OP_IC;
- }
-
inline StubType type(); // Only valid for monomorphic IC stubs.
- inline int arguments_count(); // Only valid for call IC stubs.
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
@@ -5083,28 +5242,25 @@ class Code: public HeapObject {
inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
inline bool is_store_stub() { return kind() == STORE_IC; }
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
- inline bool is_call_stub() { return kind() == CALL_IC; }
- inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
inline bool is_keyed_stub();
+ inline void set_raw_kind_specific_flags1(int value);
+ inline void set_raw_kind_specific_flags2(int value);
+
// [major_key]: For kind STUB or BINARY_OP_IC, the major key.
inline int major_key();
inline void set_major_key(int value);
+ inline bool has_major_key();
// For kind STUB or ICs, tells whether or not a code object was generated by
// the optimizing compiler (but it may not be an optimized function).
bool is_crankshafted();
inline void set_is_crankshafted(bool value);
- // For stubs, tells whether they should always exist, so that they can be
- // called from other stubs.
- inline bool is_pregenerated();
- inline void set_is_pregenerated(bool value);
-
// [optimizable]: For FUNCTION kind, tells if it is optimizable.
inline bool optimizable();
inline void set_optimizable(bool value);
@@ -5154,11 +5310,6 @@ class Code: public HeapObject {
inline bool back_edges_patched_for_osr();
inline void set_back_edges_patched_for_osr(bool value);
- // [check type]: For kind CALL_IC, tells how to check if the
- // receiver is valid for the given call.
- inline CheckType check_type();
- inline void set_check_type(CheckType value);
-
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
@@ -5173,6 +5324,10 @@ class Code: public HeapObject {
inline bool marked_for_deoptimization();
inline void set_marked_for_deoptimization(bool flag);
+ // [constant_pool]: The constant pool for this function.
+ inline ConstantPoolArray* constant_pool();
+ inline void set_constant_pool(Object* constant_pool);
+
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -5180,9 +5335,13 @@ class Code: public HeapObject {
Object* FindNthObject(int n, Map* match_map);
void ReplaceNthObject(int n, Map* match_map, Object* replace_with);
+ // Find the first allocation site in an IC stub.
+ AllocationSite* FindFirstAllocationSite();
+
// Find the first map in an IC stub.
Map* FindFirstMap();
void FindAllMaps(MapHandleList* maps);
+ void FindAllTypes(TypeHandleList* types);
void ReplaceFirstMap(Map* replace);
// Find the first handler in an IC stub.
@@ -5197,25 +5356,14 @@ class Code: public HeapObject {
void ReplaceNthCell(int n, Cell* replace_with);
- class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
- class ExtraICStateKeyedAccessStoreMode:
- public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
-
- static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) {
- return ExtraICStateStrictMode::decode(extra_ic_state);
- }
-
- static inline KeyedAccessStoreMode GetKeyedAccessStoreMode(
- ExtraICState extra_ic_state) {
- return ExtraICStateKeyedAccessStoreMode::decode(extra_ic_state);
- }
-
- static inline ExtraICState ComputeExtraICState(
- KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode) {
- return ExtraICStateKeyedAccessStoreMode::encode(store_mode) |
- ExtraICStateStrictMode::encode(strict_mode);
- }
+ // The entire code object including its header is copied verbatim to the
+ // snapshot so that it can be written in one, fast, memcpy during
+ // deserialization. The deserializer will overwrite some pointers, rather
+ // like a runtime linker, but the random allocation addresses used in the
+ // mksnapshot process would still be present in the unlinked snapshot data,
+ // which would make snapshot production non-reproducible. This method wipes
+ // out the to-be-overwritten header data for reproducible snapshots.
+ inline void WipeOutHeader();
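The comment above explains why the header is wiped before serialization; a self-contained sketch of the same idea (a fake header, not V8's actual Code layout) could look like this:

    // Any field the deserializer will overwrite (here, raw pointers) is zeroed
    // before the bytes are emitted, so two snapshot builds are byte-identical
    // regardless of the allocation addresses used while building them.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct FakeCodeHeader {
      uint32_t instruction_size;
      void* relocation_info;  // relinked by the deserializer
      void* type_feedback;    // relinked by the deserializer
    };

    void WipeOutHeader(FakeCodeHeader* header) {
      header->relocation_info = nullptr;
      header->type_feedback = nullptr;
    }

    int main() {
      FakeCodeHeader a, b;
      std::memset(&a, 0, sizeof a);
      std::memset(&b, 0, sizeof b);
      a.instruction_size = b.instruction_size = 64;
      a.relocation_info = &a;  // build-specific addresses...
      b.relocation_info = &b;
      WipeOutHeader(&a);
      WipeOutHeader(&b);
      // ...no longer leak into the serialized bytes.
      std::printf("identical after wipe: %d\n",
                  std::memcmp(&a, &b, sizeof a) == 0);
      return 0;
    }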
// Flags operations.
static inline Flags ComputeFlags(
@@ -5223,14 +5371,17 @@ class Code: public HeapObject {
InlineCacheState ic_state = UNINITIALIZED,
ExtraICState extra_ic_state = kNoExtraICState,
StubType type = NORMAL,
- int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
ExtraICState extra_ic_state = kNoExtraICState,
+ InlineCacheHolderFlag holder = OWN_MAP,
+ StubType type = NORMAL);
+
+ static inline Flags ComputeHandlerFlags(
+ Kind handler_kind,
StubType type = NORMAL,
- int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
@@ -5238,8 +5389,6 @@ class Code: public HeapObject {
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
- static inline ExtraICState ExtractExtendedExtraICStateFromFlags(Flags flags);
- static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
@@ -5307,9 +5456,12 @@ class Code: public HeapObject {
DECLARE_VERIFIER(Code)
void ClearInlineCaches();
- void ClearTypeFeedbackCells(Heap* heap);
+ void ClearInlineCaches(Kind kind);
+
+ void ClearTypeFeedbackInfo(Heap* heap);
BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
+ uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
@@ -5318,8 +5470,9 @@ class Code: public HeapObject {
kNoAgeCodeAge = 0,
CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
kAfterLastCodeAge,
+ kFirstCodeAge = kNotExecutedCodeAge,
kLastCodeAge = kAfterLastCodeAge - 1,
- kCodeAgeCount = kAfterLastCodeAge - 1,
+ kCodeAgeCount = kAfterLastCodeAge - kNotExecutedCodeAge - 1,
kIsOldCodeAge = kSexagenarianCodeAge,
kPreAgedCodeAge = kIsOldCodeAge - 1
};
@@ -5335,11 +5488,14 @@ class Code: public HeapObject {
static bool IsYoungSequence(byte* sequence);
bool IsOld();
Age GetAge();
+ // Gets the raw code age, including pseudo code-age values such as
+ // kNotExecutedCodeAge and kExecutedOnceCodeAge.
+ Age GetRawAge();
static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) {
return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY);
}
- void PrintDeoptLocation(int bailout_id);
+ void PrintDeoptLocation(FILE* out, int bailout_id);
bool CanDeoptAt(Address pc);
#ifdef VERIFY_HEAP
@@ -5370,8 +5526,9 @@ class Code: public HeapObject {
kKindSpecificFlags1Offset + kIntSize;
// Note: We might be able to squeeze this into the flags above.
static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
+ static const int kConstantPoolOffset = kPrologueOffset + kPointerSize;
- static const int kHeaderPaddingStart = kPrologueOffset + kIntSize;
+ static const int kHeaderPaddingStart = kConstantPoolOffset + kIntSize;
// Add padding to align the instruction start following right after
// the Code object header.
@@ -5380,7 +5537,6 @@ class Code: public HeapObject {
// Byte offsets within kKindSpecificFlags1Offset.
static const int kOptimizableOffset = kKindSpecificFlags1Offset;
- static const int kCheckTypeOffset = kKindSpecificFlags1Offset;
static const int kFullCodeFlags = kOptimizableOffset + 1;
class FullCodeFlagsHasDeoptimizationSupportField:
@@ -5393,14 +5549,12 @@ class Code: public HeapObject {
// Flags layout. BitField<type, shift, size>.
class ICStateField: public BitField<InlineCacheState, 0, 3> {};
- class TypeField: public BitField<StubType, 3, 3> {};
- class CacheHolderField: public BitField<InlineCacheHolderFlag, 6, 1> {};
- class KindField: public BitField<Kind, 7, 4> {};
- class IsPregeneratedField: public BitField<bool, 11, 1> {};
- class ExtraICStateField: public BitField<ExtraICState, 12, 5> {};
- class ExtendedExtraICStateField: public BitField<ExtraICState, 12,
- PlatformSmiTagging::kSmiValueSize - 12 + 1> {}; // NOLINT
- STATIC_ASSERT(ExtraICStateField::kShift == ExtendedExtraICStateField::kShift);
+ class TypeField: public BitField<StubType, 3, 1> {};
+ class CacheHolderField: public BitField<InlineCacheHolderFlag, 5, 1> {};
+ class KindField: public BitField<Kind, 6, 4> {};
+ // TODO(bmeurer): Bit 10 is available for free use. :-)
+ class ExtraICStateField: public BitField<ExtraICState, 11,
+ PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
@@ -5434,7 +5588,7 @@ class Code: public HeapObject {
static const int kStubMajorKeyFirstBit = kIsCrankshaftedBit + 1;
static const int kSafepointTableOffsetFirstBit =
kStubMajorKeyFirstBit + kStubMajorKeyBits;
- static const int kSafepointTableOffsetBitCount = 25;
+ static const int kSafepointTableOffsetBitCount = 24;
STATIC_ASSERT(kStubMajorKeyFirstBit + kStubMajorKeyBits <= 32);
STATIC_ASSERT(kSafepointTableOffsetFirstBit +
@@ -5454,20 +5608,9 @@ class Code: public HeapObject {
class BackEdgesPatchedForOSRField: public BitField<bool,
kIsCrankshaftedBit + 1 + 29, 1> {}; // NOLINT
- // Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 17;
- static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
- static const int kArgumentsBits =
- PlatformSmiTagging::kSmiValueSize - Code::kArgumentsCountShift + 1;
+ static const int kArgumentsBits = 16;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
- // ICs can use either argument count or ExtendedExtraIC, since their storage
- // overlaps.
- STATIC_ASSERT(ExtraICStateField::kShift +
- ExtraICStateField::kSize + kArgumentsBits ==
- ExtendedExtraICStateField::kShift +
- ExtendedExtraICStateField::kSize);
-
// This constant should be encodable in an ARM instruction.
static const int kFlagsNotUsedInLookup =
TypeField::kMask | CacheHolderField::kMask;
@@ -5475,6 +5618,8 @@ class Code: public HeapObject {
private:
friend class RelocIterator;
+ void ClearInlineCaches(Kind* kind);
+
// Code aging
byte* FindCodeAgeSequence();
static void GetCodeAgeAndParity(Code* code, Age* age,
@@ -5533,7 +5678,13 @@ class DependentCode: public FixedArray {
// Group of code that depends on global property values in property cells
// not being changed.
kPropertyCellChangedGroup,
- kGroupCount = kPropertyCellChangedGroup + 1
+ // Group of code that depends on tenuring information in AllocationSites
+ // not being changed.
+ kAllocationSiteTenuringChangedGroup,
+ // Group of code that depends on element transition information in
+ // AllocationSites not being changed.
+ kAllocationSiteTransitionChangedGroup,
+ kGroupCount = kAllocationSiteTransitionChangedGroup + 1
};
// Array for holding the index of the first code object of each group.
@@ -5561,6 +5712,9 @@ class DependentCode: public FixedArray {
void DeoptimizeDependentCodeGroup(Isolate* isolate,
DependentCode::DependencyGroup group);
+ bool MarkCodeForDeoptimization(Isolate* isolate,
+ DependentCode::DependencyGroup group);
+
// The following low-level accessors should only be used by this class
// and the mark compact collector.
inline int number_of_entries(DependencyGroup group);
@@ -5627,17 +5781,20 @@ class Map: public HeapObject {
inline uint32_t bit_field3();
inline void set_bit_field3(uint32_t bits);
- class EnumLengthBits: public BitField<int, 0, 11> {};
- class NumberOfOwnDescriptorsBits: public BitField<int, 11, 11> {};
- class IsShared: public BitField<bool, 22, 1> {};
- class FunctionWithPrototype: public BitField<bool, 23, 1> {};
- class DictionaryMap: public BitField<bool, 24, 1> {};
- class OwnsDescriptors: public BitField<bool, 25, 1> {};
- class IsObserved: public BitField<bool, 26, 1> {};
- class Deprecated: public BitField<bool, 27, 1> {};
- class IsFrozen: public BitField<bool, 28, 1> {};
- class IsUnstable: public BitField<bool, 29, 1> {};
- class IsMigrationTarget: public BitField<bool, 30, 1> {};
+ class EnumLengthBits: public BitField<int,
+ 0, kDescriptorIndexBitCount> {}; // NOLINT
+ class NumberOfOwnDescriptorsBits: public BitField<int,
+ kDescriptorIndexBitCount, kDescriptorIndexBitCount> {}; // NOLINT
+ STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
+ class IsShared: public BitField<bool, 20, 1> {};
+ class FunctionWithPrototype: public BitField<bool, 21, 1> {};
+ class DictionaryMap: public BitField<bool, 22, 1> {};
+ class OwnsDescriptors: public BitField<bool, 23, 1> {};
+ class HasInstanceCallHandler: public BitField<bool, 24, 1> {};
+ class Deprecated: public BitField<bool, 25, 1> {};
+ class IsFrozen: public BitField<bool, 26, 1> {};
+ class IsUnstable: public BitField<bool, 27, 1> {};
+ class IsMigrationTarget: public BitField<bool, 28, 1> {};
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -5696,12 +5853,12 @@ class Map: public HeapObject {
}
// Tells whether the instance has a call-as-function handler.
- inline void set_has_instance_call_handler() {
- set_bit_field(bit_field() | (1 << kHasInstanceCallHandler));
+ inline void set_is_observed() {
+ set_bit_field(bit_field() | (1 << kIsObserved));
}
- inline bool has_instance_call_handler() {
- return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
+ inline bool is_observed() {
+ return ((1 << kIsObserved) & bit_field()) != 0;
}
inline void set_is_extensible(bool value);
@@ -5710,10 +5867,6 @@ class Map: public HeapObject {
inline void set_elements_kind(ElementsKind elements_kind) {
ASSERT(elements_kind < kElementsKindCount);
ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount));
- ASSERT(!is_observed() ||
- elements_kind == DICTIONARY_ELEMENTS ||
- elements_kind == NON_STRICT_ARGUMENTS_ELEMENTS ||
- IsExternalArrayElementsKind(elements_kind));
set_bit_field2((bit_field2() & ~kElementsKindMask) |
(elements_kind << kElementsKindShift));
ASSERT(this->elements_kind() == elements_kind);
@@ -5754,6 +5907,10 @@ class Map: public HeapObject {
return IsExternalArrayElementsKind(elements_kind());
}
+ inline bool has_fixed_typed_array_elements() {
+ return IsFixedTypedArrayElementsKind(elements_kind());
+ }
+
inline bool has_dictionary_elements() {
return IsDictionaryElementsKind(elements_kind());
}
@@ -5766,6 +5923,10 @@ class Map: public HeapObject {
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
+ // Returns true if the current map doesn't have DICTIONARY_ELEMENTS but if a
+ // map with DICTIONARY_ELEMENTS was found in the prototype chain.
+ bool DictionaryElementsInPrototypeChainOnly();
+
inline bool HasTransitionArray();
inline bool HasElementsTransition();
inline Map* elements_transition_map();
@@ -5793,6 +5954,8 @@ class Map: public HeapObject {
Map* FindUpdatedMap(int verbatim, int length, DescriptorArray* descriptors);
Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
+ inline int GetInObjectPropertyOffset(int index);
+
int NumberOfFields();
bool InstancesNeedRewriting(Map* target,
@@ -5925,6 +6088,8 @@ class Map: public HeapObject {
Name* name,
LookupResult* result);
+ inline PropertyDetails GetLastDescriptorDetails();
+
// The size of transition arrays are limited so they do not end up in large
// object space. Otherwise ClearNonLiveTransitions would leak memory while
// applying in-place right trimming.
@@ -5952,7 +6117,7 @@ class Map: public HeapObject {
}
void SetEnumLength(int length) {
- if (length != kInvalidEnumCache) {
+ if (length != kInvalidEnumCacheSentinel) {
ASSERT(length >= 0);
ASSERT(length == 0 || instance_descriptors()->HasEnumCache());
ASSERT(length <= NumberOfOwnDescriptors());
@@ -5962,8 +6127,8 @@ class Map: public HeapObject {
inline bool owns_descriptors();
inline void set_owns_descriptors(bool is_shared);
- inline bool is_observed();
- inline void set_is_observed(bool is_observed);
+ inline bool has_instance_call_handler();
+ inline void set_has_instance_call_handler();
inline void freeze();
inline bool is_frozen();
inline void mark_unstable();
@@ -5977,7 +6142,10 @@ class Map: public HeapObject {
// deprecated, it is directly returned. Otherwise, the non-deprecated version
// is found by re-transitioning from the root of the transition tree using the
// descriptor array of the map. Returns NULL if no updated map is found.
- Map* CurrentMapForDeprecated();
+ // This method also applies any pending migrations along the prototype chain.
+ static Handle<Map> CurrentMapForDeprecated(Handle<Map> map);
+ // Same as above, but does not touch the prototype chain.
+ static Handle<Map> CurrentMapForDeprecatedInternal(Handle<Map> map);
static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
@@ -6122,6 +6290,16 @@ class Map: public HeapObject {
bool IsJSObjectMap() {
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
+ bool IsJSGlobalProxyMap() {
+ return instance_type() == JS_GLOBAL_PROXY_TYPE;
+ }
+ bool IsJSGlobalObjectMap() {
+ return instance_type() == JS_GLOBAL_OBJECT_TYPE;
+ }
+ bool IsGlobalObjectMap() {
+ const InstanceType type = instance_type();
+ return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE;
+ }
// Fires when the layout of an object with a leaf map changes.
// This includes adding transitions to the leaf map or changing
@@ -6170,9 +6348,6 @@ class Map: public HeapObject {
static const int kMaxPreAllocatedPropertyFields = 255;
- // Constant for denoting that the enum cache is not yet initialized.
- static const int kInvalidEnumCache = EnumLengthBits::kMax;
-
// Layout description.
static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
@@ -6222,7 +6397,7 @@ class Map: public HeapObject {
static const int kHasNamedInterceptor = 3;
static const int kHasIndexedInterceptor = 4;
static const int kIsUndetectable = 5;
- static const int kHasInstanceCallHandler = 6;
+ static const int kIsObserved = 6;
static const int kIsAccessCheckNeeded = 7;
// Bit positions for bit field 2
@@ -6426,16 +6601,9 @@ class Script: public Struct {
V(Math, ceil, MathCeil) \
V(Math, abs, MathAbs) \
V(Math, log, MathLog) \
- V(Math, sin, MathSin) \
- V(Math, cos, MathCos) \
- V(Math, tan, MathTan) \
- V(Math, asin, MathASin) \
- V(Math, acos, MathACos) \
- V(Math, atan, MathATan) \
V(Math, exp, MathExp) \
V(Math, sqrt, MathSqrt) \
V(Math, pow, MathPow) \
- V(Math, random, MathRandom) \
V(Math, max, MathMax) \
V(Math, min, MathMin) \
V(Math, imul, MathImul)
@@ -6467,14 +6635,16 @@ class SharedFunctionInfo: public HeapObject {
// and a shared literals array or Smi(0) if none.
DECL_ACCESSORS(optimized_code_map, Object)
- // Returns index i of the entry with the specified context. At position
- // i - 1 is the context, position i the code, and i + 1 the literals array.
- // Returns -1 when no matching entry is found.
- int SearchOptimizedCodeMap(Context* native_context);
+ // Returns index i of the entry with the specified context and OSR entry.
+ // At position i - 1 is the context, position i the code, and i + 1 the
+ // literals array. Returns -1 when no matching entry is found.
+ int SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id);
// Installs optimized code from the code map on the given closure. The
// index has to be consistent with a search result as defined above.
- void InstallFromOptimizedCodeMap(JSFunction* function, int index);
+ FixedArray* GetLiteralsFromOptimizedCodeMap(int index);
+
+ Code* GetCodeFromOptimizedCodeMap(int index);
// Clear optimized code map.
void ClearOptimizedCodeMap();
@@ -6488,18 +6658,28 @@ class SharedFunctionInfo: public HeapObject {
// Add a new entry to the optimized code map.
MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
Code* code,
- FixedArray* literals);
+ FixedArray* literals,
+ BailoutId osr_ast_id);
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
- Handle<FixedArray> literals);
+ Handle<FixedArray> literals,
+ BailoutId osr_ast_id);
// Layout description of the optimized code map.
static const int kNextMapIndex = 0;
static const int kEntriesStart = 1;
- static const int kEntryLength = 3;
- static const int kFirstContextSlot = FixedArray::kHeaderSize + kPointerSize;
- static const int kFirstCodeSlot = FixedArray::kHeaderSize + 2 * kPointerSize;
+ static const int kContextOffset = 0;
+ static const int kCachedCodeOffset = 1;
+ static const int kLiteralsOffset = 2;
+ static const int kOsrAstIdOffset = 3;
+ static const int kEntryLength = 4;
+ static const int kFirstContextSlot = FixedArray::kHeaderSize +
+ (kEntriesStart + kContextOffset) * kPointerSize;
+ static const int kFirstCodeSlot = FixedArray::kHeaderSize +
+ (kEntriesStart + kCachedCodeOffset) * kPointerSize;
+ static const int kFirstOsrAstIdSlot = FixedArray::kHeaderSize +
+ (kEntriesStart + kOsrAstIdOffset) * kPointerSize;
static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
static const int kInitialLength = kEntriesStart + kEntryLength;
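As a sanity check on the new four-slot entry layout, here is a short sketch that recomputes the slot byte offsets; the 8-byte pointer size and 16-byte FixedArray header are assumptions for a 64-bit build, not values taken from this diff.

    // Recomputes kFirstContextSlot / kFirstCodeSlot / kFirstOsrAstIdSlot from the
    // entry layout: context, code, literals, OSR ast id, starting at kEntriesStart.
    #include <cstdio>

    int main() {
      const int kPointerSize = 8;            // assumed 64-bit build
      const int kFixedArrayHeaderSize = 16;  // assumed map + length
      const int kEntriesStart = 1;
      const int kEntryLength = 4;
      const int slot_offsets[] = {0, 1, 2, 3};  // context, code, literals, osr ast id
      const char* names[] = {"context", "code", "literals", "osr ast id"};

      for (int i = 0; i < 4; i++) {
        std::printf("%-10s slot at byte offset %d\n", names[i],
                    kFixedArrayHeaderSize +
                        (kEntriesStart + slot_offsets[i]) * kPointerSize);
      }
      std::printf("second entry starts at index %d\n", kEntryLength + kEntriesStart);
      return 0;
    }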
@@ -6746,6 +6926,9 @@ class SharedFunctionInfo: public HeapObject {
// global object.
DECL_BOOLEAN_ACCESSORS(native)
+ // Indicate that this builtin needs to be inlined in crankshaft.
+ DECL_BOOLEAN_ACCESSORS(inline_builtin)
+
// Indicates that the function was created by the Function function.
// Though it's anonymous, toString should treat it as if it had the name
// "anonymous". We don't set the name itself so that the system does not
@@ -6835,6 +7018,9 @@ class SharedFunctionInfo: public HeapObject {
set_dont_optimize(reason != kNoReason);
}
+ // Check whether or not this function is inlineable.
+ bool IsInlineable();
+
// Source size of this function.
int SourceSize();
@@ -6852,12 +7038,6 @@ class SharedFunctionInfo: public HeapObject {
void ResetForNewContext(int new_ic_age);
- // Helper to compile the shared code. Returns true on success, false on
- // failure (e.g., stack overflow during compilation). This is only used by
- // the debugger, it is not possible to compile without a context otherwise.
- static bool CompileLazy(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
-
// Casting.
static inline SharedFunctionInfo* cast(Object* obj);
@@ -6985,6 +7165,7 @@ class SharedFunctionInfo: public HeapObject {
kUsesArguments,
kHasDuplicateParameters,
kNative,
+ kInlineBuiltin,
kBoundFunction,
kIsAnonymous,
kNameShouldPrintAsAnonymous,
@@ -7187,32 +7368,20 @@ class JSFunction: public JSObject {
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
- void MarkForLazyRecompilation();
- void MarkForConcurrentRecompilation();
- void MarkInRecompileQueue();
-
- // Helpers to compile this function. Returns true on success, false on
- // failure (e.g., stack overflow during compilation).
- static bool EnsureCompiled(Handle<JSFunction> function,
- ClearExceptionFlag flag);
- static bool CompileLazy(Handle<JSFunction> function,
- ClearExceptionFlag flag);
- static Handle<Code> CompileOsr(Handle<JSFunction> function,
- BailoutId osr_ast_id,
- ClearExceptionFlag flag);
+ void MarkForOptimization();
+ void MarkForConcurrentOptimization();
+ void MarkInOptimizationQueue();
+
static bool CompileOptimized(Handle<JSFunction> function,
ClearExceptionFlag flag);
// Tells whether or not the function is already marked for lazy
// recompilation.
- inline bool IsMarkedForLazyRecompilation();
- inline bool IsMarkedForConcurrentRecompilation();
+ inline bool IsMarkedForOptimization();
+ inline bool IsMarkedForConcurrentOptimization();
// Tells whether or not the function is on the concurrent recompilation queue.
- inline bool IsInRecompileQueue();
-
- // Check whether or not this function is inlineable.
- bool IsInlineable();
+ inline bool IsInOptimizationQueue();
// [literals_or_bindings]: Fixed array holding either
// the materialized literals or the bindings of a bound function.
@@ -7240,6 +7409,7 @@ class JSFunction: public JSObject {
inline Map* initial_map();
inline void set_initial_map(Map* value);
inline bool has_initial_map();
+ static void EnsureHasInitialMap(Handle<JSFunction> function);
// Get and set the prototype property on a JSFunction. If the
// function has an initial map the prototype is set on the initial
@@ -7346,6 +7516,8 @@ class JSGlobalProxy : public JSObject {
// Casting.
static inline JSGlobalProxy* cast(Object* obj);
+ inline bool IsDetachedFrom(GlobalObject* global);
+
// Dispatched behavior.
DECLARE_PRINTER(JSGlobalProxy)
DECLARE_VERIFIER(JSGlobalProxy)
@@ -7415,6 +7587,8 @@ class JSGlobalObject: public GlobalObject {
static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name);
+ inline bool IsDetached();
+
// Dispatched behavior.
DECLARE_PRINTER(JSGlobalObject)
DECLARE_VERIFIER(JSGlobalObject)
@@ -7602,9 +7776,6 @@ class JSMessageObject: public JSObject {
// [script]: the script from which the error message originated.
DECL_ACCESSORS(script, Object)
- // [stack_trace]: the stack trace for this error message.
- DECL_ACCESSORS(stack_trace, Object)
-
// [stack_frames]: an array of stack frames for this error object.
DECL_ACCESSORS(stack_frames, Object)
@@ -7627,8 +7798,7 @@ class JSMessageObject: public JSObject {
static const int kTypeOffset = JSObject::kHeaderSize;
static const int kArgumentsOffset = kTypeOffset + kPointerSize;
static const int kScriptOffset = kArgumentsOffset + kPointerSize;
- static const int kStackTraceOffset = kScriptOffset + kPointerSize;
- static const int kStackFramesOffset = kStackTraceOffset + kPointerSize;
+ static const int kStackFramesOffset = kScriptOffset + kPointerSize;
static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
static const int kSize = kEndPositionOffset + kPointerSize;
@@ -7987,7 +8157,7 @@ class TypeFeedbackInfo: public Struct {
inline void set_inlined_type_change_checksum(int checksum);
inline bool matches_inlined_type_change_checksum(int checksum);
- DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
+ DECL_ACCESSORS(feedback_vector, FixedArray)
static inline TypeFeedbackInfo* cast(Object* obj);
@@ -7997,8 +8167,30 @@ class TypeFeedbackInfo: public Struct {
static const int kStorage1Offset = HeapObject::kHeaderSize;
static const int kStorage2Offset = kStorage1Offset + kPointerSize;
- static const int kTypeFeedbackCellsOffset = kStorage2Offset + kPointerSize;
- static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;
+ static const int kFeedbackVectorOffset =
+ kStorage2Offset + kPointerSize;
+ static const int kSize = kFeedbackVectorOffset + kPointerSize;
+
+ // The object that indicates an uninitialized cache.
+ static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+
+ // The object that indicates a cache in pre-monomorphic state.
+ static inline Handle<Object> PremonomorphicSentinel(Isolate* isolate);
+
+ // The object that indicates a megamorphic state.
+ static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+
+ // The object that indicates a monomorphic state of Array with
+ // ElementsKind
+ static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind);
+
+ // A raw version of the uninitialized sentinel that's safe to read during
+ // garbage collection (e.g., for patching the cache).
+ static inline Object* RawUninitializedSentinel(Heap* heap);
+
+ static const int kForInFastCaseMarker = 0;
+ static const int kForInSlowCaseMarker = 1;
private:
static const int kTypeChangeChecksumBits = 7;
@@ -8028,31 +8220,126 @@ enum AllocationSiteMode {
class AllocationSite: public Struct {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
+ static const double kPretenureRatio;
+ static const int kPretenureMinimumCreated = 100;
+
+ // Values for pretenure decision field.
+ enum PretenureDecision {
+ kUndecided = 0,
+ kDontTenure = 1,
+ kTenure = 2,
+ kZombie = 3,
+ kLastPretenureDecisionValue = kZombie
+ };
DECL_ACCESSORS(transition_info, Object)
// nested_site threads a list of sites that represent nested literals
// walked in a particular order. So [[1, 2], 1, 2] will have one
// nested_site, but [[1, 2], 3, [4]] will have a list of two.
DECL_ACCESSORS(nested_site, Object)
+ DECL_ACCESSORS(pretenure_data, Smi)
+ DECL_ACCESSORS(pretenure_create_count, Smi)
DECL_ACCESSORS(dependent_code, DependentCode)
DECL_ACCESSORS(weak_next, Object)
inline void Initialize();
- bool HasNestedSites() {
- return nested_site()->IsAllocationSite();
- }
-
// This method is expensive, it should only be called for reporting.
bool IsNestedSite();
+ // transition_info bitfields, for constructed array transition info.
+ class ElementsKindBits: public BitField<ElementsKind, 0, 15> {};
+ class UnusedBits: public BitField<int, 15, 14> {};
+ class DoNotInlineBit: public BitField<bool, 29, 1> {};
+
+ // Bitfields for pretenure_data
+ class MementoFoundCountBits: public BitField<int, 0, 27> {};
+ class PretenureDecisionBits: public BitField<PretenureDecision, 27, 2> {};
+ class DeoptDependentCodeBit: public BitField<bool, 29, 1> {};
+ STATIC_ASSERT(PretenureDecisionBits::kMax >= kLastPretenureDecisionValue);
+
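A minimal sketch of how these BitField encodings pack into one small-integer payload, using a plain C++ stand-in for V8's BitField template; the field positions match the declarations above, everything else is illustrative.

    // Packs a memento count, a pretenure decision and a flag into one integer
    // payload, mirroring MementoFoundCountBits / PretenureDecisionBits /
    // DeoptDependentCodeBit above.
    #include <cstdio>

    template <typename T, int shift, int size>
    struct BitField {
      static const unsigned kMask = ((1u << size) - 1u) << shift;
      static unsigned encode(T value) {
        return (static_cast<unsigned>(value) << shift) & kMask;
      }
      static T decode(unsigned field) {
        return static_cast<T>((field & kMask) >> shift);
      }
      static unsigned update(unsigned previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
    };

    enum PretenureDecision { kUndecided, kDontTenure, kTenure, kZombie };

    typedef BitField<int, 0, 27> MementoFoundCountBits;
    typedef BitField<PretenureDecision, 27, 2> PretenureDecisionBits;
    typedef BitField<bool, 29, 1> DeoptDependentCodeBit;

    int main() {
      unsigned pretenure_data = 0;
      pretenure_data = MementoFoundCountBits::update(pretenure_data, 42);
      pretenure_data = PretenureDecisionBits::update(pretenure_data, kTenure);
      pretenure_data = DeoptDependentCodeBit::update(pretenure_data, true);
      std::printf("count=%d decision=%d deopt=%d\n",
                  MementoFoundCountBits::decode(pretenure_data),
                  static_cast<int>(PretenureDecisionBits::decode(pretenure_data)),
                  DeoptDependentCodeBit::decode(pretenure_data));
      return 0;
    }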
+ // Increments the mementos found counter and returns true when the first
+ // memento was found for a given allocation site.
+ inline bool IncrementMementoFoundCount();
+
+ inline void IncrementMementoCreateCount();
+
+ PretenureFlag GetPretenureMode();
+
+ void ResetPretenureDecision();
+
+ PretenureDecision pretenure_decision() {
+ int value = pretenure_data()->value();
+ return PretenureDecisionBits::decode(value);
+ }
+
+ void set_pretenure_decision(PretenureDecision decision) {
+ int value = pretenure_data()->value();
+ set_pretenure_data(
+ Smi::FromInt(PretenureDecisionBits::update(value, decision)),
+ SKIP_WRITE_BARRIER);
+ }
+
+ bool deopt_dependent_code() {
+ int value = pretenure_data()->value();
+ return DeoptDependentCodeBit::decode(value);
+ }
+
+ void set_deopt_dependent_code(bool deopt) {
+ int value = pretenure_data()->value();
+ set_pretenure_data(
+ Smi::FromInt(DeoptDependentCodeBit::update(value, deopt)),
+ SKIP_WRITE_BARRIER);
+ }
+
+ int memento_found_count() {
+ int value = pretenure_data()->value();
+ return MementoFoundCountBits::decode(value);
+ }
+
+ inline void set_memento_found_count(int count);
+
+ int memento_create_count() {
+ return pretenure_create_count()->value();
+ }
+
+ void set_memento_create_count(int count) {
+ set_pretenure_create_count(Smi::FromInt(count), SKIP_WRITE_BARRIER);
+ }
+
+ // The pretenuring decision is made during gc, and the zombie state allows
+ // us to recognize when an allocation site is just being kept alive because
+ // a later traversal of new space may discover AllocationMementos that point
+ // to this AllocationSite.
+ bool IsZombie() {
+ return pretenure_decision() == kZombie;
+ }
+
+ inline void MarkZombie();
+
+ inline bool DigestPretenuringFeedback();
+
ElementsKind GetElementsKind() {
ASSERT(!SitePointsToLiteral());
- return static_cast<ElementsKind>(Smi::cast(transition_info())->value());
+ int value = Smi::cast(transition_info())->value();
+ return ElementsKindBits::decode(value);
}
void SetElementsKind(ElementsKind kind) {
- set_transition_info(Smi::FromInt(static_cast<int>(kind)));
+ int value = Smi::cast(transition_info())->value();
+ set_transition_info(Smi::FromInt(ElementsKindBits::update(value, kind)),
+ SKIP_WRITE_BARRIER);
+ }
+
+ bool CanInlineCall() {
+ int value = Smi::cast(transition_info())->value();
+ return DoNotInlineBit::decode(value) == 0;
+ }
+
+ void SetDoNotInlineCall() {
+ int value = Smi::cast(transition_info())->value();
+ set_transition_info(Smi::FromInt(DoNotInlineBit::update(value, true)),
+ SKIP_WRITE_BARRIER);
}
bool SitePointsToLiteral() {
@@ -8062,6 +8349,17 @@ class AllocationSite: public Struct {
return transition_info()->IsJSArray() || transition_info()->IsJSObject();
}
+ MaybeObject* DigestTransitionFeedback(ElementsKind to_kind);
+
+ enum Reason {
+ TENURING,
+ TRANSITIONS
+ };
+
+ static void AddDependentCompilationInfo(Handle<AllocationSite> site,
+ Reason reason,
+ CompilationInfo* info);
+
DECLARE_PRINTER(AllocationSite)
DECLARE_VERIFIER(AllocationSite)
@@ -8073,15 +8371,30 @@ class AllocationSite: public Struct {
static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
static const int kNestedSiteOffset = kTransitionInfoOffset + kPointerSize;
- static const int kDependentCodeOffset = kNestedSiteOffset + kPointerSize;
+ static const int kPretenureDataOffset = kNestedSiteOffset + kPointerSize;
+ static const int kPretenureCreateCountOffset =
+ kPretenureDataOffset + kPointerSize;
+ static const int kDependentCodeOffset =
+ kPretenureCreateCountOffset + kPointerSize;
static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
static const int kSize = kWeakNextOffset + kPointerSize;
+ // During mark compact we need to take special care for the dependent code
+ // field.
+ static const int kPointerFieldsBeginOffset = kTransitionInfoOffset;
+ static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+
+ // For other visitors, use the fixed body descriptor below.
typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
kDependentCodeOffset + kPointerSize,
kSize> BodyDescriptor;
private:
+ inline DependentCode::DependencyGroup ToDependencyGroup(Reason reason);
+ bool PretenuringDecisionMade() {
+ return pretenure_decision() != kUndecided;
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
};
@@ -8093,7 +8406,10 @@ class AllocationMemento: public Struct {
DECL_ACCESSORS(allocation_site, Object)
- bool IsValid() { return allocation_site()->IsAllocationSite(); }
+ bool IsValid() {
+ return allocation_site()->IsAllocationSite() &&
+ !AllocationSite::cast(allocation_site())->IsZombie();
+ }
AllocationSite* GetAllocationSite() {
ASSERT(IsValid());
return AllocationSite::cast(allocation_site());
@@ -8102,9 +8418,6 @@ class AllocationMemento: public Struct {
DECLARE_PRINTER(AllocationMemento)
DECLARE_VERIFIER(AllocationMemento)
- // Returns NULL if no AllocationMemento is available for object.
- static AllocationMemento* FindForJSObject(JSObject* object,
- bool in_GC = false);
static inline AllocationMemento* cast(Object* obj);
private:
@@ -8318,7 +8631,7 @@ class Name: public HeapObject {
// kMaxCachedArrayIndexLength.
STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
- static const int kContainsCachedArrayIndexMask =
+ static const unsigned int kContainsCachedArrayIndexMask =
(~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
kIsNotArrayIndexMask;
@@ -8340,6 +8653,11 @@ class Symbol: public Name {
// [name]: the print name of a symbol, or undefined if none.
DECL_ACCESSORS(name, Object)
+ DECL_ACCESSORS(flags, Smi)
+
+ // [is_private]: whether this is a private symbol.
+ DECL_BOOLEAN_ACCESSORS(is_private)
+
// Casting.
static inline Symbol* cast(Object* obj);
@@ -8349,12 +8667,14 @@ class Symbol: public Name {
// Layout description.
static const int kNameOffset = Name::kSize;
- static const int kSize = kNameOffset + kPointerSize;
+ static const int kFlagsOffset = kNameOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
- typedef FixedBodyDescriptor<kNameOffset, kNameOffset + kPointerSize, kSize>
- BodyDescriptor;
+ typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
private:
+ static const int kPrivateBit = 0;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
};
@@ -8482,11 +8802,6 @@ class String: public Name {
// ASCII and two byte string types.
bool MarkAsUndetectable();
- // Return a substring.
- MUST_USE_RESULT MaybeObject* SubString(int from,
- int to,
- PretenureFlag pretenure = NOT_TENURED);
-
// String equality operations.
inline bool Equals(String* other);
bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false);
@@ -8575,7 +8890,6 @@ class String: public Name {
static const int kMaxShortPrintLength = 1024;
// Support for regular expressions.
- const uc16* GetTwoByteData();
const uc16* GetTwoByteData(unsigned start);
// Helper function for flattening strings.
@@ -9209,8 +9523,8 @@ class Cell: public HeapObject {
class PropertyCell: public Cell {
public:
// [type]: type of the global property.
- Type* type();
- void set_type(Type* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ HeapType* type();
+ void set_type(HeapType* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [dependent_code]: dependent code that depends on the type of the global
// property.
@@ -9225,8 +9539,8 @@ class PropertyCell: public Cell {
// Computes the new type of the cell's contents for the given value, but
// without actually modifying the 'type' field.
- static Handle<Type> UpdatedType(Handle<PropertyCell> cell,
- Handle<Object> value);
+ static Handle<HeapType> UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value);
void AddDependentCompilationInfo(CompilationInfo* info);
@@ -9358,9 +9672,9 @@ class JSProxy: public JSReceiver {
uint32_t index,
DeleteMode mode);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
- static Handle<Object> GetIdentityHash(Handle<JSProxy> proxy,
- CreationFlag flag);
+ MUST_USE_RESULT Object* GetIdentityHash();
+
+ static Handle<Object> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
@@ -9503,6 +9817,9 @@ class JSArrayBuffer: public JSObject {
inline bool is_external();
inline void set_is_external(bool value);
+ inline bool should_be_freed();
+ inline void set_should_be_freed(bool value);
+
// [weak_next]: linked list of array buffers.
DECL_ACCESSORS(weak_next, Object)
@@ -9532,6 +9849,7 @@ class JSArrayBuffer: public JSObject {
private:
// Bit position in a flag
static const int kIsExternalBit = 0;
+ static const int kShouldBeFreed = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
};
@@ -9669,6 +9987,10 @@ class JSArray: public JSObject {
// is set to a smi. This matches the set function on FixedArray.
inline void set_length(Smi* length);
+ static void JSArrayUpdateLengthFromIndex(Handle<JSArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
MUST_USE_RESULT MaybeObject* JSArrayUpdateLengthFromIndex(uint32_t index,
Object* value);
@@ -10143,6 +10465,10 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kLengthOffset = kFlagOffset + kPointerSize;
static const int kSize = kLengthOffset + kPointerSize;
+ // Returns true if |object| is an instance of this function template.
+ bool IsTemplateFor(Object* object);
+ bool IsTemplateFor(Map* map);
+
private:
// Bit position in the flag, from least significant bit position.
static const int kHiddenPrototypeBit = 0;
@@ -10324,6 +10650,7 @@ class BreakPointInfo: public Struct {
V(kStringTable, "string_table", "(Internalized strings)") \
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
V(kStrongRootList, "strong_root_list", "(Strong roots)") \
+ V(kSmiRootList, "smi_root_list", "(Smi roots)") \
V(kInternalizedString, "internalized_string", "(Internal string)") \
V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
V(kTop, "top", "(Isolate)") \
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index e9c0254522..d21507084c 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -106,10 +106,10 @@ void OptimizingCompilerThread::Run() {
}
-RecompileJob* OptimizingCompilerThread::NextInput() {
+OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return NULL;
- RecompileJob* job = input_queue_[InputQueueIndex(0)];
+ OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
ASSERT_NE(NULL, job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
@@ -118,13 +118,13 @@ RecompileJob* OptimizingCompilerThread::NextInput() {
void OptimizingCompilerThread::CompileNext() {
- RecompileJob* job = NextInput();
+ OptimizedCompileJob* job = NextInput();
ASSERT_NE(NULL, job);
// The function may have already been optimized by OSR. Simply continue.
- RecompileJob::Status status = job->OptimizeGraph();
+ OptimizedCompileJob::Status status = job->OptimizeGraph();
USE(status); // Prevent an unused-variable error in release mode.
- ASSERT(status != RecompileJob::FAILED);
+ ASSERT(status != OptimizedCompileJob::FAILED);
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
@@ -134,13 +134,18 @@ void OptimizingCompilerThread::CompileNext() {
}
-static void DisposeRecompileJob(RecompileJob* job,
- bool restore_function_code) {
+static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
+ bool restore_function_code) {
// The recompile job is allocated in the CompilationInfo's zone.
CompilationInfo* info = job->info();
if (restore_function_code) {
if (info->is_osr()) {
- if (!job->IsWaitingForInstall()) BackEdgeTable::RemoveStackCheck(info);
+ if (!job->IsWaitingForInstall()) {
+ // Remove stack check that guards OSR entry on original code.
+ Handle<Code> code = info->unoptimized_code();
+ uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
+ BackEdgeTable::RemoveStackCheck(code, offset);
+ }
} else {
Handle<JSFunction> function = info->closure();
function->ReplaceCode(function->shared()->code());
@@ -151,25 +156,25 @@ static void DisposeRecompileJob(RecompileJob* job,
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
- RecompileJob* job;
+ OptimizedCompileJob* job;
while ((job = NextInput())) {
// This should not block, since we have one signal on the input queue
// semaphore corresponding to each element in the input queue.
input_queue_semaphore_.Wait();
// OSR jobs are dealt with separately.
if (!job->info()->is_osr()) {
- DisposeRecompileJob(job, restore_function_code);
+ DisposeOptimizedCompileJob(job, restore_function_code);
}
}
}
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
- RecompileJob* job;
+ OptimizedCompileJob* job;
while (output_queue_.Dequeue(&job)) {
// OSR jobs are dealt with separately.
if (!job->info()->is_osr()) {
- DisposeRecompileJob(job, restore_function_code);
+ DisposeOptimizedCompileJob(job, restore_function_code);
}
}
}
@@ -178,7 +183,7 @@ void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
for (int i = 0; i < osr_buffer_capacity_; i++) {
if (osr_buffer_[i] != NULL) {
- DisposeRecompileJob(osr_buffer_[i], restore_function_code);
+ DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
osr_buffer_[i] = NULL;
}
}
@@ -236,9 +241,10 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
ASSERT(!IsOptimizerThread());
HandleScope handle_scope(isolate_);
- RecompileJob* job;
+ OptimizedCompileJob* job;
while (output_queue_.Dequeue(&job)) {
CompilationInfo* info = job->info();
+ Handle<JSFunction> function(*info->closure());
if (info->is_osr()) {
if (FLAG_trace_osr) {
PrintF("[COSR - ");
@@ -247,26 +253,25 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
info->osr_ast_id().ToInt());
}
job->WaitForInstall();
- BackEdgeTable::RemoveStackCheck(info);
+ // Remove stack check that guards OSR entry on original code.
+ Handle<Code> code = info->unoptimized_code();
+ uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
+ BackEdgeTable::RemoveStackCheck(code, offset);
} else {
- Compiler::InstallOptimizedCode(job);
+ Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
+ function->ReplaceCode(
+ code.is_null() ? function->shared()->code() : *code);
}
}
}
-void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
+void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
ASSERT(IsQueueAvailable());
ASSERT(!IsOptimizerThread());
CompilationInfo* info = job->info();
if (info->is_osr()) {
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Queueing ");
- info->closure()->PrintName();
- PrintF(" for concurrent on-stack replacement.\n");
- }
osr_attempts_++;
- BackEdgeTable::AddStackCheck(info);
AddToOsrBuffer(job);
// Add job to the front of the input queue.
LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
@@ -276,7 +281,6 @@ void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
input_queue_[InputQueueIndex(0)] = job;
input_queue_length_++;
} else {
- info->closure()->MarkInRecompileQueue();
// Add job to the back of the input queue.
LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
ASSERT_LT(input_queue_length_, input_queue_capacity_);
@@ -300,14 +304,14 @@ void OptimizingCompilerThread::Unblock() {
}
-RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
- Handle<JSFunction> function, uint32_t osr_pc_offset) {
+OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
+ Handle<JSFunction> function, BailoutId osr_ast_id) {
ASSERT(!IsOptimizerThread());
for (int i = 0; i < osr_buffer_capacity_; i++) {
- RecompileJob* current = osr_buffer_[i];
+ OptimizedCompileJob* current = osr_buffer_[i];
if (current != NULL &&
current->IsWaitingForInstall() &&
- current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ current->info()->HasSameOsrEntry(function, osr_ast_id)) {
osr_hits_++;
osr_buffer_[i] = NULL;
return current;
@@ -318,12 +322,12 @@ RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
- uint32_t osr_pc_offset) {
+ BailoutId osr_ast_id) {
ASSERT(!IsOptimizerThread());
for (int i = 0; i < osr_buffer_capacity_; i++) {
- RecompileJob* current = osr_buffer_[i];
+ OptimizedCompileJob* current = osr_buffer_[i];
if (current != NULL &&
- current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ current->info()->HasSameOsrEntry(function, osr_ast_id)) {
return !current->IsWaitingForInstall();
}
}
@@ -334,7 +338,7 @@ bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
ASSERT(!IsOptimizerThread());
for (int i = 0; i < osr_buffer_capacity_; i++) {
- RecompileJob* current = osr_buffer_[i];
+ OptimizedCompileJob* current = osr_buffer_[i];
if (current != NULL && *current->info()->closure() == function) {
return !current->IsWaitingForInstall();
}
@@ -343,26 +347,26 @@ bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
}
-void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
+void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
ASSERT(!IsOptimizerThread());
// Find the next slot that is empty or has a stale job.
+ OptimizedCompileJob* stale = NULL;
while (true) {
- RecompileJob* stale = osr_buffer_[osr_buffer_cursor_];
+ stale = osr_buffer_[osr_buffer_cursor_];
if (stale == NULL || stale->IsWaitingForInstall()) break;
osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
// Add to found slot and dispose the evicted job.
- RecompileJob* evicted = osr_buffer_[osr_buffer_cursor_];
- if (evicted != NULL) {
- ASSERT(evicted->IsWaitingForInstall());
- CompilationInfo* info = evicted->info();
+ if (stale != NULL) {
+ ASSERT(stale->IsWaitingForInstall());
+ CompilationInfo* info = stale->info();
if (FLAG_trace_osr) {
PrintF("[COSR - Discarded ");
info->closure()->PrintName();
PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
}
- DisposeRecompileJob(evicted, false);
+ DisposeOptimizedCompileJob(stale, false);
}
osr_buffer_[osr_buffer_cursor_] = job;
osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
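The loop above scans for the next empty-or-stale slot and disposes whatever it evicts; a self-contained sketch of that cyclic-buffer policy (job state reduced to one flag) might look like this:

    // Cyclic OSR buffer sketch: a slot is reusable once its occupant is absent
    // or already waiting for install; an evicted occupant is disposed.
    #include <array>
    #include <cstdio>

    struct Job {
      int id;
      bool waiting_for_install;
    };

    class OsrBuffer {
     public:
      void Add(Job* job) {
        // Advance the cursor until we find an empty or stale slot.
        Job* stale = nullptr;
        while (true) {
          stale = slots_[cursor_];
          if (stale == nullptr || stale->waiting_for_install) break;
          cursor_ = (cursor_ + 1) % kCapacity;
        }
        if (stale != nullptr) {
          std::printf("discarding stale job %d\n", stale->id);
          delete stale;
        }
        slots_[cursor_] = job;
        cursor_ = (cursor_ + 1) % kCapacity;
      }

     private:
      static const int kCapacity = 4;
      std::array<Job*, kCapacity> slots_ = {};
      int cursor_ = 0;
    };

    int main() {
      OsrBuffer buffer;
      // Jobs with even ids are "waiting for install" and thus evictable.
      for (int i = 0; i < 6; i++) {
        buffer.Add(new Job{i, i % 2 == 0});
      }
      return 0;  // remaining jobs are intentionally leaked in this sketch
    }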
@@ -370,8 +374,13 @@ void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
#ifdef DEBUG
+bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
+ return isolate->concurrent_recompilation_enabled() &&
+ isolate->optimizing_compiler_thread()->IsOptimizerThread();
+}
+
+
bool OptimizingCompilerThread::IsOptimizerThread() {
- if (!FLAG_concurrent_recompilation) return false;
LockGuard<Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h
index 754aecebf5..eae1f608f9 100644
--- a/deps/v8/src/optimizing-compiler-thread.h
+++ b/deps/v8/src/optimizing-compiler-thread.h
@@ -40,7 +40,7 @@ namespace v8 {
namespace internal {
class HOptimizedGraphBuilder;
-class RecompileJob;
+class OptimizedCompileJob;
class SharedFunctionInfo;
class OptimizingCompilerThread : public Thread {
@@ -62,10 +62,10 @@ class OptimizingCompilerThread : public Thread {
osr_attempts_(0),
blocked_jobs_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
- input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_);
+ input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
if (FLAG_concurrent_osr) {
// Allocate and mark OSR buffer slots as empty.
- osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_);
+ osr_buffer_ = NewArray<OptimizedCompileJob*>(osr_buffer_capacity_);
for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
}
}
@@ -75,12 +75,12 @@ class OptimizingCompilerThread : public Thread {
void Run();
void Stop();
void Flush();
- void QueueForOptimization(RecompileJob* optimizing_compiler);
+ void QueueForOptimization(OptimizedCompileJob* optimizing_compiler);
void Unblock();
void InstallOptimizedFunctions();
- RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
- uint32_t osr_pc_offset);
- bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
+ OptimizedCompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
+ BailoutId osr_ast_id);
+ bool IsQueuedForOSR(Handle<JSFunction> function, BailoutId osr_ast_id);
bool IsQueuedForOSR(JSFunction* function);
@@ -96,7 +96,12 @@ class OptimizingCompilerThread : public Thread {
AddToOsrBuffer(NULL);
}
+ static bool Enabled(int max_available) {
+ return (FLAG_concurrent_recompilation && max_available > 1);
+ }
+
#ifdef DEBUG
+ static bool IsOptimizerThread(Isolate* isolate);
bool IsOptimizerThread();
#endif
@@ -107,11 +112,11 @@ class OptimizingCompilerThread : public Thread {
void FlushOutputQueue(bool restore_function_code);
void FlushOsrBuffer(bool restore_function_code);
void CompileNext();
- RecompileJob* NextInput();
+ OptimizedCompileJob* NextInput();
// Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
// Tasks evicted from the cyclic buffer are discarded.
- void AddToOsrBuffer(RecompileJob* compiler);
+ void AddToOsrBuffer(OptimizedCompileJob* compiler);
inline int InputQueueIndex(int i) {
int result = (i + input_queue_shift_) % input_queue_capacity_;
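InputQueueIndex maps a logical queue position to a physical slot; a tiny illustration of the shifted modular indexing follows (the shift value here is just an example, not taken from this diff).

    // Logical position i lives in physical slot (i + shift) % capacity, so
    // dequeuing from the front only advances the shift instead of moving data.
    #include <cstdio>

    int main() {
      const int capacity = 8;
      int shift = 5;  // example value of input_queue_shift_
      for (int i = 0; i < 4; i++) {
        std::printf("logical %d -> physical %d\n", i, (i + shift) % capacity);
      }
      return 0;
    }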
@@ -130,17 +135,17 @@ class OptimizingCompilerThread : public Thread {
Semaphore input_queue_semaphore_;
// Circular queue of incoming recompilation tasks (including OSR).
- RecompileJob** input_queue_;
+ OptimizedCompileJob** input_queue_;
int input_queue_capacity_;
int input_queue_length_;
int input_queue_shift_;
Mutex input_queue_mutex_;
// Queue of recompilation tasks ready to be installed (excluding OSR).
- UnboundQueue<RecompileJob*> output_queue_;
+ UnboundQueue<OptimizedCompileJob*> output_queue_;
// Cyclic buffer of recompilation tasks for OSR.
- RecompileJob** osr_buffer_;
+ OptimizedCompileJob** osr_buffer_;
int osr_buffer_capacity_;
int osr_buffer_cursor_;
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index d84649d86b..409059778a 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -46,49 +46,6 @@
namespace v8 {
namespace internal {
-// PositionStack is used for on-stack allocation of token positions for
-// new expressions. Please look at ParseNewExpression.
-
-class PositionStack {
- public:
- explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
- ~PositionStack() {
- ASSERT(!*ok_ || is_empty());
- USE(ok_);
- }
-
- class Element {
- public:
- Element(PositionStack* stack, int value) {
- previous_ = stack->top();
- value_ = value;
- stack->set_top(this);
- }
-
- private:
- Element* previous() { return previous_; }
- int value() { return value_; }
- friend class PositionStack;
- Element* previous_;
- int value_;
- };
-
- bool is_empty() { return top_ == NULL; }
- int pop() {
- ASSERT(!is_empty());
- int result = top_->value();
- top_ = top_->previous();
- return result;
- }
-
- private:
- Element* top() { return top_; }
- void set_top(Element* value) { top_ = value; }
- Element* top_;
- bool* ok_;
-};
-
-
RegExpBuilder::RegExpBuilder(Zone* zone)
: zone_(zone),
pending_empty_(false),
@@ -256,12 +213,12 @@ Handle<String> Parser::LookupSymbol(int symbol_id) {
// if there is some preparser data.
if (static_cast<unsigned>(symbol_id)
>= static_cast<unsigned>(symbol_cache_.length())) {
- if (scanner().is_literal_ascii()) {
+ if (scanner()->is_literal_ascii()) {
return isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
+ Vector<const uint8_t>::cast(scanner()->literal_ascii_string()));
} else {
return isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
+ scanner()->literal_utf16_string());
}
}
return LookupCachedSymbol(symbol_id);
@@ -277,12 +234,12 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
}
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
- if (scanner().is_literal_ascii()) {
+ if (scanner()->is_literal_ascii()) {
result = isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
+ Vector<const uint8_t>::cast(scanner()->literal_ascii_string()));
} else {
result = isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
+ scanner()->literal_utf16_string());
}
symbol_cache_.at(symbol_id) = result;
return result;
@@ -463,56 +420,6 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
-// FunctionState and BlockState together implement the parser's scope stack.
-// The parser's current scope is in top_scope_. The BlockState and
-// FunctionState constructors push on the scope stack and the destructors
-// pop. They are also used to hold the parser's per-function and per-block
-// state.
-
-class Parser::BlockState BASE_EMBEDDED {
- public:
- BlockState(Parser* parser, Scope* scope)
- : parser_(parser),
- outer_scope_(parser->top_scope_) {
- parser->top_scope_ = scope;
- }
-
- ~BlockState() { parser_->top_scope_ = outer_scope_; }
-
- private:
- Parser* parser_;
- Scope* outer_scope_;
-};
-
-
-Parser::FunctionState::FunctionState(Parser* parser,
- Scope* scope,
- Isolate* isolate)
- : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
- next_handler_index_(0),
- expected_property_count_(0),
- generator_object_variable_(NULL),
- parser_(parser),
- outer_function_state_(parser->current_function_state_),
- outer_scope_(parser->top_scope_),
- saved_ast_node_id_(isolate->ast_node_id()),
- factory_(isolate, parser->zone()) {
- parser->top_scope_ = scope;
- parser->current_function_state_ = this;
- isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
-}
-
-
-Parser::FunctionState::~FunctionState() {
- parser_->top_scope_ = outer_scope_;
- parser_->current_function_state_ = outer_function_state_;
- if (outer_function_state_ != NULL) {
- parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
- }
-}
-
-
-// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
//
@@ -535,21 +442,168 @@ Parser::FunctionState::~FunctionState() {
// ----------------------------------------------------------------------------
// Implementation of Parser
+bool ParserTraits::IsEvalOrArguments(Handle<String> identifier) const {
+ return identifier.is_identical_to(
+ parser_->isolate()->factory()->eval_string()) ||
+ identifier.is_identical_to(
+ parser_->isolate()->factory()->arguments_string());
+}
+
+
+void ParserTraits::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<const char*> args) {
+ MessageLocation location(parser_->script_,
+ source_location.beg_pos,
+ source_location.end_pos);
+ Factory* factory = parser_->isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
+ elements->set(i, *arg_string);
+ }
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
+ parser_->isolate()->Throw(*result, &location);
+}
+
+
+void ParserTraits::ReportMessage(const char* message,
+ Vector<Handle<String> > args) {
+ Scanner::Location source_location = parser_->scanner()->location();
+ ReportMessageAt(source_location, message, args);
+}
+
+
+void ParserTraits::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<Handle<String> > args) {
+ MessageLocation location(parser_->script_,
+ source_location.beg_pos,
+ source_location.end_pos);
+ Factory* factory = parser_->isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ elements->set(i, *args[i]);
+ }
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
+ parser_->isolate()->Throw(*result, &location);
+}
+
+
+Handle<String> ParserTraits::GetSymbol(Scanner* scanner) {
+ int symbol_id = -1;
+ if (parser_->pre_parse_data() != NULL) {
+ symbol_id = parser_->pre_parse_data()->GetSymbolIdentifier();
+ }
+ return parser_->LookupSymbol(symbol_id);
+}
+
+
+Handle<String> ParserTraits::NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured) {
+ if (scanner->is_next_literal_ascii()) {
+ return parser_->isolate_->factory()->NewStringFromAscii(
+ scanner->next_literal_ascii_string(), tenured);
+ } else {
+ return parser_->isolate_->factory()->NewStringFromTwoByte(
+ scanner->next_literal_utf16_string(), tenured);
+ }
+}
+
+
+Expression* ParserTraits::ThisExpression(
+ Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ return factory->NewVariableProxy(scope->receiver());
+}
+
+
+Expression* ParserTraits::ExpressionFromLiteral(
+ Token::Value token, int pos,
+ Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ Factory* isolate_factory = parser_->isolate()->factory();
+ switch (token) {
+ case Token::NULL_LITERAL:
+ return factory->NewLiteral(isolate_factory->null_value(), pos);
+ case Token::TRUE_LITERAL:
+ return factory->NewLiteral(isolate_factory->true_value(), pos);
+ case Token::FALSE_LITERAL:
+ return factory->NewLiteral(isolate_factory->false_value(), pos);
+ case Token::NUMBER: {
+ ASSERT(scanner->is_literal_ascii());
+ double value = StringToDouble(parser_->isolate()->unicode_cache(),
+ scanner->literal_ascii_string(),
+ ALLOW_HEX | ALLOW_OCTAL |
+ ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
+ return factory->NewNumberLiteral(value, pos);
+ }
+ default:
+ ASSERT(false);
+ }
+ return NULL;
+}
+
+
+Expression* ParserTraits::ExpressionFromIdentifier(
+ Handle<String> name, int pos, Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
+ // The name may refer to a module instance object, so its type is unknown.
+#ifdef DEBUG
+ if (FLAG_print_interface_details)
+ PrintF("# Variable %s ", name->ToAsciiArray());
+#endif
+ Interface* interface = Interface::NewUnknown(parser_->zone());
+ return scope->NewUnresolved(factory, name, interface, pos);
+}
+
+
+Expression* ParserTraits::ExpressionFromString(
+ int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ Handle<String> symbol = GetSymbol(scanner);
+ if (parser_->fni_ != NULL) parser_->fni_->PushLiteralName(symbol);
+ return factory->NewLiteral(symbol, pos);
+}
+
+
+Expression* ParserTraits::ParseArrayLiteral(bool* ok) {
+ return parser_->ParseArrayLiteral(ok);
+}
+
+
+Expression* ParserTraits::ParseObjectLiteral(bool* ok) {
+ return parser_->ParseObjectLiteral(ok);
+}
+
+
+Expression* ParserTraits::ParseExpression(bool accept_IN, bool* ok) {
+ return parser_->ParseExpression(accept_IN, ok);
+}
+
+
+Expression* ParserTraits::ParseV8Intrinsic(bool* ok) {
+ return parser_->ParseV8Intrinsic(ok);
+}
+
+
Parser::Parser(CompilationInfo* info)
- : ParserBase(&scanner_, info->isolate()->stack_guard()->real_climit()),
+ : ParserBase<ParserTraits>(&scanner_,
+ info->isolate()->stack_guard()->real_climit(),
+ info->extension(),
+ this),
isolate_(info->isolate()),
symbol_cache_(0, info->zone()),
script_(info->script()),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
- top_scope_(NULL),
original_scope_(NULL),
- current_function_state_(NULL),
target_stack_(NULL),
- extension_(info->extension()),
pre_parse_data_(NULL),
fni_(NULL),
- parenthesized_function_(false),
zone_(info->zone()),
info_(info) {
ASSERT(!script_.is_null());
@@ -600,7 +654,7 @@ FunctionLiteral* Parser::ParseProgram() {
} else if (info()->script()->name()->IsString()) {
String* name = String::cast(info()->script()->name());
SmartArrayPointer<char> name_chars = name->ToCString();
- PrintF("[parsing script: %s", *name_chars);
+ PrintF("[parsing script: %s", name_chars.get());
} else {
PrintF("[parsing script");
}
@@ -612,14 +666,14 @@ FunctionLiteral* Parser::ParseProgram() {
FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
Handle<String> source) {
- ASSERT(top_scope_ == NULL);
+ ASSERT(scope_ == NULL);
ASSERT(target_stack_ == NULL);
if (pre_parse_data_ != NULL) pre_parse_data_->Initialize();
Handle<String> no_name = isolate()->factory()->empty_string();
FunctionLiteral* result = NULL;
- { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ { Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
if (!info->context().is_null()) {
scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
@@ -645,19 +699,19 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
ParsingModeScope parsing_mode(this, mode);
// Enters 'scope'.
- FunctionState function_state(this, scope, isolate());
+ FunctionState function_state(&function_state_, &scope_, scope, zone());
- top_scope_->SetLanguageMode(info->language_mode());
+ scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
- int beg_pos = scanner().location().beg_pos;
+ int beg_pos = scanner()->location().beg_pos;
ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
- if (ok && !top_scope_->is_classic_mode()) {
- CheckOctalLiteral(beg_pos, scanner().location().end_pos, &ok);
+ if (ok && !scope_->is_classic_mode()) {
+ CheckOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
}
if (ok && is_extended_mode()) {
- CheckConflictingVarDeclarations(top_scope_, &ok);
+ CheckConflictingVarDeclarations(scope_, &ok);
}
if (ok && info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
@@ -673,7 +727,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (ok) {
result = factory()->NewFunctionLiteral(
no_name,
- top_scope_,
+ scope_,
body,
function_state.materialized_literal_count(),
function_state.expected_property_count(),
@@ -686,6 +740,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral::kNotGenerator,
0);
result->set_ast_properties(factory()->visitor()->ast_properties());
+ result->set_slot_processor(factory()->visitor()->slot_processor());
result->set_dont_optimize_reason(
factory()->visitor()->dont_optimize_reason());
} else if (stack_overflow()) {
@@ -729,7 +784,7 @@ FunctionLiteral* Parser::ParseLazy() {
if (FLAG_trace_parse && result != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
- PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
+ PrintF("[parsing function: %s - took %0.3f ms]\n", name_chars.get(), ms);
}
return result;
}
@@ -738,7 +793,7 @@ FunctionLiteral* Parser::ParseLazy() {
FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
scanner_.Initialize(source);
- ASSERT(top_scope_ == NULL);
+ ASSERT(scope_ == NULL);
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
@@ -752,14 +807,14 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
{
// Parse the function literal.
- Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
info()->SetGlobalScope(scope);
if (!info()->closure().is_null()) {
scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
original_scope_ = scope;
- FunctionState function_state(this, scope, isolate());
+ FunctionState function_state(&function_state_, &scope_, scope, zone());
ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
ASSERT(scope->language_mode() != EXTENDED_MODE ||
info()->is_extended_mode());
@@ -772,6 +827,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
: FunctionLiteral::DECLARATION;
bool ok = true;
result = ParseFunctionLiteral(name,
+ Scanner::Location::invalid(),
false, // Strict mode name already checked.
shared_info->is_generator(),
RelocInfo::kNoPosition,
@@ -794,62 +850,6 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
}
-Handle<String> Parser::GetSymbol() {
- int symbol_id = -1;
- if (pre_parse_data() != NULL) {
- symbol_id = pre_parse_data()->GetSymbolIdentifier();
- }
- return LookupSymbol(symbol_id);
-}
-
-
-void Parser::ReportMessage(const char* message, Vector<const char*> args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, message, args);
-}
-
-
-void Parser::ReportMessage(const char* message, Vector<Handle<String> > args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, message, args);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<const char*> args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
- elements->set(i, *arg_string);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<Handle<String> > args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- elements->set(i, *args[i]);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
-}
-
-
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool is_eval,
@@ -872,7 +872,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
directive_prologue = false;
}
- Scanner::Location token_loc = scanner().peek_location();
+ Scanner::Location token_loc = scanner()->peek_location();
Statement* stat;
if (is_global && !is_eval) {
stat = ParseModuleElement(NULL, CHECK_OK);
@@ -895,7 +895,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
Handle<String> directive = Handle<String>::cast(literal->value());
// Check "use strict" directive (ES5 14.1).
- if (top_scope_->is_classic_mode() &&
+ if (scope_->is_classic_mode() &&
directive->Equals(isolate()->heap()->use_strict_string()) &&
token_loc.end_pos - token_loc.beg_pos ==
isolate()->heap()->use_strict_string()->length() + 2) {
@@ -904,16 +904,16 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// add this scope in DoParseProgram(), but that requires adaptations
// all over the code base, so we go with a quick-fix for now.
// In the same manner, we have to patch the parsing mode.
- if (is_eval && !top_scope_->is_eval_scope()) {
- ASSERT(top_scope_->is_global_scope());
- Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
- scope->set_start_position(top_scope_->start_position());
- scope->set_end_position(top_scope_->end_position());
- top_scope_ = scope;
+ if (is_eval && !scope_->is_eval_scope()) {
+ ASSERT(scope_->is_global_scope());
+ Scope* scope = NewScope(scope_, EVAL_SCOPE);
+ scope->set_start_position(scope_->start_position());
+ scope->set_end_position(scope_->end_position());
+ scope_ = scope;
mode_ = PARSE_EAGERLY;
}
// TODO(ES6): Fix entering extended mode, once it is specified.
- top_scope_->SetLanguageMode(allow_harmony_scoping()
+ scope_->SetLanguageMode(allow_harmony_scoping()
? EXTENDED_MODE : STRICT_MODE);
// "use strict" is the only directive for now.
directive_prologue = false;
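
The directive check above only accepts a "use strict" prologue whose source span is exactly the directive text plus its two quote characters, which is how spellings containing escape sequences are excluded. A minimal standalone sketch of that length test, using plain C strings rather than V8's scanner locations:

#include <cstring>
#include <iostream>

// A directive only counts as "use strict" when the quoted source span is
// exactly strlen("use strict") + 2 characters long (+2 for the quotes).
static bool IsUseStrictDirective(const char* source_span) {
  const char* use_strict = "use strict";
  size_t directive_len = std::strlen(use_strict);
  return std::strlen(source_span) == directive_len + 2 &&
         std::strncmp(source_span + 1, use_strict, directive_len) == 0;
}

int main() {
  std::cout << IsUseStrictDirective("\"use strict\"") << "\n";        // 1
  std::cout << IsUseStrictDirective("\"use\\u0020strict\"") << "\n";  // 0
  return 0;
}
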
@@ -962,14 +962,14 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
// Handle 'module' as a context-sensitive keyword.
if (FLAG_harmony_modules &&
peek() == Token::IDENTIFIER &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() &&
stmt != NULL) {
ExpressionStatement* estmt = stmt->AsExpressionStatement();
if (estmt != NULL &&
estmt->expression()->AsVariableProxy() != NULL &&
estmt->expression()->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_string()) &&
- !scanner().literal_contains_escapes()) {
+ !scanner()->literal_contains_escapes()) {
return ParseModuleDeclaration(NULL, ok);
}
}
@@ -984,7 +984,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
// 'module' Identifier Module
int pos = peek_position();
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ Handle<String> name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -994,7 +994,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
Module* module = ParseModule(CHECK_OK);
VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, top_scope_, pos);
+ factory()->NewModuleDeclaration(proxy, module, scope_, pos);
Declare(declaration, true, CHECK_OK);
#ifdef DEBUG
@@ -1052,14 +1052,14 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(scope_, MODULE_SCOPE);
Expect(Token::LBRACE, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
+ scope->set_start_position(scanner()->location().beg_pos);
scope->SetLanguageMode(EXTENDED_MODE);
{
- BlockState block_state(this, scope);
+ BlockState block_state(&scope_, scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1073,7 +1073,7 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
}
Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
+ scope->set_end_position(scanner()->location().end_pos);
body->set_scope(scope);
// Check that all exports are bound.
@@ -1082,8 +1082,8 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
!it.done(); it.Advance()) {
if (scope->LocalLookup(it.name()) == NULL) {
Handle<String> name(it.name());
- ReportMessage("module_export_undefined",
- Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("module_export_undefined",
+ Vector<Handle<String> >(&name, 1));
*ok = false;
return NULL;
}
@@ -1122,7 +1122,8 @@ Module* Parser::ParseModulePath(bool* ok) {
member->interface()->Print();
}
#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("invalid_module_path",
+ Vector<Handle<String> >(&name, 1));
return NULL;
}
result = member;
@@ -1137,14 +1138,14 @@ Module* Parser::ParseModuleVariable(bool* ok) {
// Identifier
int pos = peek_position();
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ Handle<String> name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
#ifdef DEBUG
if (FLAG_print_interface_details)
PrintF("# Module variable %s ", name->ToAsciiArray());
#endif
- VariableProxy* proxy = top_scope_->NewUnresolved(
+ VariableProxy* proxy = scope_->NewUnresolved(
factory(), name, Interface::NewModule(zone()),
- scanner().location().beg_pos);
+ scanner()->location().beg_pos);
return factory()->NewModuleVariable(proxy, pos);
}
@@ -1166,7 +1167,7 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Create an empty literal as long as the feature isn't finished.
USE(symbol);
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(scope_, MODULE_SCOPE);
Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
body->set_scope(scope);
Interface* interface = scope->interface();
@@ -1232,12 +1233,13 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
module->interface()->Print();
}
#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("invalid_module_path",
+ Vector<Handle<String> >(&name, 1));
return NULL;
}
VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, top_scope_, pos);
+ factory()->NewImportDeclaration(proxy, module, scope_, pos);
Declare(declaration, true, CHECK_OK);
}
@@ -1262,13 +1264,14 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
switch (peek()) {
case Token::IDENTIFIER: {
int pos = position();
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ Handle<String> name =
+ ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) {
names.Add(name, zone());
while (peek() == Token::COMMA) {
Consume(Token::COMMA);
- name = ParseIdentifier(CHECK_OK);
+ name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
names.Add(name, zone());
}
ExpectSemicolon(CHECK_OK);
@@ -1291,12 +1294,12 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
default:
*ok = false;
- ReportUnexpectedToken(scanner().current_token());
+ ReportUnexpectedToken(scanner()->current_token());
return NULL;
}
// Extract declared names into export declarations and interface.
- Interface* interface = top_scope_->interface();
+ Interface* interface = scope_->interface();
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1311,8 +1314,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// TODO(rossberg): Rethink whether we actually need to store export
// declarations (for compilation?).
// ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, top_scope_, position);
- // top_scope_->AddDeclaration(declaration);
+ // factory()->NewExportDeclaration(proxy, scope_, position);
+ // scope_->AddDeclaration(declaration);
}
ASSERT(result != NULL);
@@ -1438,9 +1441,8 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// In Harmony mode, this case also handles the extension:
// Statement:
// GeneratorDeclaration
- if (!top_scope_->is_classic_mode()) {
- ReportMessageAt(scanner().peek_location(), "strict_function",
- Vector<const char*>::empty());
+ if (!scope_->is_classic_mode()) {
+ ReportMessageAt(scanner()->peek_location(), "strict_function");
*ok = false;
return NULL;
}
@@ -1521,7 +1523,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// In harmony mode we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Variable", *c_string };
+ const char* elms[2] = { "Variable", c_string.get() };
Vector<const char*> args(elms, 2);
ReportMessage("redeclaration", args);
*ok = false;
@@ -1619,7 +1621,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
var->interface()->Print();
}
#endif
- ReportMessage("module_type_error", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("module_type_error",
+ Vector<Handle<String> >(&name, 1));
}
}
}
@@ -1633,11 +1636,12 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
Statement* Parser::ParseNativeDeclaration(bool* ok) {
int pos = peek_position();
Expect(Token::FUNCTION, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ // Allow "eval" or "arguments" for backward compatibility.
+ Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
bool done = (peek() == Token::RPAREN);
while (!done) {
- ParseIdentifier(CHECK_OK);
+ ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
done = (peek() == Token::RPAREN);
if (!done) {
Expect(Token::COMMA, CHECK_OK);
@@ -1657,7 +1661,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// other functions are set up when entering the surrounding scope.
VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, top_scope_, pos);
+ factory()->NewVariableDeclaration(proxy, VAR, scope_, pos);
Declare(declaration, true, CHECK_OK);
NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
name, extension_, RelocInfo::kNoPosition);
@@ -1681,6 +1685,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Handle<String> name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
+ scanner()->location(),
is_strict_reserved,
is_generator,
pos,
@@ -1692,10 +1697,10 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
// In extended mode, a function behaves as a lexical binding, except in the
// global scope.
VariableMode mode =
- is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
+ is_extended_mode() && !scope_->is_global_scope() ? LET : VAR;
VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_, pos);
+ factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
Declare(declaration, true, CHECK_OK);
if (names) names->Add(name, zone());
return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
@@ -1703,7 +1708,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
- if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
+ if (scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
// Block ::
// '{' Statement* '}'
@@ -1736,12 +1741,12 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
// Construct block expecting 16 statements.
Block* body =
factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
- Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
Expect(Token::LBRACE, CHECK_OK);
- block_scope->set_start_position(scanner().location().beg_pos);
- { BlockState block_state(this, block_scope);
+ block_scope->set_start_position(scanner()->location().beg_pos);
+ { BlockState block_state(&scope_, block_scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1754,7 +1759,7 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
}
}
Expect(Token::RBRACE, CHECK_OK);
- block_scope->set_end_position(scanner().location().end_pos);
+ block_scope->set_end_position(scanner()->location().end_pos);
block_scope = block_scope->FinalizeBlockScope();
body->set_scope(block_scope);
return body;
@@ -1775,12 +1780,6 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
}
-bool Parser::IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(isolate()->factory()->eval_string()) ||
- string.is_identical_to(isolate()->factory()->arguments_string());
-}
-
-
// If the variable declaration declares exactly one non-const
// variable, then *out is set to that variable. In all other cases,
// *out is untouched; in particular, it is the caller's responsibility
@@ -1829,7 +1828,7 @@ Block* Parser::ParseVariableDeclarations(
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in classic mode.
Consume(Token::CONST);
- switch (top_scope_->language_mode()) {
+ switch (scope_->language_mode()) {
case CLASSIC_MODE:
mode = CONST;
init_op = Token::INIT_CONST;
@@ -1900,16 +1899,9 @@ Block* Parser::ParseVariableDeclarations(
// Parse variable name.
if (nvars > 0) Consume(Token::COMMA);
- name = ParseIdentifier(CHECK_OK);
+ name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
- // Strict mode variables may not be named eval or arguments
- if (!declaration_scope->is_classic_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_var_name", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
// Declare variable.
// Note that we *always* must treat the initial value via a separate init
// assignment for variables and constants because the value must be assigned
@@ -1929,12 +1921,11 @@ Block* Parser::ParseVariableDeclarations(
is_const ? Interface::NewConst() : Interface::NewValue();
VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, top_scope_, pos);
+ factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
Declare(declaration, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
- ReportMessageAt(scanner().location(), "too_many_variables",
- Vector<const char*>::empty());
+ ReportMessageAt(scanner()->location(), "too_many_variables");
*ok = false;
return NULL;
}
@@ -1949,7 +1940,7 @@ Block* Parser::ParseVariableDeclarations(
//
// var v; v = x;
//
- // In particular, we need to re-lookup 'v' (in top_scope_, not
+ // In particular, we need to re-lookup 'v' (in scope_, not
// declaration_scope) as it may be a different 'v' than the 'v' in the
// declaration (e.g., if we are inside a 'with' statement or 'catch'
// block).
@@ -1967,7 +1958,7 @@ Block* Parser::ParseVariableDeclarations(
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
- Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
+ Scope* initialization_scope = is_const ? declaration_scope : scope_;
Expression* value = NULL;
int pos = -1;
// Harmony consts have non-optional initializers.
@@ -2145,7 +2136,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// make later anyway so we should go back and fix this then.
if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
SmartArrayPointer<char> c_string = label->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Label", *c_string };
+ const char* elms[2] = { "Label", c_string.get() };
Vector<const char*> args(elms, 2);
ReportMessage("redeclaration", args);
*ok = false;
@@ -2158,7 +2149,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
- top_scope_->RemoveUnresolved(var);
+ scope_->RemoveUnresolved(var);
Expect(Token::COLON, CHECK_OK);
return ParseStatement(labels, ok);
}
@@ -2168,12 +2159,12 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// no line-terminator between the two words.
if (extension_ != NULL &&
peek() == Token::FUNCTION &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() &&
expr != NULL &&
expr->AsVariableProxy() != NULL &&
expr->AsVariableProxy()->name()->Equals(
isolate()->heap()->native_string()) &&
- !scanner().literal_contains_escapes()) {
+ !scanner()->literal_contains_escapes()) {
return ParseNativeDeclaration(ok);
}
@@ -2181,11 +2172,11 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Only expect semicolon in the former case.
if (!FLAG_harmony_modules ||
peek() != Token::IDENTIFIER ||
- scanner().HasAnyLineTerminatorBeforeNext() ||
+ scanner()->HasAnyLineTerminatorBeforeNext() ||
expr->AsVariableProxy() == NULL ||
!expr->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_string()) ||
- scanner().literal_contains_escapes()) {
+ scanner()->literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
return factory()->NewExpressionStatement(expr, pos);
@@ -2222,9 +2213,10 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- label = ParseIdentifier(CHECK_OK);
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
}
IterationStatement* target = NULL;
target = LookupContinueTarget(label, CHECK_OK);
@@ -2236,7 +2228,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
message = "unknown_label";
args = Vector<Handle<String> >(&label, 1);
}
- ReportMessageAt(scanner().location(), message, args);
+ ParserTraits::ReportMessageAt(scanner()->location(), message, args);
*ok = false;
return NULL;
}
@@ -2253,9 +2245,10 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- label = ParseIdentifier(CHECK_OK);
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
}
// Parse labeled break statements that target themselves into
// empty statements, e.g. 'l1: l2: l3: break l2;'
@@ -2273,7 +2266,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
message = "unknown_label";
args = Vector<Handle<String> >(&label, 1);
}
- ReportMessageAt(scanner().location(), message, args);
+ ParserTraits::ReportMessageAt(scanner()->location(), message, args);
*ok = false;
return NULL;
}
@@ -2295,7 +2288,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Token::Value tok = peek();
Statement* result;
Expression* return_value;
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
@@ -2306,7 +2299,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
ExpectSemicolon(CHECK_OK);
if (is_generator()) {
Expression* generator = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Expression* yield = factory()->NewYield(
generator, return_value, Yield::FINAL, pos);
result = factory()->NewExpressionStatement(yield, pos);
@@ -2319,7 +2312,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// function. See ECMA-262, section 12.9, page 67.
//
// To be consistent with KJS we report the syntax error at runtime.
- Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* declaration_scope = scope_->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_eval_scope()) {
Handle<String> message = isolate()->factory()->illegal_return_string();
@@ -2338,7 +2331,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::WITH, CHECK_OK);
int pos = position();
- if (!top_scope_->is_classic_mode()) {
+ if (!scope_->is_classic_mode()) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2348,13 +2341,13 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- top_scope_->DeclarationScope()->RecordWithStatement();
- Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
+ scope_->DeclarationScope()->RecordWithStatement();
+ Scope* with_scope = NewScope(scope_, WITH_SCOPE);
Statement* stmt;
- { BlockState block_state(this, with_scope);
- with_scope->set_start_position(scanner().peek_location().beg_pos);
+ { BlockState block_state(&scope_, with_scope);
+ with_scope->set_start_position(scanner()->peek_location().beg_pos);
stmt = ParseStatement(labels, CHECK_OK);
- with_scope->set_end_position(scanner().location().end_pos);
+ with_scope->set_end_position(scanner()->location().end_pos);
}
return factory()->NewWithStatement(with_scope, expr, stmt, pos);
}
@@ -2428,7 +2421,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
int pos = position();
- if (scanner().HasAnyLineTerminatorBeforeNext()) {
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2483,35 +2476,27 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- catch_scope = NewScope(top_scope_, CATCH_SCOPE);
- catch_scope->set_start_position(scanner().location().beg_pos);
- name = ParseIdentifier(CHECK_OK);
-
- if (!top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_catch_variable", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ catch_scope = NewScope(scope_, CATCH_SCOPE);
+ catch_scope->set_start_position(scanner()->location().beg_pos);
+ name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- if (peek() == Token::LBRACE) {
- Target target(&this->target_stack_, &catch_collector);
- VariableMode mode = is_extended_mode() ? LET : VAR;
- catch_variable =
- catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
+ Target target(&this->target_stack_, &catch_collector);
+ VariableMode mode = is_extended_mode() ? LET : VAR;
+ catch_variable =
+ catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
- BlockState block_state(this, catch_scope);
- catch_block = ParseBlock(NULL, CHECK_OK);
- } else {
- Expect(Token::LBRACE, CHECK_OK);
- }
- catch_scope->set_end_position(scanner().location().end_pos);
+ BlockState block_state(&scope_, catch_scope);
+ catch_block = ParseBlock(NULL, CHECK_OK);
+
+ catch_scope->set_end_position(scanner()->location().end_pos);
tok = peek();
}
Block* finally_block = NULL;
- if (tok == Token::FINALLY || catch_block == NULL) {
+ ASSERT(tok == Token::FINALLY || catch_block != NULL);
+ if (tok == Token::FINALLY) {
Consume(Token::FINALLY);
finally_block = ParseBlock(NULL, CHECK_OK);
}
@@ -2524,7 +2509,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL && finally_block != NULL) {
// If we have both, create an inner try/catch.
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
TryCatchStatement* statement = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block,
RelocInfo::kNoPosition);
@@ -2538,12 +2523,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
result = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block, pos);
} else {
ASSERT(finally_block != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
result = factory()->NewTryFinallyStatement(
index, try_block, finally_block, pos);
// Combine the jump targets of the try block and the possible catch block.
@@ -2623,13 +2608,10 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
if (for_of != NULL) {
Factory* heap_factory = isolate()->factory();
- Handle<String> iterator_str = heap_factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR(".iterator"));
- Handle<String> result_str = heap_factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR(".result"));
- Variable* iterator =
- top_scope_->DeclarationScope()->NewTemporary(iterator_str);
- Variable* result = top_scope_->DeclarationScope()->NewTemporary(result_str);
+ Variable* iterator = scope_->DeclarationScope()->NewTemporary(
+ heap_factory->dot_iterator_string());
+ Variable* result = scope_->DeclarationScope()->NewTemporary(
+ heap_factory->dot_result_string());
Expression* assign_iterator;
Expression* next_result;
@@ -2695,13 +2677,13 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* init = NULL;
// Create an in-between scope for let-bound iteration variables.
- Scope* saved_scope = top_scope_;
- Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
- top_scope_ = for_scope;
+ Scope* saved_scope = scope_;
+ Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
+ scope_ = for_scope;
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- for_scope->set_start_position(scanner().location().beg_pos);
+ for_scope->set_start_position(scanner()->location().beg_pos);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
bool is_const = peek() == Token::CONST;
@@ -2724,15 +2706,15 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, interface);
+ scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, each, enumerable, body);
Block* result =
factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop w/ variable/const declaration.
@@ -2770,20 +2752,20 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Handle<String> tempstr =
heap_factory->NewConsString(heap_factory->dot_for_string(), name);
Handle<String> tempname = heap_factory->InternalizeString(tempstr);
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
+ Variable* temp = scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
- top_scope_ = saved_scope;
+ scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
- top_scope_ = for_scope;
+ scope_ = for_scope;
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
+ scope_->NewUnresolved(factory(), name, Interface::NewValue());
Statement* body = ParseStatement(NULL, CHECK_OK);
Block* body_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
@@ -2795,8 +2777,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
body_block->set_scope(for_scope);
// Parsed for-in loop w/ let declaration.
@@ -2829,8 +2811,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, expression, enumerable, body);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop.
@@ -2864,8 +2846,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
if (for_scope != NULL) {
// Rewrite a for statement of the form
@@ -2940,9 +2922,9 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
expression = NewThrowReferenceError(message);
}
- if (!top_scope_->is_classic_mode()) {
+ if (!scope_->is_classic_mode()) {
// Assignment to eval or arguments is disallowed in strict mode.
- CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
+ CheckStrictModeLValue(expression, CHECK_OK);
}
MarkAsLValue(expression);
@@ -2960,7 +2942,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
property != NULL &&
property->obj()->AsVariableProxy() != NULL &&
property->obj()->AsVariableProxy()->is_this()) {
- current_function_state_->AddProperty();
+ function_state_->AddProperty();
}
// If we assign a function literal to a property we pretenure the
@@ -2996,11 +2978,11 @@ Expression* Parser::ParseYieldExpression(bool* ok) {
Yield::Kind kind =
Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
Expression* generator_object = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
Yield* yield = factory()->NewYield(generator_object, expression, kind, pos);
if (kind == Yield::DELEGATING) {
- yield->set_index(current_function_state_->NextHandlerIndex());
+ yield->set_index(function_state_->NextHandlerIndex());
}
return yield;
}
@@ -3027,14 +3009,6 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
}
-int ParserBase::Precedence(Token::Value tok, bool accept_IN) {
- if (tok == Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return Token::Precedence(tok);
-}
-
-
// Precedence >= 4
Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
ASSERT(prec >= 4);
@@ -3172,7 +3146,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
// "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
+ if (op == Token::DELETE && !scope_->is_classic_mode()) {
VariableProxy* operand = expression->AsVariableProxy();
if (operand != NULL && !operand->is_this()) {
ReportMessage("strict_delete", Vector<const char*>::empty());
@@ -3220,9 +3194,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
expression = NewThrowReferenceError(message);
}
- if (!top_scope_->is_classic_mode()) {
+ if (!scope_->is_classic_mode()) {
// Prefix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
+ CheckStrictModeLValue(expression, CHECK_OK);
}
MarkAsLValue(expression);
@@ -3242,7 +3216,7 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// LeftHandSideExpression ('++' | '--')?
Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
Token::IsCountOp(peek())) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
@@ -3254,9 +3228,9 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
expression = NewThrowReferenceError(message);
}
- if (!top_scope_->is_classic_mode()) {
+ if (!scope_->is_classic_mode()) {
// Postfix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
+ CheckStrictModeLValue(expression, CHECK_OK);
}
MarkAsLValue(expression);
@@ -3275,12 +3249,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// LeftHandSideExpression ::
// (NewExpression | MemberExpression) ...
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
+ Expression* result = ParseMemberWithNewPrefixesExpression(CHECK_OK);
while (true) {
switch (peek()) {
@@ -3295,7 +3264,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
case Token::LPAREN: {
int pos;
- if (scanner().current_token() == Token::IDENTIFIER) {
+ if (scanner()->current_token() == Token::IDENTIFIER) {
// For call of an identifier we want to report position of
// the identifier as position of the call in the stack trace.
pos = position();
@@ -3325,7 +3294,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL &&
callee->IsVariable(isolate()->factory()->eval_string())) {
- top_scope_->DeclarationScope()->RecordEvalCall();
+ scope_->DeclarationScope()->RecordEvalCall();
}
result = factory()->NewCall(result, args, pos);
if (fni_ != NULL) fni_->RemoveLastFunction();
@@ -3349,70 +3318,73 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
}
-Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
+Expression* Parser::ParseMemberWithNewPrefixesExpression(bool* ok) {
// NewExpression ::
// ('new')+ MemberExpression
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- Expect(Token::NEW, CHECK_OK);
- PositionStack::Element pos(stack, position());
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewPrefix(stack, CHECK_OK);
- } else {
- result = ParseMemberWithNewPrefixesExpression(stack, CHECK_OK);
- }
-
- if (!stack->is_empty()) {
- int last = stack->pop();
- result = factory()->NewCallNew(
- result, new(zone()) ZoneList<Expression*>(0, zone()), last);
- }
- return result;
-}
+ // The grammar for new expressions is pretty warped. We can have several 'new'
+ // keywords following each other, and then a MemberExpression. When we see '('
+ // after the MemberExpression, it's associated with the rightmost unassociated
+ // 'new' to create a NewExpression with arguments. However, a NewExpression
+ // can also occur without arguments.
+ // Examples of new expression:
+ // new foo.bar().baz means (new (foo.bar)()).baz
+ // new foo()() means (new foo())()
+ // new new foo()() means (new (new foo())())
+ // new new foo means new (new foo)
+ // new new foo() means new (new foo())
+ // new new foo().bar().baz means (new (new foo()).bar()).baz
-Expression* Parser::ParseNewExpression(bool* ok) {
- PositionStack stack(ok);
- return ParseNewPrefix(&stack, ok);
+ if (peek() == Token::NEW) {
+ Consume(Token::NEW);
+ int new_pos = position();
+ Expression* result = ParseMemberWithNewPrefixesExpression(CHECK_OK);
+ if (peek() == Token::LPAREN) {
+ // NewExpression with arguments.
+ ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
+ result = factory()->NewCallNew(result, args, new_pos);
+ // The expression can still continue with . or [ after the arguments.
+ result = ParseMemberExpressionContinuation(result, CHECK_OK);
+ return result;
+ }
+ // NewExpression without arguments.
+ return factory()->NewCallNew(
+ result, new(zone()) ZoneList<Expression*>(0, zone()), new_pos);
+ }
+ // No 'new' keyword.
+ return ParseMemberExpression(ok);
}
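
The recursion above makes the binding rule concrete: each 'new' parses one nested member-with-new expression, and an argument list that follows binds to that innermost (rightmost unassociated) 'new'; without an argument list the 'new' still produces a NewExpression. A simplified standalone sketch over a pre-tokenized input, not the V8 implementation:

#include <iostream>
#include <string>
#include <vector>

static std::vector<std::string> tokens;
static size_t pos = 0;

static std::string Peek() { return pos < tokens.size() ? tokens[pos] : ""; }
static std::string Next() { return tokens[pos++]; }

// Each 'new' recurses for its operand; a following '(' argument list binds to
// this (the innermost pending) 'new'. Malformed input is not handled.
static std::string ParseMemberWithNewPrefixes() {
  if (Peek() == "new") {
    Next();
    std::string operand = ParseMemberWithNewPrefixes();
    if (Peek() == "(") {
      Next();  // '('
      Next();  // ')', empty argument list for the sketch
      return "(new " + operand + "())";
    }
    return "(new " + operand + ")";  // NewExpression without arguments
  }
  return Next();  // a plain identifier
}

int main() {
  tokens = {"new", "new", "foo", "(", ")"};
  std::cout << ParseMemberWithNewPrefixes() << "\n";  // (new (new foo()))
  return 0;
}
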
Expression* Parser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(NULL, ok);
-}
-
-
-Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok) {
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments)*
+ // The '[' Expression ']' and '.' Identifier parts are parsed by
+ // ParseMemberExpressionContinuation, and the Arguments part is parsed by the
+ // caller.
+
// Parse the initial primary or function expression.
Expression* result = NULL;
if (peek() == Token::FUNCTION) {
- Expect(Token::FUNCTION, CHECK_OK);
+ Consume(Token::FUNCTION);
int function_token_position = position();
bool is_generator = allow_generators() && Check(Token::MUL);
Handle<String> name;
bool is_strict_reserved_name = false;
+ Scanner::Location function_name_location = Scanner::Location::invalid();
if (peek_any_identifier()) {
name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
CHECK_OK);
+ function_name_location = scanner()->location();
}
FunctionLiteral::FunctionType function_type = name.is_null()
? FunctionLiteral::ANONYMOUS_EXPRESSION
: FunctionLiteral::NAMED_EXPRESSION;
result = ParseFunctionLiteral(name,
+ function_name_location,
is_strict_reserved_name,
is_generator,
function_token_position,
@@ -3422,13 +3394,22 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
result = ParsePrimaryExpression(CHECK_OK);
}
+ result = ParseMemberExpressionContinuation(result, CHECK_OK);
+ return result;
+}
+
+
+Expression* Parser::ParseMemberExpressionContinuation(Expression* expression,
+ bool* ok) {
+ // Parses this part of MemberExpression:
+ // ('[' Expression ']' | '.' Identifier)*
while (true) {
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
int pos = position();
Expression* index = ParseExpression(true, CHECK_OK);
- result = factory()->NewProperty(result, index, pos);
+ expression = factory()->NewProperty(expression, index, pos);
if (fni_ != NULL) {
if (index->IsPropertyName()) {
fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
@@ -3444,23 +3425,17 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
Consume(Token::PERIOD);
int pos = position();
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = factory()->NewProperty(
- result, factory()->NewLiteral(name, pos), pos);
+ expression = factory()->NewProperty(
+ expression, factory()->NewLiteral(name, pos), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
- case Token::LPAREN: {
- if ((stack == NULL) || stack->is_empty()) return result;
- // Consume one of the new prefixes (already parsed).
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int pos = stack->pop();
- result = factory()->NewCallNew(result, args, pos);
- break;
- }
default:
- return result;
+ return expression;
}
}
+ ASSERT(false);
+ return NULL;
}
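
ParseMemberExpressionContinuation() folds any number of '.' Identifier and '[' Expression ']' accesses onto an already-parsed expression, left to right, stopping at the first token that belongs to the caller. A standalone sketch of that loop over a pre-tokenized property chain, not V8 code:

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Pre-tokenized "foo.bar[0].baz" with the initial "foo" already parsed.
  std::vector<std::string> toks = {".", "bar", "[", "0", "]", ".", "baz"};
  std::string expr = "foo";
  for (size_t i = 0; i < toks.size();) {
    if (toks[i] == ".") {            // '.' Identifier
      expr = "(" + expr + "." + toks[i + 1] + ")";
      i += 2;
    } else if (toks[i] == "[") {     // '[' Expression ']'
      expr = "(" + expr + "[" + toks[i + 1] + "])";
      i += 3;
    } else {
      break;                         // anything else ends the continuation
    }
  }
  std::cout << expr << "\n";         // (((foo.bar)[0]).baz)
  return 0;
}
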
@@ -3478,167 +3453,15 @@ DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
}
-void Parser::ReportUnexpectedToken(Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram/ParseJson.
- if (token == Token::ILLEGAL && stack_overflow()) return;
- // Four of the tokens are treated specially
- switch (token) {
- case Token::EOS:
- return ReportMessage("unexpected_eos", Vector<const char*>::empty());
- case Token::NUMBER:
- return ReportMessage("unexpected_token_number",
- Vector<const char*>::empty());
- case Token::STRING:
- return ReportMessage("unexpected_token_string",
- Vector<const char*>::empty());
- case Token::IDENTIFIER:
- return ReportMessage("unexpected_token_identifier",
- Vector<const char*>::empty());
- case Token::FUTURE_RESERVED_WORD:
- return ReportMessage("unexpected_reserved",
- Vector<const char*>::empty());
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessage(top_scope_->is_classic_mode() ?
- "unexpected_token_identifier" :
- "unexpected_strict_reserved",
- Vector<const char*>::empty());
- default:
- const char* name = Token::String(token);
- ASSERT(name != NULL);
- ReportMessage("unexpected_token", Vector<const char*>(&name, 1));
- }
-}
-
-
void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
SmartArrayPointer<char> name_string = name->ToCString(DISALLOW_NULLS);
- const char* element[1] = { *name_string };
+ const char* element[1] = { name_string.get() };
ReportMessage("invalid_preparser_data",
Vector<const char*>(element, 1));
*ok = false;
}
-Expression* Parser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- int pos = peek_position();
- Expression* result = NULL;
- switch (peek()) {
- case Token::THIS: {
- Consume(Token::THIS);
- result = factory()->NewVariableProxy(top_scope_->receiver());
- break;
- }
-
- case Token::NULL_LITERAL:
- Consume(Token::NULL_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->null_value(), pos);
- break;
-
- case Token::TRUE_LITERAL:
- Consume(Token::TRUE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->true_value(), pos);
- break;
-
- case Token::FALSE_LITERAL:
- Consume(Token::FALSE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->false_value(), pos);
- break;
-
- case Token::IDENTIFIER:
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
- Handle<String> name = ParseIdentifier(CHECK_OK);
- if (fni_ != NULL) fni_->PushVariableName(name);
- // The name may refer to a module instance object, so its type is unknown.
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Variable %s ", name->ToAsciiArray());
-#endif
- Interface* interface = Interface::NewUnknown(zone());
- result = top_scope_->NewUnresolved(factory(), name, interface, pos);
- break;
- }
-
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTAL |
- ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- result = factory()->NewNumberLiteral(value, pos);
- break;
- }
-
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> symbol = GetSymbol();
- result = factory()->NewLiteral(symbol, pos);
- if (fni_ != NULL) fni_->PushLiteralName(symbol);
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- // Heuristically try to detect immediately called functions before
- // seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- break;
-
- case Token::MOD:
- if (allow_natives_syntax() || extension_ != NULL) {
- result = ParseV8Intrinsic(CHECK_OK);
- break;
- }
- // If we're not allowing special syntax we fall-through to the
- // default case.
-
- default: {
- Token::Value tok = Next();
- ReportUnexpectedToken(tok);
- *ok = false;
- return NULL;
- }
- }
-
- return result;
-}
-
-
Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
@@ -3661,63 +3484,9 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Expect(Token::RBRACK, CHECK_OK);
// Update the scope information before the pre-parsing bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- // Allocate a fixed array to hold all the object literals.
- Handle<JSArray> array =
- isolate()->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
- isolate()->factory()->SetElementsCapacityAndLength(
- array, values->length(), values->length());
-
- // Fill in the literals.
- Heap* heap = isolate()->heap();
- bool is_simple = true;
- int depth = 1;
- bool is_holey = false;
- for (int i = 0, n = values->length(); i < n; i++) {
- MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() + 1 > depth) {
- depth = m_literal->depth() + 1;
- }
- Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsTheHole()) {
- is_holey = true;
- } else if (boilerplate_value->IsUninitialized()) {
- is_simple = false;
- JSObject::SetOwnElement(
- array, i, handle(Smi::FromInt(0), isolate()), kNonStrictMode);
- } else {
- JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
- }
- }
-
- Handle<FixedArrayBase> element_values(array->elements());
-
- // Simple and shallow arrays can be lazily copied, we transform the
- // elements array to a copy-on-write array.
- if (is_simple && depth == 1 && values->length() > 0 &&
- array->HasFastSmiOrObjectElements()) {
- element_values->set_map(heap->fixed_cow_array_map());
- }
-
- // Remember both the literal's constant values as well as the ElementsKind
- // in a 2-element FixedArray.
- Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(2, TENURED);
-
- ElementsKind kind = array->GetElementsKind();
- kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
- literals->set(0, Smi::FromInt(kind));
- literals->set(1, *element_values);
-
- return factory()->NewArrayLiteral(
- literals, values, literal_index, is_simple, depth, pos);
-}
-
-
-bool Parser::IsBoilerplateProperty(ObjectLiteral::Property* property) {
- return property != NULL &&
- property->kind() != ObjectLiteral::Property::PROTOTYPE;
+ return factory()->NewArrayLiteral(values, literal_index, pos);
}
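
After this change ParseArrayLiteral no longer builds the boilerplate JSArray itself; it only takes a materialized-literal index from the FunctionState and hands the raw value list to the AST factory. A rough sketch of the per-function index counter this relies on; the struct and the prefix value are simplified stand-ins, not V8's FunctionState.

    // Sketch only: each array/object/regexp literal gets a fresh slot index
    // recorded on its AST node at parse time.
    #include <iostream>

    struct FunctionStateSketch {
      static const int kLiteralsPrefixSize = 1;  // reserved slots, assumed value
      int next_materialized_literal_index_ = kLiteralsPrefixSize;

      int NextMaterializedLiteralIndex() {
        return next_materialized_literal_index_++;
      }
      int materialized_literal_count() const {
        return next_materialized_literal_index_ - kLiteralsPrefixSize;
      }
    };

    int main() {
      FunctionStateSketch state;
      int array_literal_index = state.NextMaterializedLiteralIndex();   // e.g. [1, 2]
      int object_literal_index = state.NextMaterializedLiteralIndex();  // e.g. {a: 1}
      std::cout << array_literal_index << " " << object_literal_index << " "
                << state.materialized_literal_count() << "\n";          // 1 2 2
    }
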
@@ -3764,89 +3533,6 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
}
-Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
- if (expression->AsLiteral() != NULL) {
- return expression->AsLiteral()->value();
- }
- if (CompileTimeValue::IsCompileTimeValue(expression)) {
- return CompileTimeValue::GetValue(isolate(), expression);
- }
- return isolate()->factory()->uninitialized_value();
-}
-
-
-void Parser::BuildObjectLiteralConstantProperties(
- ZoneList<ObjectLiteral::Property*>* properties,
- Handle<FixedArray> constant_properties,
- bool* is_simple,
- bool* fast_elements,
- int* depth,
- bool* may_store_doubles) {
- int position = 0;
- // Accumulate the value in local variables and store it at the end.
- bool is_simple_acc = true;
- int depth_acc = 1;
- uint32_t max_element_index = 0;
- uint32_t elements = 0;
- for (int i = 0; i < properties->length(); i++) {
- ObjectLiteral::Property* property = properties->at(i);
- if (!IsBoilerplateProperty(property)) {
- is_simple_acc = false;
- continue;
- }
- MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() >= depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
-
- // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
- // value for COMPUTED properties, the real value is filled in at
- // runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->value();
- Handle<Object> value = GetBoilerplateValue(property->value());
-
- // Ensure objects that may, at any point in time, contain fields with double
- // representation are always treated as nested objects. This is true for
- // computed fields (value is undefined), and smi and double literals
- // (value->IsNumber()).
- // TODO(verwaest): Remove once we can store them inline.
- if (FLAG_track_double_fields &&
- (value->IsNumber() || value->IsUninitialized())) {
- *may_store_doubles = true;
- }
-
- is_simple_acc = is_simple_acc && !value->IsUninitialized();
-
- // Keep track of the number of elements in the object literal and
- // the largest element index. If the largest element index is
- // much larger than the number of elements, creating an object
- // literal with fast elements will be a waste of space.
- uint32_t element_index = 0;
- if (key->IsString()
- && Handle<String>::cast(key)->AsArrayIndex(&element_index)
- && element_index > max_element_index) {
- max_element_index = element_index;
- elements++;
- } else if (key->IsSmi()) {
- int key_value = Smi::cast(*key)->value();
- if (key_value > 0
- && static_cast<uint32_t>(key_value) > max_element_index) {
- max_element_index = key_value;
- }
- elements++;
- }
-
- // Add name, value pair to the fixed array.
- constant_properties->set(position++, *key);
- constant_properties->set(position++, *value);
- }
- *fast_elements =
- (max_element_index <= 32) || ((2 * elements) >= max_element_index);
- *is_simple = is_simple_acc;
- *depth = depth_acc;
-}
-
-
Expression* Parser::ParseObjectLiteral(bool* ok) {
// ObjectLiteral ::
// '{' (
@@ -3860,7 +3546,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
int number_of_boilerplate_properties = 0;
bool has_function = false;
- ObjectLiteralChecker checker(this, top_scope_->language_mode());
+ ObjectLiteralChecker checker(this, scope_->language_mode());
Expect(Token::LBRACE, CHECK_OK);
@@ -3906,6 +3592,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
: GetSymbol();
FunctionLiteral* value =
ParseFunctionLiteral(name,
+ scanner()->location(),
false, // reserved words are allowed here
false, // not a generator
RelocInfo::kNoPosition,
@@ -3915,7 +3602,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// Specification only allows zero parameters for get and one for set.
ObjectLiteral::Property* property =
factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
- if (IsBoilerplateProperty(property)) {
+ if (ObjectLiteral::IsBoilerplateProperty(property)) {
number_of_boilerplate_properties++;
}
properties->Add(property, zone());
@@ -3946,9 +3633,9 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
case Token::NUMBER: {
Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
+ ASSERT(scanner()->is_literal_ascii());
double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
+ scanner()->literal_ascii_string(),
ALLOW_HEX | ALLOW_OCTAL |
ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
key = factory()->NewNumberLiteral(value, next_pos);
@@ -3975,19 +3662,21 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Expression* value = ParseAssignmentExpression(true, CHECK_OK);
ObjectLiteral::Property* property =
- new(zone()) ObjectLiteral::Property(key, value, isolate());
+ factory()->NewObjectLiteralProperty(key, value);
// Mark top-level object literals that contain function literals and
// pretenure the literal so it can be added as a constant function
// property.
- if (top_scope_->DeclarationScope()->is_global_scope() &&
+ if (scope_->DeclarationScope()->is_global_scope() &&
value->AsFunctionLiteral() != NULL) {
has_function = true;
value->AsFunctionLiteral()->set_pretenure();
}
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
+ if (ObjectLiteral::IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
properties->Add(property, zone());
// TODO(1240767): Consider allowing trailing comma.
@@ -4001,53 +3690,16 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Expect(Token::RBRACE, CHECK_OK);
// Computation of literal_index must happen before pre parse bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
- number_of_boilerplate_properties * 2, TENURED);
-
- bool is_simple = true;
- bool fast_elements = true;
- int depth = 1;
- bool may_store_doubles = false;
- BuildObjectLiteralConstantProperties(properties,
- constant_properties,
- &is_simple,
- &fast_elements,
- &depth,
- &may_store_doubles);
- return factory()->NewObjectLiteral(constant_properties,
- properties,
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
+
+ return factory()->NewObjectLiteral(properties,
literal_index,
- is_simple,
- fast_elements,
- depth,
- may_store_doubles,
+ number_of_boilerplate_properties,
has_function,
pos);
}
-Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
- int pos = peek_position();
- if (!scanner().ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessage("unterminated_regexp", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- Handle<String> js_pattern = NextLiteralString(TENURED);
- scanner().ScanRegExpFlags();
- Handle<String> js_flags = NextLiteralString(TENURED);
- Next();
-
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
-}
-
-
ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
@@ -4059,8 +3711,7 @@ ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
result->Add(argument, zone());
if (result->length() > Code::kMaxArguments) {
- ReportMessageAt(scanner().location(), "too_many_arguments",
- Vector<const char*>::empty());
+ ReportMessageAt(scanner()->location(), "too_many_arguments");
*ok = false;
return NULL;
}
@@ -4167,6 +3818,7 @@ class SingletonLogger : public ParserRecorder {
FunctionLiteral* Parser::ParseFunctionLiteral(
Handle<String> function_name,
+ Scanner::Location function_name_location,
bool name_is_strict_reserved,
bool is_generator,
int function_token_pos,
@@ -4216,14 +3868,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// one relative to the deserialized scope chain. Otherwise we must be
// compiling a function in an inner declaration scope in the eval, e.g. a
// nested function, and hoisting works normally relative to that.
- Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope =
function_type == FunctionLiteral::DECLARATION && !is_extended_mode() &&
(original_scope_ == original_declaration_scope ||
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE)
- : NewScope(top_scope_, FUNCTION_SCOPE);
+ : NewScope(scope_, FUNCTION_SCOPE);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -4236,60 +3888,61 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral::IsGeneratorFlag generator = is_generator
? FunctionLiteral::kIsGenerator
: FunctionLiteral::kNotGenerator;
+ DeferredFeedbackSlotProcessor* slot_processor;
AstProperties ast_properties;
BailoutReason dont_optimize_reason = kNoReason;
// Parse function body.
- { FunctionState function_state(this, scope, isolate());
- top_scope_->SetScopeName(function_name);
+ { FunctionState function_state(&function_state_, &scope_, scope, zone());
+ scope_->SetScopeName(function_name);
if (is_generator) {
// For generators, allocating variables in contexts is currently a win
// because it minimizes the work needed to suspend and resume an
// activation.
- top_scope_->ForceContextAllocation();
+ scope_->ForceContextAllocation();
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
- // expressions. Presence of a variable for the generator object in the
- // FunctionState indicates that this function is a generator.
- Handle<String> tempname = isolate()->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR(".generator_object"));
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
+ // expressions. This also marks the FunctionState as a generator.
+ Variable* temp = scope_->DeclarationScope()->NewTemporary(
+ isolate()->factory()->dot_generator_object_string());
function_state.set_generator_object_variable(temp);
}
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
- Scanner::Location name_loc = Scanner::Location::invalid();
- Scanner::Location dupe_loc = Scanner::Location::invalid();
+ scope->set_start_position(scanner()->location().beg_pos);
+
+ // We don't yet know if the function will be strict, so we cannot yet
+ // produce errors for parameter names or duplicates. However, we remember
+ // the locations of these errors if they occur and produce the errors later.
+ Scanner::Location eval_args_error_log = Scanner::Location::invalid();
+ Scanner::Location dupe_error_loc = Scanner::Location::invalid();
Scanner::Location reserved_loc = Scanner::Location::invalid();
bool done = (peek() == Token::RPAREN);
while (!done) {
bool is_strict_reserved = false;
Handle<String> param_name =
- ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
- CHECK_OK);
+ ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
// Store locations for possible future error reports.
- if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
- name_loc = scanner().location();
- }
- if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
- duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
- dupe_loc = scanner().location();
+ if (!eval_args_error_log.IsValid() && IsEvalOrArguments(param_name)) {
+ eval_args_error_log = scanner()->location();
}
if (!reserved_loc.IsValid() && is_strict_reserved) {
- reserved_loc = scanner().location();
+ reserved_loc = scanner()->location();
+ }
+ if (!dupe_error_loc.IsValid() && scope_->IsDeclared(param_name)) {
+ duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
+ dupe_error_loc = scanner()->location();
}
- top_scope_->DeclareParameter(param_name, VAR);
+ scope_->DeclareParameter(param_name, VAR);
num_parameters++;
if (num_parameters > Code::kMaxArguments) {
- ReportMessageAt(scanner().location(), "too_many_parameters",
- Vector<const char*>::empty());
+ ReportMessageAt(scanner()->location(), "too_many_parameters");
*ok = false;
return NULL;
}
@@ -4311,13 +3964,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
if (is_extended_mode()) fvar_init_op = Token::INIT_CONST_HARMONY;
VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
- fvar = new(zone()) Variable(top_scope_,
+ fvar = new(zone()) Variable(scope_,
function_name, fvar_mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
- proxy, fvar_mode, top_scope_, RelocInfo::kNoPosition);
- top_scope_->DeclareFunctionVar(fvar_declaration);
+ proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
+ scope_->DeclareFunctionVar(fvar_declaration);
}
// Determine whether the function will be lazily compiled.
@@ -4332,7 +3985,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// These are all things we can know at this point, without looking at the
// function itself.
bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
- top_scope_->AllowsLazyCompilation() &&
+ scope_->AllowsLazyCompilation() &&
!parenthesized_function_);
parenthesized_function_ = false; // The bit was set for this function only.
@@ -4350,7 +4003,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// to check.
ReportInvalidPreparseData(function_name, CHECK_OK);
}
- scanner().SeekForward(entry.end_pos() - 1);
+ scanner()->SeekForward(entry.end_pos() - 1);
scope->set_end_position(entry.end_pos());
Expect(Token::RBRACE, CHECK_OK);
@@ -4358,7 +4011,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->end_position() - function_block_pos);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
- top_scope_->SetLanguageMode(entry.language_mode());
+ scope_->SetLanguageMode(entry.language_mode());
} else {
is_lazily_compiled = false;
}
@@ -4380,8 +4033,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (arg != NULL) {
args = Vector<const char*>(&arg, 1);
}
- ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
- logger.message(), args);
+ ParserTraits::ReportMessageAt(
+ Scanner::Location(logger.start(), logger.end()),
+ logger.message(),
+ args);
*ok = false;
return NULL;
}
@@ -4391,7 +4046,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->end_position() - function_block_pos);
materialized_literal_count = logger.literals();
expected_property_count = logger.properties();
- top_scope_->SetLanguageMode(logger.language_mode());
+ scope_->SetLanguageMode(logger.language_mode());
}
}
@@ -4399,7 +4054,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
body = new(zone()) ZoneList<Statement*>(8, zone());
if (fvar != NULL) {
- VariableProxy* fproxy = top_scope_->NewUnresolved(
+ VariableProxy* fproxy = scope_->NewUnresolved(
factory(), function_name, Interface::NewConst());
fproxy->BindTo(fvar);
body->Add(factory()->NewExpressionStatement(
@@ -4419,11 +4074,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
arguments, pos);
VariableProxy* init_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
VariableProxy* get_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Yield* yield = factory()->NewYield(
get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
body->Add(factory()->NewExpressionStatement(
@@ -4434,7 +4089,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_generator) {
VariableProxy* get_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Expression *undefined = factory()->NewLiteral(
isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
Yield* yield = factory()->NewYield(
@@ -4448,46 +4103,34 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
handler_count = function_state.handler_count();
Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
+ scope->set_end_position(scanner()->location().end_pos);
}
- // Validate strict mode.
- if (!top_scope_->is_classic_mode()) {
+ // Validate strict mode. We can do this only after parsing the function,
+ // since the function can declare itself strict.
+ if (!scope_->is_classic_mode()) {
if (IsEvalOrArguments(function_name)) {
- int start_pos = scope->start_position();
- int position = function_token_pos != RelocInfo::kNoPosition
- ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
- Scanner::Location location = Scanner::Location(position, start_pos);
- ReportMessageAt(location,
- "strict_function_name", Vector<const char*>::empty());
+ ReportMessageAt(function_name_location, "strict_eval_arguments");
*ok = false;
return NULL;
}
- if (name_loc.IsValid()) {
- ReportMessageAt(name_loc, "strict_param_name",
- Vector<const char*>::empty());
+ if (name_is_strict_reserved) {
+ ReportMessageAt(function_name_location, "unexpected_strict_reserved");
*ok = false;
return NULL;
}
- if (dupe_loc.IsValid()) {
- ReportMessageAt(dupe_loc, "strict_param_dupe",
- Vector<const char*>::empty());
+ if (eval_args_error_log.IsValid()) {
+ ReportMessageAt(eval_args_error_log, "strict_eval_arguments");
*ok = false;
return NULL;
}
- if (name_is_strict_reserved) {
- int start_pos = scope->start_position();
- int position = function_token_pos != RelocInfo::kNoPosition
- ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
- Scanner::Location location = Scanner::Location(position, start_pos);
- ReportMessageAt(location, "strict_reserved_word",
- Vector<const char*>::empty());
+ if (dupe_error_loc.IsValid()) {
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe");
*ok = false;
return NULL;
}
if (reserved_loc.IsValid()) {
- ReportMessageAt(reserved_loc, "strict_reserved_word",
- Vector<const char*>::empty());
+ ReportMessageAt(reserved_loc, "unexpected_strict_reserved");
*ok = false;
return NULL;
}
@@ -4496,6 +4139,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CHECK_OK);
}
ast_properties = *factory()->visitor()->ast_properties();
+ slot_processor = factory()->visitor()->slot_processor();
dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
}
@@ -4519,6 +4163,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
pos);
function_literal->set_function_token_position(function_token_pos);
function_literal->set_ast_properties(&ast_properties);
+ function_literal->set_slot_processor(slot_processor);
function_literal->set_dont_optimize_reason(dont_optimize_reason);
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
@@ -4529,7 +4174,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
SingletonLogger* logger) {
HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
- ASSERT_EQ(Token::LBRACE, scanner().current_token());
+ ASSERT_EQ(Token::LBRACE, scanner()->current_token());
if (reusable_preparser_ == NULL) {
intptr_t stack_limit = isolate()->stack_guard()->real_climit();
@@ -4544,7 +4189,7 @@ PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
allow_harmony_numeric_literals());
}
PreParser::PreParseResult result =
- reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
+ reusable_preparser_->PreParseLazyFunction(scope_->language_mode(),
is_generator(),
logger);
return result;
@@ -4557,13 +4202,14 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
int pos = peek_position();
Expect(Token::MOD, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ // Allow "eval" or "arguments" for backward compatibility.
+ Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time not when reparsing because of lazy compilation.
- top_scope_->DeclarationScope()->ForceEagerCompilation();
+ scope_->DeclarationScope()->ForceEagerCompilation();
}
const Runtime::Function* function = Runtime::FunctionForName(name);
@@ -4595,7 +4241,8 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
// Check that the function is defined if it's an inline runtime call.
if (function == NULL && name->Get(0) == '_') {
- ReportMessage("not_defined", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("not_defined",
+ Vector<Handle<String> >(&name, 1));
*ok = false;
return NULL;
}
@@ -4605,52 +4252,6 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
-bool ParserBase::peek_any_identifier() {
- Token::Value next = peek();
- return next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::YIELD;
-}
-
-
-bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) {
- if (peek() == Token::IDENTIFIER &&
- scanner()->is_next_contextual_keyword(keyword)) {
- Consume(Token::IDENTIFIER);
- return true;
- }
- return false;
-}
-
-
-void ParserBase::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- Token::Value tok = peek();
- if (tok == Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner()->HasAnyLineTerminatorBeforeNext() ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- return;
- }
- Expect(Token::SEMICOLON, ok);
-}
-
-
-void ParserBase::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
- Expect(Token::IDENTIFIER, ok);
- if (!*ok) return;
- if (!scanner()->is_literal_contextual_keyword(keyword)) {
- ReportUnexpectedToken(scanner()->current_token());
- *ok = false;
- }
-}
-
-
Literal* Parser::GetLiteralUndefined(int position) {
return factory()->NewLiteral(
isolate()->factory()->undefined_value(), position);
@@ -4663,56 +4264,6 @@ Literal* Parser::GetLiteralTheHole(int position) {
}
-// Parses an identifier that is valid for the current scope, in particular it
-// fails on strict mode future reserved keywords in a strict scope.
-Handle<String> Parser::ParseIdentifier(bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER ||
- (top_scope_->is_classic_mode() &&
- (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !is_generator())))) {
- return GetSymbol();
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
-}
-
-
-// Parses and identifier or a strict mode future reserved word, and indicate
-// whether it is strict mode future reserved.
-Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- *is_strict_reserved = false;
- } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !is_generator())) {
- *is_strict_reserved = true;
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol();
-}
-
-
-Handle<String> Parser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol();
-}
-
-
void Parser::MarkAsLValue(Expression* expression) {
VariableProxy* proxy = expression != NULL
? expression->AsVariableProxy()
@@ -4725,27 +4276,14 @@ void Parser::MarkAsLValue(Expression* expression) {
// Checks LHS expression for assignment and prefix/postfix increment/decrement
// in strict mode.
void Parser::CheckStrictModeLValue(Expression* expression,
- const char* error,
bool* ok) {
- ASSERT(!top_scope_->is_classic_mode());
+ ASSERT(!scope_->is_classic_mode());
VariableProxy* lhs = expression != NULL
? expression->AsVariableProxy()
: NULL;
if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
- ReportMessage(error, Vector<const char*>::empty());
- *ok = false;
- }
-}
-
-
-// Checks whether an octal literal was last seen between beg_pos and end_pos.
-// If so, reports an error. Only called for strict mode.
-void ParserBase::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- Scanner::Location octal = scanner()->octal_position();
- if (octal.IsValid() && beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal");
- scanner()->clear_octal_position();
+ ReportMessage("strict_eval_arguments", Vector<const char*>::empty());
*ok = false;
}
}
@@ -4758,34 +4296,18 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
// errors. See ES5 16 for a definition of early errors.
Handle<String> name = decl->proxy()->name();
SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Variable", *c_string };
+ const char* elms[2] = { "Variable", c_string.get() };
Vector<const char*> args(elms, 2);
int position = decl->proxy()->position();
Scanner::Location location = position == RelocInfo::kNoPosition
? Scanner::Location::invalid()
: Scanner::Location(position, position + 1);
- ReportMessageAt(location, "redeclaration", args);
+ ParserTraits::ReportMessageAt(location, "redeclaration", args);
*ok = false;
}
}
-// This function reads an identifier name and determines whether or not it
-// is 'get' or 'set'.
-Handle<String> Parser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Handle<String> result = ParseIdentifierName(ok);
- if (!*ok) return Handle<String>();
- if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
- const char* token = scanner().literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
// ----------------------------------------------------------------------------
// Parser support
@@ -5703,13 +5225,6 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
// ----------------------------------------------------------------------------
// The Parser interface.
-ParserMessage::~ParserMessage() {
- for (int i = 0; i < args().length(); i++)
- DeleteArray(args()[i]);
- DeleteArray(args().start());
-}
-
-
ScriptDataImpl::~ScriptDataImpl() {
if (owns_store_) store_.Dispose();
}
@@ -5841,7 +5356,7 @@ bool Parser::Parse() {
Scanner::Location loc = pre_parse_data->MessageLocation();
const char* message = pre_parse_data->BuildMessage();
Vector<const char*> args = pre_parse_data->BuildArgs();
- ReportMessageAt(loc, message, args);
+ ParserTraits::ReportMessageAt(loc, message, args);
DeleteArray(message);
for (int i = 0; i < args.length(); i++) {
DeleteArray(args[i]);
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 79ce68b615..85a219e04e 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -47,24 +47,6 @@ class Target;
template <typename T> class ZoneListWrapper;
-class ParserMessage : public Malloced {
- public:
- ParserMessage(Scanner::Location loc, const char* message,
- Vector<const char*> args)
- : loc_(loc),
- message_(message),
- args_(args) { }
- ~ParserMessage();
- Scanner::Location location() { return loc_; }
- const char* message() { return message_; }
- Vector<const char*> args() { return args_; }
- private:
- Scanner::Location loc_;
- const char* message_;
- Vector<const char*> args_;
-};
-
-
class FunctionEntry BASE_EMBEDDED {
public:
enum {
@@ -422,10 +404,92 @@ class RegExpParser BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
-// Forward declaration.
+class Parser;
class SingletonLogger;
-class Parser : public ParserBase {
+class ParserTraits {
+ public:
+ struct Type {
+ typedef v8::internal::Parser* Parser;
+
+ // Types used by FunctionState and BlockState.
+ typedef v8::internal::Scope Scope;
+ typedef AstNodeFactory<AstConstructionVisitor> Factory;
+ typedef Variable GeneratorVariable;
+ typedef v8::internal::Zone Zone;
+
+ // Return types for traversing functions.
+ typedef Handle<String> Identifier;
+ typedef v8::internal::Expression* Expression;
+ };
+
+ explicit ParserTraits(Parser* parser) : parser_(parser) {}
+
+ // Custom operations executed when FunctionStates are created and destructed.
+ template<typename FS>
+ static void SetUpFunctionState(FS* function_state, Zone* zone) {
+ Isolate* isolate = zone->isolate();
+ function_state->isolate_ = isolate;
+ function_state->saved_ast_node_id_ = isolate->ast_node_id();
+ isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
+ }
+
+ template<typename FS>
+ static void TearDownFunctionState(FS* function_state) {
+ if (function_state->outer_function_state_ != NULL) {
+ function_state->isolate_->set_ast_node_id(
+ function_state->saved_ast_node_id_);
+ }
+ }
+
+ // Helper functions for recursive descent.
+ bool IsEvalOrArguments(Handle<String> identifier) const;
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<const char*> args);
+ void ReportMessage(const char* message, Vector<Handle<String> > args);
+ void ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<Handle<String> > args);
+
+ // "null" return type creators.
+ static Handle<String> EmptyIdentifier() {
+ return Handle<String>();
+ }
+ static Expression* EmptyExpression() {
+ return NULL;
+ }
+
+ // Producing data during the recursive descent.
+ Handle<String> GetSymbol(Scanner* scanner = NULL);
+ Handle<String> NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured);
+ Expression* ThisExpression(Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Expression* ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Expression* ExpressionFromIdentifier(
+ Handle<String> name, int pos, Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Expression* ExpressionFromString(
+ int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Temporary glue; these functions will move to ParserBase.
+ Expression* ParseArrayLiteral(bool* ok);
+ Expression* ParseObjectLiteral(bool* ok);
+ Expression* ParseExpression(bool accept_IN, bool* ok);
+ Expression* ParseV8Intrinsic(bool* ok);
+
+ private:
+ Parser* parser_;
+};
+
+
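
The header now derives Parser from ParserBase<ParserTraits>: the traits class supplies the concrete types, the "empty" return values, and glue methods so the AST-building Parser and the syntax-only PreParser can share one descent algorithm. A stripped-down sketch of that arrangement; every name below is invented for illustration and is not V8's.

    // Sketch only: a templated base holds shared logic, a Traits class supplies
    // the concrete implementation type, result type and "null" value.
    #include <iostream>
    #include <string>

    template <typename Traits>
    class ParserBaseSketch {
     public:
      typename Traits::Expression ParseOrEmpty(const std::string& token) {
        if (token.empty()) return Traits::EmptyExpression();  // "null" result
        // Dispatch to the concrete parser without virtual calls.
        return static_cast<typename Traits::Impl*>(this)->MakeExpression(token);
      }
    };

    class FullParserSketch;

    struct FullTraits {
      typedef FullParserSketch Impl;
      typedef std::string Expression;  // a real AST node pointer in V8
      static Expression EmptyExpression() { return std::string(); }
    };

    class FullParserSketch : public ParserBaseSketch<FullTraits> {
     public:
      std::string MakeExpression(const std::string& token) {
        return "Expr(" + token + ")";
      }
    };

    int main() {
      FullParserSketch parser;
      std::cout << parser.ParseOrEmpty("x") << "\n";        // Expr(x)
      std::cout << parser.ParseOrEmpty("").size() << "\n";  // 0 (empty result)
    }
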
+class Parser : public ParserBase<ParserTraits> {
public:
explicit Parser(CompilationInfo* info);
~Parser() {
@@ -436,10 +500,17 @@ class Parser : public ParserBase {
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
// nodes) if parsing failed.
- static bool Parse(CompilationInfo* info) { return Parser(info).Parse(); }
+ static bool Parse(CompilationInfo* info,
+ bool allow_lazy = false) {
+ Parser parser(info);
+ parser.set_allow_lazy(allow_lazy);
+ return parser.Parse();
+ }
bool Parse();
private:
+ friend class ParserTraits;
+
static const int kMaxNumFunctionLocals = 131071; // 2^17-1
enum Mode {
@@ -460,66 +531,6 @@ class Parser : public ParserBase {
kHasNoInitializers
};
- class BlockState;
-
- class FunctionState BASE_EMBEDDED {
- public:
- FunctionState(Parser* parser,
- Scope* scope,
- Isolate* isolate);
- ~FunctionState();
-
- int NextMaterializedLiteralIndex() {
- return next_materialized_literal_index_++;
- }
- int materialized_literal_count() {
- return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
- }
-
- int NextHandlerIndex() { return next_handler_index_++; }
- int handler_count() { return next_handler_index_; }
-
- void AddProperty() { expected_property_count_++; }
- int expected_property_count() { return expected_property_count_; }
-
- void set_generator_object_variable(Variable *variable) {
- ASSERT(variable != NULL);
- ASSERT(!is_generator());
- generator_object_variable_ = variable;
- }
- Variable* generator_object_variable() const {
- return generator_object_variable_;
- }
- bool is_generator() const {
- return generator_object_variable_ != NULL;
- }
-
- AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
-
- private:
- // Used to assign an index to each literal that needs materialization in
- // the function. Includes regexp literals, and boilerplate for object and
- // array literals.
- int next_materialized_literal_index_;
-
- // Used to assign a per-function index to try and catch handlers.
- int next_handler_index_;
-
- // Properties count estimation.
- int expected_property_count_;
-
- // For generators, the variable that holds the generator object. This
- // variable is used by yield expressions and return statements. NULL
- // indicates that this function is not a generator.
- Variable* generator_object_variable_;
-
- Parser* parser_;
- FunctionState* outer_function_state_;
- Scope* outer_scope_;
- int saved_ast_node_id_;
- AstNodeFactory<AstConstructionVisitor> factory_;
- };
-
class ParsingModeScope BASE_EMBEDDED {
public:
ParsingModeScope(Parser* parser, Mode mode)
@@ -551,41 +562,25 @@ class Parser : public ParserBase {
Handle<String> source);
// Report syntax error
- void ReportUnexpectedToken(Token::Value token);
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
- void ReportMessage(const char* message, Vector<const char*> args);
- void ReportMessage(const char* message, Vector<Handle<String> > args);
- void ReportMessageAt(Scanner::Location location, const char* type) {
- ReportMessageAt(location, type, Vector<const char*>::empty());
- }
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<Handle<String> > args);
void set_pre_parse_data(ScriptDataImpl *data) {
pre_parse_data_ = data;
symbol_cache_.Initialize(data ? data->symbol_count() : 0, zone());
}
- bool inside_with() const { return top_scope_->inside_with(); }
- Scanner& scanner() { return scanner_; }
+ bool inside_with() const { return scope_->inside_with(); }
Mode mode() const { return mode_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
bool is_extended_mode() {
- ASSERT(top_scope_ != NULL);
- return top_scope_->is_extended_mode();
+ ASSERT(scope_ != NULL);
+ return scope_->is_extended_mode();
}
Scope* DeclarationScope(VariableMode mode) {
return IsLexicalVariableMode(mode)
- ? top_scope_ : top_scope_->DeclarationScope();
+ ? scope_ : scope_->DeclarationScope();
}
- // Check if the given string is 'eval' or 'arguments'.
- bool IsEvalOrArguments(Handle<String> string);
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -643,34 +638,12 @@ class Parser : public ParserBase {
Expression* ParseUnaryExpression(bool* ok);
Expression* ParsePostfixExpression(bool* ok);
Expression* ParseLeftHandSideExpression(bool* ok);
- Expression* ParseNewExpression(bool* ok);
+ Expression* ParseMemberWithNewPrefixesExpression(bool* ok);
Expression* ParseMemberExpression(bool* ok);
- Expression* ParseNewPrefix(PositionStack* stack, bool* ok);
- Expression* ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok);
- Expression* ParsePrimaryExpression(bool* ok);
+ Expression* ParseMemberExpressionContinuation(Expression* expression,
+ bool* ok);
Expression* ParseArrayLiteral(bool* ok);
Expression* ParseObjectLiteral(bool* ok);
- Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
-
- // Populate the constant properties fixed array for a materialized object
- // literal.
- void BuildObjectLiteralConstantProperties(
- ZoneList<ObjectLiteral::Property*>* properties,
- Handle<FixedArray> constants,
- bool* is_simple,
- bool* fast_elements,
- int* depth,
- bool* may_store_doubles);
-
- // Decide if a property should be in the object boilerplate.
- bool IsBoilerplateProperty(ObjectLiteral::Property* property);
- // If the expression is a literal, return the literal value;
- // if the expression is a materialized literal and is simple return a
- // compile time value as encoded by CompileTimeValue::GetValue().
- // Otherwise, return undefined literal as the placeholder
- // in the object literal boilerplate.
- Handle<Object> GetBoilerplateValue(Expression* expression);
// Initialize the components of a for-in / for-of statement.
void InitializeForEachStatement(ForEachStatement* stmt,
@@ -679,55 +652,34 @@ class Parser : public ParserBase {
Statement* body);
ZoneList<Expression*>* ParseArguments(bool* ok);
- FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
- bool name_is_reserved,
- bool is_generator,
- int function_token_position,
- FunctionLiteral::FunctionType type,
- bool* ok);
-
+ FunctionLiteral* ParseFunctionLiteral(
+ Handle<String> name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ bool* ok);
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
- bool is_generator() const { return current_function_state_->is_generator(); }
-
bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
Handle<String> LiteralString(PretenureFlag tenured) {
- if (scanner().is_literal_ascii()) {
+ if (scanner()->is_literal_ascii()) {
return isolate_->factory()->NewStringFromAscii(
- scanner().literal_ascii_string(), tenured);
+ scanner()->literal_ascii_string(), tenured);
} else {
return isolate_->factory()->NewStringFromTwoByte(
- scanner().literal_utf16_string(), tenured);
+ scanner()->literal_utf16_string(), tenured);
}
}
- Handle<String> NextLiteralString(PretenureFlag tenured) {
- if (scanner().is_next_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().next_literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().next_literal_utf16_string(), tenured);
- }
- }
-
- Handle<String> GetSymbol();
-
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
Literal* GetLiteralTheHole(int position);
- Handle<String> ParseIdentifier(bool* ok);
- Handle<String> ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok);
- Handle<String> ParseIdentifierName(bool* ok);
- Handle<String> ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
// Determine if the expression is a variable proxy and mark it as being used
// in an assignment or with a increment/decrement operator. This is currently
// used on for the statically checking assignments to harmony const bindings.
@@ -735,7 +687,6 @@ class Parser : public ParserBase {
// Strict mode validation of LValue expressions
void CheckStrictModeLValue(Expression* expression,
- const char* error,
bool* ok);
// For harmony block scoping mode: Check if the scope has conflicting var/let
@@ -791,35 +742,21 @@ class Parser : public ParserBase {
PreParser::PreParseResult LazyParseFunctionLiteral(
SingletonLogger* logger);
- AstNodeFactory<AstConstructionVisitor>* factory() {
- return current_function_state_->factory();
- }
-
Isolate* isolate_;
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
Scanner scanner_;
PreParser* reusable_preparser_;
- Scope* top_scope_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
- FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
- v8::Extension* extension_;
ScriptDataImpl* pre_parse_data_;
FuncNameInferrer* fni_;
Mode mode_;
- // If true, the next (and immediately following) function literal is
- // preceded by a parenthesis.
- // Heuristically that means that the function will be called immediately,
- // so never lazily compile it.
- bool parenthesized_function_;
Zone* zone_;
CompilationInfo* info_;
- friend class BlockState;
- friend class FunctionState;
};
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 0076d567f8..ac804398f1 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform specific code for Cygwin goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Platform-specific code for Cygwin goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#include <errno.h>
#include <pthread.h>
@@ -53,7 +53,7 @@ namespace internal {
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on Cygwin.
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 75d88ec5d3..9ab6583e06 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform specific code for FreeBSD goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Platform-specific code for FreeBSD goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#include <pthread.h>
#include <semaphore.h>
@@ -63,7 +63,7 @@ namespace internal {
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index eb2d10b3f9..b35cd28454 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform specific code for Linux goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Platform-specific code for Linux goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#include <pthread.h>
#include <semaphore.h>
@@ -53,10 +53,15 @@
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
#undef MAP_TYPE
#include "v8.h"
@@ -115,7 +120,7 @@ bool OS::ArmUsingHardFloat() {
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
@@ -348,6 +353,9 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
address_ = static_cast<void*>(aligned_base);
size_ = aligned_size;
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(address_, size_);
+#endif
}
@@ -397,6 +405,9 @@ void* VirtualMemory::ReserveRegion(size_t size) {
if (result == MAP_FAILED) return NULL;
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(result, size);
+#endif
return result;
}
@@ -433,6 +444,9 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+#if defined(LEAK_SANITIZER)
+ __lsan_unregister_root_region(base, size);
+#endif
return munmap(base, size) == 0;
}
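
Under LEAK_SANITIZER the Linux port now registers every mmap-backed VirtualMemory reservation as a LeakSanitizer root region and unregisters it before the memory is released, so pointers stored inside V8's reservations keep their targets reachable during the leak scan. A small Linux-only sketch of the same pairing; the __lsan_* calls and the guard macro come from the diff, while the wrapper functions and main() are simplified stand-ins.

    #include <sys/mman.h>
    #include <cstddef>
    #if defined(LEAK_SANITIZER)
    #include <sanitizer/lsan_interface.h>
    #endif

    void* ReserveRegion(size_t size) {
      void* result = mmap(NULL, size, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (result == MAP_FAILED) return NULL;
    #if defined(LEAK_SANITIZER)
      __lsan_register_root_region(result, size);  // scan this region for pointers
    #endif
      return result;
    }

    bool ReleaseRegion(void* base, size_t size) {
    #if defined(LEAK_SANITIZER)
      __lsan_unregister_root_region(base, size);  // stop scanning before unmap
    #endif
      return munmap(base, size) == 0;
    }

    int main() {
      const size_t size = 1 << 20;
      void* region = ReserveRegion(size);
      return ReleaseRegion(region, size) ? 0 : 1;
    }
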
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 5ffc3fc54c..683a04d381 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform specific code for MacOS goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Platform-specific code for MacOS goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#include <dlfcn.h>
#include <unistd.h>
@@ -184,7 +184,7 @@ void OS::SignalCodeMovingGC() {
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 710c3904af..c881d4735d 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform specific code for OpenBSD and NetBSD goes here. For the POSIX
-// comaptible parts the implementation is in platform-posix.cc.
+// Platform-specific code for OpenBSD and NetBSD goes here. For the
+// POSIX-compatible parts, the implementation is in platform-posix.cc.
#include <pthread.h>
#include <semaphore.h>
@@ -61,7 +61,7 @@ namespace internal {
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 797557d76f..b5ab0cfef1 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -25,9 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform specific code for POSIX goes here. This is not a platform on its
-// own but contains the parts which are the same across POSIX platforms Linux,
-// Mac OS, FreeBSD and OpenBSD.
+// Platform-specific code for POSIX goes here. This is not a platform on its
+// own, but contains the parts which are the same across the POSIX platforms
+// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
#include <dlfcn.h>
#include <pthread.h>
@@ -130,6 +130,13 @@ uint64_t OS::TotalPhysicalMemory() {
return 0;
}
return static_cast<uint64_t>(memory_info.dwTotalPhys);
+#elif V8_OS_QNX
+ struct stat stat_buf;
+ if (stat("/proc", &stat_buf) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(stat_buf.st_size);
#else
intptr_t pages = sysconf(_SC_PHYS_PAGES);
intptr_t page_size = sysconf(_SC_PAGESIZE);
@@ -176,10 +183,10 @@ void OS::Free(void* address, const size_t size) {
// Get rid of writable permission on code allocations.
void OS::ProtectCode(void* address, const size_t size) {
-#if defined(__CYGWIN__)
+#if V8_OS_CYGWIN
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-#elif defined(__native_client__)
+#elif V8_OS_NACL
// The Native Client port of V8 uses an interpreter, so
// code pages don't need PROT_EXEC.
mprotect(address, size, PROT_READ);
@@ -191,7 +198,7 @@ void OS::ProtectCode(void* address, const size_t size) {
// Create guard pages.
void OS::Guard(void* address, const size_t size) {
-#if defined(__CYGWIN__)
+#if V8_OS_CYGWIN
DWORD oldprotect;
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
#else
@@ -201,7 +208,7 @@ void OS::Guard(void* address, const size_t size) {
void* OS::GetRandomMmapAddr() {
-#if defined(__native_client__)
+#if V8_OS_NACL
// TODO(bradchen): restore randomization once Native Client gets
// smarter about using mmap address hints.
// See http://code.google.com/p/nativeclient/issues/3341
@@ -247,7 +254,7 @@ void* OS::GetRandomMmapAddr() {
size_t OS::AllocateAlignment() {
- return getpagesize();
+ return static_cast<size_t>(sysconf(_SC_PAGESIZE));
}
@@ -269,6 +276,8 @@ void OS::Abort() {
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
asm("bkpt 0");
+#elif V8_HOST_ARCH_A64
+ asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");
#elif V8_HOST_ARCH_IA32
@@ -288,14 +297,8 @@ void OS::DebugBreak() {
// ----------------------------------------------------------------------------
// Math functions
-double ceiling(double x) {
- // Correct buggy 'ceil' on some systems (i.e. FreeBSD, OS X 10.5)
- return (-1.0 < x && x < 0.0) ? -0.0 : ceil(x);
-}
-
-
double modulo(double x, double y) {
- return fmod(x, y);
+ return std::fmod(x, y);
}
@@ -308,10 +311,6 @@ double fast_##name(double x) { \
return (*fast_##name##_function)(x); \
}
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
-UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
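
The sin/cos/tan/log entries are dropped here, but exp and sqrt keep the UNARY_MATH_FUNCTION pattern: each fast_<name>() forwards through a function pointer that can be re-bound to generated code at start-up. A hedged sketch of that dispatch, using an ordinary C++ fallback in place of V8's CreateExpFunction()/CreateSqrtFunction():

    #include <cmath>
    #include <cstdio>

    typedef double (*UnaryMathFunction)(double);

    // Defines a function pointer plus a forwarding wrapper for one math op.
    #define UNARY_MATH_FUNCTION(name, generator)                        \
      static UnaryMathFunction fast_##name##_function = (generator);    \
      double fast_##name(double x) { return (*fast_##name##_function)(x); }

    static double PortableSqrt(double x) { return std::sqrt(x); }

    UNARY_MATH_FUNCTION(sqrt, &PortableSqrt)  // V8 binds generated code instead

    int main() {
      std::printf("%f\n", fast_sqrt(2.0));  // 1.414214
    }
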
@@ -357,7 +356,7 @@ double OS::TimeCurrentMillis() {
double OS::DaylightSavingsOffset(double time) {
if (std::isnan(time)) return nan_value();
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return nan_value();
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
@@ -495,8 +494,8 @@ void OS::MemMove(void* dest, const void* src, size_t size) {
#elif defined(V8_HOST_ARCH_ARM)
void OS::MemCopyUint16Uint8Wrapper(uint16_t* dest,
- const uint8_t* src,
- size_t chars) {
+ const uint8_t* src,
+ size_t chars) {
uint16_t *limit = dest + chars;
while (dest < limit) {
*dest++ = static_cast<uint16_t>(*src++);
@@ -512,6 +511,12 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
OS::MemCopyUint8Function stub);
OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
OS::MemCopyUint16Uint8Function stub);
+
+#elif defined(V8_HOST_ARCH_MIPS)
+OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper;
+// Defined in codegen-mips.cc.
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+ OS::MemCopyUint8Function stub);
#endif
@@ -526,11 +531,10 @@ void OS::PostSetUp() {
CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
OS::memcopy_uint16_uint8_function =
CreateMemCopyUint16Uint8Function(&OS::MemCopyUint16Uint8Wrapper);
+#elif defined(V8_HOST_ARCH_MIPS)
+ OS::memcopy_uint8_function =
+ CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
#endif
- init_fast_sin_function();
- init_fast_cos_function();
- init_fast_tan_function();
- init_fast_log_function();
// fast_exp is initialized lazily.
init_fast_sqrt_function();
}
@@ -564,6 +568,9 @@ Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(options.stack_size()),
start_semaphore_(NULL) {
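+  // pthread_attr_setstacksize() rejects values below PTHREAD_STACK_MIN,
+  // so round small requests up to the minimum.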
+ if (stack_size_ > 0 && stack_size_ < PTHREAD_STACK_MIN) {
+ stack_size_ = PTHREAD_STACK_MIN;
+ }
set_name(options.name());
}
@@ -574,12 +581,12 @@ Thread::~Thread() {
static void SetThreadName(const char* name) {
-#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
pthread_set_name_np(pthread_self(), name);
-#elif defined(__NetBSD__)
+#elif V8_OS_NETBSD
STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
pthread_setname_np(pthread_self(), "%s", name);
-#elif defined(__APPLE__)
+#elif V8_OS_MACOSX
// pthread_setname_np is only available in 10.6 or later, so test
// for it at runtime.
int (*dynamic_pthread_setname_np)(const char*);
@@ -626,7 +633,7 @@ void Thread::Start() {
result = pthread_attr_init(&attr);
ASSERT_EQ(0, result);
// Native client uses default stack size.
-#if !defined(__native_client__)
+#if !V8_OS_NACL
if (stack_size_ > 0) {
result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
ASSERT_EQ(0, result);
@@ -654,7 +661,7 @@ void Thread::YieldCPU() {
static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
-#if defined(__CYGWIN__)
+#if V8_OS_CYGWIN
// We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
// because pthread_key_t is a pointer type on Cygwin. This will probably not
// work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
@@ -668,7 +675,7 @@ static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
-#if defined(__CYGWIN__)
+#if V8_OS_CYGWIN
STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
intptr_t ptr_key = static_cast<intptr_t>(local_key);
return reinterpret_cast<pthread_key_t>(ptr_key);
diff --git a/deps/v8/src/platform-qnx.cc b/deps/v8/src/platform-qnx.cc
new file mode 100644
index 0000000000..cd031e7956
--- /dev/null
+++ b/deps/v8/src/platform-qnx.cc
@@ -0,0 +1,401 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform-specific code for QNX goes here. For the POSIX-compatible
+// parts the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <backtrace.h>
+
+// QNX requires memory pages to be marked as executable.
+// Otherwise, the OS raises an exception when executing code in that page.
+#include <sys/types.h> // mmap & munmap
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <fcntl.h> // open
+#include <unistd.h> // sysconf
+#include <strings.h> // index
+#include <errno.h>
+#include <stdarg.h>
+#include <sys/procfs.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+#include "v8threads.h"
+#include "vm-state-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on QNX since tids and pids share a
+// namespace and pid 0 is reserved (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+#ifdef __arm__
+
+bool OS::ArmUsingHardFloat() {
+ // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+ // the Floating Point ABI used (PCS stands for Procedure Call Standard).
+ // We use these as well as a couple of other defines to statically determine
+  // what FP ABI is used.
+  // GCC versions 4.4 and below don't support hard-fp.
+  // GCC version 4.5 may support hard-fp without defining __ARM_PCS or
+ // __ARM_PCS_VFP.
+
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION >= 40600
+#if defined(__ARM_PCS_VFP)
+ return true;
+#else
+ return false;
+#endif
+
+#elif GCC_VERSION < 40500
+ return false;
+
+#else
+#if defined(__ARM_PCS_VFP)
+ return true;
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+ !defined(__VFP_FP__)
+ return false;
+#else
+#error "Your version of GCC does not report the FP ABI compiled for." \
+ "Please report it on this issue" \
+ "http://code.google.com/p/v8/issues/detail?id=2140"
+
+#endif
+#endif
+#undef GCC_VERSION
+}
+
+#endif // __arm__
+
+
+const char* OS::LocalTimezone(double time) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* addr = OS::GetRandomMmapAddr();
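+  // Anonymous private mapping; the random address is only a placement hint.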
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) {
+ LOG(i::Isolate::Current(),
+ StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) OS::Free(memory_, size_);
+ fclose(file_);
+}
+
+
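+// Enumerates the mappings of this process via /proc/<pid>/as: first query the
+// number of map entries, then fetch them, and log the path of every ELF
+// mapping obtained through DCMD_PROC_MAPDEBUG.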
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
+ procfs_mapinfo *mapinfos = NULL, *mapinfo;
+ int proc_fd, num, i;
+
+ struct {
+ procfs_debuginfo info;
+ char buff[PATH_MAX];
+ } map;
+
+ char buf[PATH_MAX + 1];
+ snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
+
+  if ((proc_fd = open(buf, O_RDONLY)) == -1) {
+    return;
+  }
+
+ /* Get the number of map entries. */
+ if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
+ close(proc_fd);
+ return;
+ }
+
+ mapinfos = reinterpret_cast<procfs_mapinfo *>(
+ malloc(num * sizeof(procfs_mapinfo)));
+ if (mapinfos == NULL) {
+ close(proc_fd);
+ return;
+ }
+
+ /* Fill the map entries. */
+ if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
+ mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
+ free(mapinfos);
+ close(proc_fd);
+ return;
+ }
+
+ for (i = 0; i < num; i++) {
+ mapinfo = mapinfos + i;
+ if (mapinfo->flags & MAP_ELF) {
+ map.info.vaddr = mapinfo->vaddr;
+ if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
+ continue;
+ }
+ LOG(isolate, SharedLibraryEvent(map.info.path,
+ mapinfo->vaddr,
+ mapinfo->vaddr + mapinfo->size));
+ }
+ }
+ free(mapinfos);
+ close(proc_fd);
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
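+  // Over-reserve by 'alignment' bytes so that an aligned block of 'size'
+  // bytes is guaranteed to fit; the unused prefix and suffix are unmapped
+  // again below.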
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
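+  // Reserve address space without committing it: PROT_NONE plus MAP_LAZY
+  // keeps the range unbacked until CommitRegion() is called.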
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
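+  // Re-map the already reserved range in place (MAP_FIXED) with the
+  // requested protection to commit it.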
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
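+  // Replace the committed pages with an inaccessible, lazily backed mapping,
+  // keeping the address range reserved.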
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ return false;
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index a0590cbecb..4d910d47ad 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform specific code for Solaris 10 goes here. For the POSIX comaptible
-// parts the implementation is in platform-posix.cc.
+// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
#ifdef __sparc
# error "V8 does not support the SPARC CPU architecture."
@@ -82,7 +82,7 @@ namespace internal {
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on Solaris.
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 35411bfdad..56261735b8 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform specific code for Win32.
+// Platform-specific code for Win32.
// Secure API functions are not available using MinGW with msvcrt.dll
// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
@@ -133,11 +133,6 @@ intptr_t OS::MaxVirtualMemory() {
}
-double ceiling(double x) {
- return ceil(x);
-}
-
-
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
@@ -201,10 +196,6 @@ double fast_##name(double x) { \
return (*fast_##name##_function)(x); \
}
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
-UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
@@ -222,10 +213,6 @@ void MathSetup() {
#ifdef _WIN64
init_modulo_function();
#endif
- init_fast_sin_function();
- init_fast_cos_function();
- init_fast_tan_function();
- init_fast_log_function();
// fast_exp is initialized lazily.
init_fast_sqrt_function();
}
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 8e524aeaf0..8af90f1cb3 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -44,7 +44,7 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
-#include <cstdarg>
+#include <stdarg.h>
#include "platform/mutex.h"
#include "platform/semaphore.h"
@@ -59,8 +59,12 @@ int signbit(double x);
# endif
#endif
+#if V8_OS_QNX
+#include "qnx-math.h"
+#endif
+
// Microsoft Visual C++ specific stuff.
-#if V8_CC_MSVC
+#if V8_LIBC_MSVCRT
#include "win32-headers.h"
#include "win32-math.h"
@@ -85,22 +89,16 @@ inline int lrint(double flt) {
#endif
return intgr;
}
-
#endif // _MSC_VER < 1800
-#endif // V8_CC_MSVC
+#endif // V8_LIBC_MSVCRT
namespace v8 {
namespace internal {
-double ceiling(double x);
double modulo(double x, double y);
// Custom implementation of math functions.
-double fast_sin(double input);
-double fast_cos(double input);
-double fast_tan(double input);
-double fast_log(double input);
double fast_exp(double input);
double fast_sqrt(double input);
// The custom exp implementation needs 16KB of lookup data; initialize it
@@ -369,6 +367,26 @@ class OS {
size_t size) {
(*memcopy_uint16_uint8_function)(dest, src, size);
}
+#elif defined(V8_HOST_ARCH_MIPS)
+ typedef void (*MemCopyUint8Function)(uint8_t* dest,
+ const uint8_t* src,
+ size_t size);
+ static MemCopyUint8Function memcopy_uint8_function;
+ static void MemCopyUint8Wrapper(uint8_t* dest,
+ const uint8_t* src,
+ size_t chars) {
+ memcpy(dest, src, chars);
+ }
+ // For values < 16, the assembler function is slower than the inlined C code.
+ static const int kMinComplexMemCopy = 16;
+ static void MemCopy(void* dest, const void* src, size_t size) {
+ (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src),
+ size);
+ }
+ static void MemMove(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+ }
#else
// Copy memory area to disjoint memory area.
static void MemCopy(void* dest, const void* src, size_t size) {
diff --git a/deps/v8/src/platform/condition-variable.cc b/deps/v8/src/platform/condition-variable.cc
index e2bf3882ec..83c35d4b1b 100644
--- a/deps/v8/src/platform/condition-variable.cc
+++ b/deps/v8/src/platform/condition-variable.cc
@@ -27,8 +27,8 @@
#include "platform/condition-variable.h"
-#include <cerrno>
-#include <ctime>
+#include <errno.h>
+#include <time.h>
#include "platform/time.h"
diff --git a/deps/v8/src/platform/mutex.cc b/deps/v8/src/platform/mutex.cc
index ad97740995..ff4a8a3790 100644
--- a/deps/v8/src/platform/mutex.cc
+++ b/deps/v8/src/platform/mutex.cc
@@ -27,7 +27,7 @@
#include "platform/mutex.h"
-#include <cerrno>
+#include <errno.h>
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/platform/semaphore.cc b/deps/v8/src/platform/semaphore.cc
index c3e5826f4f..0b82d4ad57 100644
--- a/deps/v8/src/platform/semaphore.cc
+++ b/deps/v8/src/platform/semaphore.cc
@@ -32,7 +32,7 @@
#include <mach/task.h>
#endif
-#include <cerrno>
+#include <errno.h>
#include "checks.h"
#include "platform/time.h"
diff --git a/deps/v8/src/platform/socket.cc b/deps/v8/src/platform/socket.cc
index 2fce6f2992..9d56cc79a8 100644
--- a/deps/v8/src/platform/socket.cc
+++ b/deps/v8/src/platform/socket.cc
@@ -37,7 +37,7 @@
#include <unistd.h>
#endif
-#include <cerrno>
+#include <errno.h>
#include "checks.h"
#include "once.h"
diff --git a/deps/v8/src/platform/time.cc b/deps/v8/src/platform/time.cc
index de0ca16473..5374af802f 100644
--- a/deps/v8/src/platform/time.cc
+++ b/deps/v8/src/platform/time.cc
@@ -34,7 +34,7 @@
#include <mach/mach_time.h>
#endif
-#include <cstring>
+#include <string.h>
#include "checks.h"
#include "cpu.h"
diff --git a/deps/v8/src/platform/time.h b/deps/v8/src/platform/time.h
index 877e0203bb..99a8d39318 100644
--- a/deps/v8/src/platform/time.h
+++ b/deps/v8/src/platform/time.h
@@ -28,7 +28,7 @@
#ifndef V8_PLATFORM_TIME_H_
#define V8_PLATFORM_TIME_H_
-#include <ctime>
+#include <time.h>
#include <limits>
#include "../allocation.h"
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index a87c434558..fc4481fd86 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -42,7 +42,7 @@
#include "unicode.h"
#include "utils.h"
-#if V8_CC_MSVC && (_MSC_VER < 1800)
+#if V8_LIBC_MSVCRT && (_MSC_VER < 1800)
namespace std {
// Usually defined in math.h, but not in MSVC until VS2013+.
@@ -55,14 +55,103 @@ int isfinite(double value);
namespace v8 {
namespace internal {
+void PreParserTraits::ReportMessageAt(Scanner::Location location,
+ const char* message,
+ Vector<const char*> args) {
+ ReportMessageAt(location.beg_pos,
+ location.end_pos,
+ message,
+ args.length() > 0 ? args[0] : NULL);
+}
+
+
+void PreParserTraits::ReportMessageAt(Scanner::Location location,
+ const char* type,
+ const char* name_opt) {
+ pre_parser_->log_
+ ->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
+}
+
+
+void PreParserTraits::ReportMessageAt(int start_pos,
+ int end_pos,
+ const char* type,
+ const char* name_opt) {
+ pre_parser_->log_->LogMessage(start_pos, end_pos, type, name_opt);
+}
+
+
+PreParserIdentifier PreParserTraits::GetSymbol(Scanner* scanner) {
+ pre_parser_->LogSymbol();
+ if (scanner->current_token() == Token::FUTURE_RESERVED_WORD) {
+ return PreParserIdentifier::FutureReserved();
+ } else if (scanner->current_token() ==
+ Token::FUTURE_STRICT_RESERVED_WORD) {
+ return PreParserIdentifier::FutureStrictReserved();
+ } else if (scanner->current_token() == Token::YIELD) {
+ return PreParserIdentifier::Yield();
+ }
+ if (scanner->is_literal_ascii()) {
+ // Detect strict-mode poison words.
+ if (scanner->literal_length() == 4 &&
+ !strncmp(scanner->literal_ascii_string().start(), "eval", 4)) {
+ return PreParserIdentifier::Eval();
+ }
+ if (scanner->literal_length() == 9 &&
+ !strncmp(scanner->literal_ascii_string().start(), "arguments", 9)) {
+ return PreParserIdentifier::Arguments();
+ }
+ }
+ return PreParserIdentifier::Default();
+}
+
+
+PreParserExpression PreParserTraits::ExpressionFromString(
+ int pos, Scanner* scanner, PreParserFactory* factory) {
+ const int kUseStrictLength = 10;
+ const char* kUseStrictChars = "use strict";
+ pre_parser_->LogSymbol();
+ if (scanner->is_literal_ascii() &&
+ scanner->literal_length() == kUseStrictLength &&
+ !scanner->literal_contains_escapes() &&
+ !strncmp(scanner->literal_ascii_string().start(), kUseStrictChars,
+ kUseStrictLength)) {
+ return PreParserExpression::UseStrictStringLiteral();
+ }
+ return PreParserExpression::StringLiteral();
+}
+
+
+PreParserExpression PreParserTraits::ParseArrayLiteral(bool* ok) {
+ return pre_parser_->ParseArrayLiteral(ok);
+}
+
+
+PreParserExpression PreParserTraits::ParseObjectLiteral(bool* ok) {
+ return pre_parser_->ParseObjectLiteral(ok);
+}
+
+
+PreParserExpression PreParserTraits::ParseExpression(bool accept_IN, bool* ok) {
+ return pre_parser_->ParseExpression(accept_IN, ok);
+}
+
+
+PreParserExpression PreParserTraits::ParseV8Intrinsic(bool* ok) {
+ return pre_parser_->ParseV8Intrinsic(ok);
+}
+
+
PreParser::PreParseResult PreParser::PreParseLazyFunction(
LanguageMode mode, bool is_generator, ParserRecorder* log) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
- Scope top_scope(&scope_, kTopLevelScope);
- set_language_mode(mode);
- Scope function_scope(&scope_, kFunctionScope);
- function_scope.set_is_generator(is_generator);
+ PreParserScope top_scope(scope_, GLOBAL_SCOPE);
+ FunctionState top_state(&function_state_, &scope_, &top_scope);
+ scope_->SetLanguageMode(mode);
+ PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ FunctionState function_state(&function_state_, &scope_, &function_scope);
+ function_state.set_is_generator(is_generator);
ASSERT_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
int start_position = peek_position();
@@ -72,12 +161,9 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
ReportUnexpectedToken(scanner()->current_token());
} else {
ASSERT_EQ(Token::RBRACE, scanner()->peek());
- if (!is_classic_mode()) {
+ if (!scope_->is_classic_mode()) {
int end_pos = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_pos, &ok);
- if (ok) {
- CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
- }
}
}
return kPreParseSuccess;
@@ -97,37 +183,6 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-void PreParser::ReportUnexpectedToken(Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram.
- if (token == Token::ILLEGAL && stack_overflow()) {
- return;
- }
- Scanner::Location source_location = scanner()->location();
-
- // Four of the tokens are treated specially
- switch (token) {
- case Token::EOS:
- return ReportMessageAt(source_location, "unexpected_eos", NULL);
- case Token::NUMBER:
- return ReportMessageAt(source_location, "unexpected_token_number", NULL);
- case Token::STRING:
- return ReportMessageAt(source_location, "unexpected_token_string", NULL);
- case Token::IDENTIFIER:
- return ReportMessageAt(source_location,
- "unexpected_token_identifier", NULL);
- case Token::FUTURE_RESERVED_WORD:
- return ReportMessageAt(source_location, "unexpected_reserved", NULL);
- case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessageAt(source_location,
- "unexpected_strict_reserved", NULL);
- default:
- const char* name = Token::String(token);
- ReportMessageAt(source_location, "unexpected_token", name);
- }
-}
-
#define CHECK_OK ok); \
if (!*ok) return kUnknownSourceElements; \
@@ -165,15 +220,18 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
// SourceElements ::
// (Statement)* <end_token>
- bool allow_directive_prologue = true;
+ bool directive_prologue = true;
while (peek() != end_token) {
+ if (directive_prologue && peek() != Token::STRING) {
+ directive_prologue = false;
+ }
Statement statement = ParseSourceElement(CHECK_OK);
- if (allow_directive_prologue) {
+ if (directive_prologue) {
if (statement.IsUseStrictLiteral()) {
- set_language_mode(allow_harmony_scoping() ?
- EXTENDED_MODE : STRICT_MODE);
+ scope_->SetLanguageMode(allow_harmony_scoping() ?
+ EXTENDED_MODE : STRICT_MODE);
} else if (!statement.IsStringLiteral()) {
- allow_directive_prologue = false;
+ directive_prologue = false;
}
}
}
@@ -265,9 +323,11 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
Scanner::Location start_location = scanner()->peek_location();
Statement statement = ParseFunctionDeclaration(CHECK_OK);
Scanner::Location end_location = scanner()->location();
- if (!is_classic_mode()) {
- ReportMessageAt(start_location.beg_pos, end_location.end_pos,
- "strict_function", NULL);
+ if (!scope_->is_classic_mode()) {
+ PreParserTraits::ReportMessageAt(start_location.beg_pos,
+ end_location.end_pos,
+ "strict_function",
+ NULL);
*ok = false;
return Statement::Default();
} else {
@@ -293,22 +353,14 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
Expect(Token::FUNCTION, CHECK_OK);
bool is_generator = allow_generators() && Check(Token::MUL);
- Identifier identifier = ParseIdentifier(CHECK_OK);
- Scanner::Location location = scanner()->location();
-
- Expression function_value = ParseFunctionLiteral(is_generator, CHECK_OK);
-
- if (function_value.IsStrictFunction() &&
- !identifier.IsValidStrictVariable()) {
- // Strict mode violation, using either reserved word or eval/arguments
- // as name of strict function.
- const char* type = "strict_function_name";
- if (identifier.IsFutureStrictReserved() || identifier.IsYield()) {
- type = "strict_reserved_word";
- }
- ReportMessageAt(location, type, NULL);
- *ok = false;
- }
+ bool is_strict_reserved = false;
+ Identifier name = ParseIdentifierOrStrictReservedWord(
+ &is_strict_reserved, CHECK_OK);
+ ParseFunctionLiteral(name,
+ scanner()->location(),
+ is_strict_reserved,
+ is_generator,
+ CHECK_OK);
return Statement::FunctionDeclaration();
}
@@ -322,7 +374,7 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
- if (is_extended_mode()) {
+ if (scope_->is_extended_mode()) {
ParseSourceElement(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
@@ -386,21 +438,19 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in classic mode.
Consume(Token::CONST);
- switch (language_mode()) {
+ switch (scope_->language_mode()) {
case CLASSIC_MODE:
break;
case STRICT_MODE: {
Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, "strict_const", NULL);
+ ReportMessageAt(location, "strict_const");
*ok = false;
return Statement::Default();
}
case EXTENDED_MODE:
if (var_context != kSourceElement &&
var_context != kForStatement) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "unprotected_const", NULL);
+ ReportMessageAt(scanner()->peek_location(), "unprotected_const");
*ok = false;
return Statement::Default();
}
@@ -414,19 +464,15 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
//
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
- if (!is_extended_mode()) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "illegal_let", NULL);
+ if (!scope_->is_extended_mode()) {
+ ReportMessageAt(scanner()->peek_location(), "illegal_let");
*ok = false;
return Statement::Default();
}
Consume(Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "unprotected_let", NULL);
+ ReportMessageAt(scanner()->peek_location(), "unprotected_let");
*ok = false;
return Statement::Default();
}
@@ -443,14 +489,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
do {
// Parse variable name.
if (nvars > 0) Consume(Token::COMMA);
- Identifier identifier = ParseIdentifier(CHECK_OK);
- if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner()->location(),
- "strict_var_name",
- identifier,
- ok);
- return Statement::Default();
- }
+ ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
nvars++;
if (peek() == Token::ASSIGN || require_initializer) {
Expect(Token::ASSIGN, CHECK_OK);
@@ -469,16 +508,20 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
// Expression ';'
// Identifier ':' Statement
+ bool starts_with_identifier = peek_any_identifier();
Expression expr = ParseExpression(true, CHECK_OK);
- if (expr.IsRawIdentifier()) {
+ // Even if the expression starts with an identifier, it is not necessarily an
+ // identifier. For example, "foo + bar" starts with an identifier but is not
+ // an identifier.
+ if (starts_with_identifier && expr.IsIdentifier() && peek() == Token::COLON) {
+ // Expression is a single identifier, and not, e.g., a parenthesized
+ // identifier.
ASSERT(!expr.AsIdentifier().IsFutureReserved());
- ASSERT(is_classic_mode() ||
+ ASSERT(scope_->is_classic_mode() ||
(!expr.AsIdentifier().IsFutureStrictReserved() &&
!expr.AsIdentifier().IsYield()));
- if (peek() == Token::COLON) {
- Consume(Token::COLON);
- return ParseStatement(ok);
- }
+ Consume(Token::COLON);
+ return ParseStatement(ok);
// Preparsing is disabled for extensions (because the extension details
// aren't passed to lazily compiled functions), so we don't
// accept "native function" in the preparser.
@@ -516,7 +559,8 @@ PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
tok != Token::SEMICOLON &&
tok != Token::RBRACE &&
tok != Token::EOS) {
- ParseIdentifier(CHECK_OK);
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
return Statement::Default();
@@ -533,7 +577,8 @@ PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
tok != Token::SEMICOLON &&
tok != Token::RBRACE &&
tok != Token::EOS) {
- ParseIdentifier(CHECK_OK);
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
return Statement::Default();
@@ -570,9 +615,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
- if (!is_classic_mode()) {
- Scanner::Location location = scanner()->location();
- ReportMessageAt(location, "strict_mode_with", NULL);
+ if (!scope_->is_classic_mode()) {
+ ReportMessageAt(scanner()->location(), "strict_mode_with");
*ok = false;
return Statement::Default();
}
@@ -580,7 +624,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Scope::InsideWith iw(scope_);
+ PreParserScope with_scope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, &with_scope);
ParseStatement(CHECK_OK);
return Statement::Default();
}
@@ -716,8 +761,7 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- Scanner::Location pos = scanner()->location();
- ReportMessageAt(pos, "newline_after_throw", NULL);
+ ReportMessageAt(scanner()->location(), "newline_after_throw");
*ok = false;
return Statement::Default();
}
@@ -739,38 +783,31 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
// Finally ::
// 'finally' Block
- // In preparsing, allow any number of catch/finally blocks, including zero
- // of both.
-
Expect(Token::TRY, CHECK_OK);
ParseBlock(CHECK_OK);
- bool catch_or_finally_seen = false;
- if (peek() == Token::CATCH) {
+ Token::Value tok = peek();
+ if (tok != Token::CATCH && tok != Token::FINALLY) {
+ ReportMessageAt(scanner()->location(), "no_catch_or_finally");
+ *ok = false;
+ return Statement::Default();
+ }
+ if (tok == Token::CATCH) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- Identifier id = ParseIdentifier(CHECK_OK);
- if (!is_classic_mode() && !id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner()->location(),
- "strict_catch_variable",
- id,
- ok);
- return Statement::Default();
- }
+ ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- { Scope::InsideWith iw(scope_);
+ {
+ PreParserScope with_scope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, &with_scope);
ParseBlock(CHECK_OK);
}
- catch_or_finally_seen = true;
+ tok = peek();
}
- if (peek() == Token::FINALLY) {
+ if (tok == Token::FINALLY) {
Consume(Token::FINALLY);
ParseBlock(CHECK_OK);
- catch_or_finally_seen = true;
- }
- if (!catch_or_finally_seen) {
- *ok = false;
}
return Statement::Default();
}
@@ -821,7 +858,7 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
// YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
- if (scope_->is_generator() && peek() == Token::YIELD) {
+ if (function_state_->is_generator() && peek() == Token::YIELD) {
return ParseYieldExpression(ok);
}
@@ -833,12 +870,12 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
return expression;
}
- if (!is_classic_mode() &&
+ if (!scope_->is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_lhs_assignment", NULL);
+ PreParserTraits::ReportMessageAt(before.beg_pos, after.end_pos,
+ "strict_eval_arguments", NULL);
*ok = false;
return Expression::Default();
}
@@ -847,7 +884,7 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
ParseAssignmentExpression(accept_IN, CHECK_OK);
if ((op == Token::ASSIGN) && expression.IsThisProperty()) {
- scope_->AddProperty();
+ function_state_->AddProperty();
}
return Expression::Default();
@@ -927,12 +964,12 @@ PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
op = Next();
Scanner::Location before = scanner()->peek_location();
Expression expression = ParseUnaryExpression(CHECK_OK);
- if (!is_classic_mode() &&
+ if (!scope_->is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_lhs_prefix", NULL);
+ PreParserTraits::ReportMessageAt(before.beg_pos, after.end_pos,
+ "strict_eval_arguments", NULL);
*ok = false;
}
return Expression::Default();
@@ -950,12 +987,12 @@ PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
Expression expression = ParseLeftHandSideExpression(CHECK_OK);
if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
Token::IsCountOp(peek())) {
- if (!is_classic_mode() &&
+ if (!scope_->is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_lhs_postfix", NULL);
+ PreParserTraits::ReportMessageAt(before.beg_pos, after.end_pos,
+ "strict_eval_arguments", NULL);
*ok = false;
return Expression::Default();
}
@@ -970,12 +1007,7 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
// LeftHandSideExpression ::
// (NewExpression | MemberExpression) ...
- Expression result = Expression::Default();
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
+ Expression result = ParseMemberWithNewPrefixesExpression(CHECK_OK);
while (true) {
switch (peek()) {
@@ -1015,179 +1047,98 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
}
-PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
+PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
+ bool* ok) {
// NewExpression ::
// ('new')+ MemberExpression
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- unsigned new_count = 0;
- do {
- Consume(Token::NEW);
- new_count++;
- } while (peek() == Token::NEW);
+ // See Parser::ParseNewExpression.
- return ParseMemberWithNewPrefixesExpression(new_count, ok);
+ if (peek() == Token::NEW) {
+ Consume(Token::NEW);
+ ParseMemberWithNewPrefixesExpression(CHECK_OK);
+ if (peek() == Token::LPAREN) {
+ // NewExpression with arguments.
+ ParseArguments(CHECK_OK);
+ // The expression can still continue with . or [ after the arguments.
+ ParseMemberExpressionContinuation(Expression::Default(), CHECK_OK);
+ }
+ return Expression::Default();
+ }
+ // No 'new' keyword.
+ return ParseMemberExpression(ok);
}
PreParser::Expression PreParser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(0, ok);
-}
-
-
-PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
- unsigned new_count, bool* ok) {
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments)*
+ // The '[' Expression ']' and '.' Identifier parts are parsed by
+ // ParseMemberExpressionContinuation, and the Arguments part is parsed by the
+ // caller.
+
// Parse the initial primary or function expression.
Expression result = Expression::Default();
if (peek() == Token::FUNCTION) {
Consume(Token::FUNCTION);
bool is_generator = allow_generators() && Check(Token::MUL);
- Identifier identifier = Identifier::Default();
+ Identifier name = Identifier::Default();
+ bool is_strict_reserved_name = false;
+ Scanner::Location function_name_location = Scanner::Location::invalid();
if (peek_any_identifier()) {
- identifier = ParseIdentifier(CHECK_OK);
- }
- result = ParseFunctionLiteral(is_generator, CHECK_OK);
- if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner()->location(),
- "strict_function_name",
- identifier,
- ok);
- return Expression::Default();
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
+ CHECK_OK);
+ function_name_location = scanner()->location();
}
+ result = ParseFunctionLiteral(name,
+ function_name_location,
+ is_strict_reserved_name,
+ is_generator,
+ CHECK_OK);
} else {
result = ParsePrimaryExpression(CHECK_OK);
}
+ result = ParseMemberExpressionContinuation(result, CHECK_OK);
+ return result;
+}
+
+PreParser::Expression PreParser::ParseMemberExpressionContinuation(
+ PreParserExpression expression, bool* ok) {
+ // Parses this part of MemberExpression:
+ // ('[' Expression ']' | '.' Identifier)*
while (true) {
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
ParseExpression(true, CHECK_OK);
Expect(Token::RBRACK, CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
+ if (expression.IsThis()) {
+ expression = Expression::ThisProperty();
} else {
- result = Expression::Default();
+ expression = Expression::Default();
}
break;
}
case Token::PERIOD: {
Consume(Token::PERIOD);
ParseIdentifierName(CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
+ if (expression.IsThis()) {
+ expression = Expression::ThisProperty();
} else {
- result = Expression::Default();
+ expression = Expression::Default();
}
break;
}
- case Token::LPAREN: {
- if (new_count == 0) return result;
- // Consume one of the new prefixes (already parsed).
- ParseArguments(CHECK_OK);
- new_count--;
- result = Expression::Default();
- break;
- }
default:
- return result;
+ return expression;
}
}
-}
-
-
-PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- Expression result = Expression::Default();
- switch (peek()) {
- case Token::THIS: {
- Next();
- result = Expression::This();
- break;
- }
-
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::YIELD:
- case Token::IDENTIFIER: {
- Identifier id = ParseIdentifier(CHECK_OK);
- result = Expression::FromIdentifier(id);
- break;
- }
-
- case Token::NULL_LITERAL:
- case Token::TRUE_LITERAL:
- case Token::FALSE_LITERAL:
- case Token::NUMBER: {
- Next();
- break;
- }
- case Token::STRING: {
- Next();
- result = GetStringSymbol();
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- result = result.Parenthesize();
- break;
-
- case Token::MOD:
- result = ParseV8Intrinsic(CHECK_OK);
- break;
-
- default: {
- Next();
- *ok = false;
- return Expression::Default();
- }
- }
-
- return result;
+ ASSERT(false);
+ return PreParserExpression::Default();
}
@@ -1205,7 +1156,7 @@ PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
}
Expect(Token::RBRACK, CHECK_OK);
- scope_->NextMaterializedLiteralIndex();
+ function_state_->NextMaterializedLiteralIndex();
return Expression::Default();
}
@@ -1217,7 +1168,7 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
// | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
// )*[','] '}'
- ObjectLiteralChecker checker(this, language_mode());
+ ObjectLiteralChecker checker(this, scope_->language_mode());
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
@@ -1246,7 +1197,11 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
}
PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
checker.CheckProperty(name, type, CHECK_OK);
- ParseFunctionLiteral(false, CHECK_OK);
+ ParseFunctionLiteral(Identifier::Default(),
+ scanner()->location(),
+ false, // reserved words are allowed here
+ false, // not a generator
+ CHECK_OK);
if (peek() != Token::RBRACE) {
Expect(Token::COMMA, CHECK_OK);
}
@@ -1258,7 +1213,7 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
case Token::STRING:
Consume(next);
checker.CheckProperty(next, kValueProperty, CHECK_OK);
- GetStringSymbol();
+ LogSymbol();
break;
case Token::NUMBER:
Consume(next);
@@ -1283,29 +1238,7 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
}
Expect(Token::RBRACE, CHECK_OK);
- scope_->NextMaterializedLiteralIndex();
- return Expression::Default();
-}
-
-
-PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
- bool* ok) {
- if (!scanner()->ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessageAt(scanner()->location(), "unterminated_regexp", NULL);
- *ok = false;
- return Expression::Default();
- }
-
- scope_->NextMaterializedLiteralIndex();
-
- if (!scanner()->ScanRegExpFlags()) {
- Next();
- ReportMessageAt(scanner()->location(), "invalid_regexp_flags", NULL);
- *ok = false;
- return Expression::Default();
- }
- Next();
+ function_state_->NextMaterializedLiteralIndex();
return Expression::Default();
}
@@ -1332,31 +1265,44 @@ PreParser::Arguments PreParser::ParseArguments(bool* ok) {
return argc;
}
-
-PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
- bool* ok) {
+PreParser::Expression PreParser::ParseFunctionLiteral(
+ Identifier function_name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
// Parse function body.
ScopeType outer_scope_type = scope_->type();
- bool inside_with = scope_->IsInsideWith();
- Scope function_scope(&scope_, kFunctionScope);
- function_scope.set_is_generator(is_generator);
+ bool inside_with = scope_->inside_with();
+ PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ FunctionState function_state(&function_state_, &scope_, &function_scope);
+ function_state.set_is_generator(is_generator);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
int start_position = position();
bool done = (peek() == Token::RPAREN);
DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+ // We don't yet know if the function will be strict, so we cannot yet produce
+ // errors for parameter names or duplicates. However, we remember the
+ // locations of these errors if they occur and produce the errors later.
+ Scanner::Location eval_args_error_loc = Scanner::Location::invalid();
+ Scanner::Location dupe_error_loc = Scanner::Location::invalid();
+ Scanner::Location reserved_error_loc = Scanner::Location::invalid();
while (!done) {
- Identifier id = ParseIdentifier(CHECK_OK);
- if (!id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner()->location(),
- "strict_param_name",
- id,
- CHECK_OK);
+ bool is_strict_reserved = false;
+ Identifier param_name =
+ ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+ if (!eval_args_error_loc.IsValid() && param_name.IsEvalOrArguments()) {
+ eval_args_error_loc = scanner()->location();
+ }
+ if (!reserved_error_loc.IsValid() && is_strict_reserved) {
+ reserved_error_loc = scanner()->location();
}
+
int prev_value;
if (scanner()->is_literal_ascii()) {
prev_value =
@@ -1366,11 +1312,10 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
duplicate_finder.AddUtf16Symbol(scanner()->literal_utf16_string(), 1);
}
- if (prev_value != 0) {
- SetStrictModeViolation(scanner()->location(),
- "strict_param_dupe",
- CHECK_OK);
+ if (!dupe_error_loc.IsValid() && prev_value != 0) {
+ dupe_error_loc = scanner()->location();
}
+
done = (peek() == Token::RPAREN);
if (!done) {
Expect(Token::COMMA, CHECK_OK);
@@ -1381,7 +1326,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
// Determine if the function will be lazily compiled.
// Currently only happens to top-level functions.
// Optimistically assume that all top-level functions are lazily compiled.
- bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
+ bool is_lazily_compiled = (outer_scope_type == GLOBAL_SCOPE &&
!inside_with && allow_lazy() &&
!parenthesized_function_);
parenthesized_function_ = false;
@@ -1394,10 +1339,37 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
}
Expect(Token::RBRACE, CHECK_OK);
- if (!is_classic_mode()) {
+ // Validate strict mode. We can do this only after parsing the function,
+ // since the function can declare itself strict.
+ if (!scope_->is_classic_mode()) {
+ if (function_name.IsEvalOrArguments()) {
+ ReportMessageAt(function_name_location, "strict_eval_arguments");
+ *ok = false;
+ return Expression::Default();
+ }
+ if (name_is_strict_reserved) {
+ ReportMessageAt(function_name_location, "unexpected_strict_reserved");
+ *ok = false;
+ return Expression::Default();
+ }
+ if (eval_args_error_loc.IsValid()) {
+ ReportMessageAt(eval_args_error_loc, "strict_eval_arguments");
+ *ok = false;
+ return Expression::Default();
+ }
+ if (dupe_error_loc.IsValid()) {
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe");
+ *ok = false;
+ return Expression::Default();
+ }
+ if (reserved_error_loc.IsValid()) {
+ ReportMessageAt(reserved_error_loc, "unexpected_strict_reserved");
+ *ok = false;
+ return Expression::Default();
+ }
+
int end_position = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_position, CHECK_OK);
- CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
return Expression::StrictFunction();
}
@@ -1416,9 +1388,9 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
ASSERT_EQ(Token::RBRACE, scanner()->peek());
int body_end = scanner()->peek_location().end_pos;
log_->LogFunction(body_start, body_end,
- scope_->materialized_literal_count(),
- scope_->expected_properties(),
- language_mode());
+ function_state_->materialized_literal_count(),
+ function_state_->expected_property_count(),
+ scope_->language_mode());
}
@@ -1430,7 +1402,8 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
*ok = false;
return Expression::Default();
}
- ParseIdentifier(CHECK_OK);
+ // Allow "eval" or "arguments" for backward compatibility.
+ ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
ParseArguments(ok);
return Expression::Default();
@@ -1449,201 +1422,4 @@ void PreParser::LogSymbol() {
}
-PreParser::Expression PreParser::GetStringSymbol() {
- const int kUseStrictLength = 10;
- const char* kUseStrictChars = "use strict";
- LogSymbol();
- if (scanner()->is_literal_ascii() &&
- scanner()->literal_length() == kUseStrictLength &&
- !scanner()->literal_contains_escapes() &&
- !strncmp(scanner()->literal_ascii_string().start(), kUseStrictChars,
- kUseStrictLength)) {
- return Expression::UseStrictStringLiteral();
- }
- return Expression::StringLiteral();
-}
-
-
-PreParser::Identifier PreParser::GetIdentifierSymbol() {
- LogSymbol();
- if (scanner()->current_token() == Token::FUTURE_RESERVED_WORD) {
- return Identifier::FutureReserved();
- } else if (scanner()->current_token() ==
- Token::FUTURE_STRICT_RESERVED_WORD) {
- return Identifier::FutureStrictReserved();
- } else if (scanner()->current_token() == Token::YIELD) {
- return Identifier::Yield();
- }
- if (scanner()->is_literal_ascii()) {
- // Detect strict-mode poison words.
- if (scanner()->literal_length() == 4 &&
- !strncmp(scanner()->literal_ascii_string().start(), "eval", 4)) {
- return Identifier::Eval();
- }
- if (scanner()->literal_length() == 9 &&
- !strncmp(scanner()->literal_ascii_string().start(), "arguments", 9)) {
- return Identifier::Arguments();
- }
- }
- return Identifier::Default();
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- Token::Value next = Next();
- switch (next) {
- case Token::FUTURE_RESERVED_WORD: {
- Scanner::Location location = scanner()->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "reserved_word", NULL);
- *ok = false;
- return GetIdentifierSymbol();
- }
- case Token::YIELD:
- if (scope_->is_generator()) {
- // 'yield' in a generator is only valid as part of a YieldExpression.
- ReportMessageAt(scanner()->location(), "unexpected_token", "yield");
- *ok = false;
- return Identifier::Yield();
- }
- // FALLTHROUGH
- case Token::FUTURE_STRICT_RESERVED_WORD:
- if (!is_classic_mode()) {
- Scanner::Location location = scanner()->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "strict_reserved_word", NULL);
- *ok = false;
- }
- // FALLTHROUGH
- case Token::IDENTIFIER:
- return GetIdentifierSymbol();
- default:
- *ok = false;
- return Identifier::Default();
- }
-}
-
-
-void PreParser::SetStrictModeViolation(Scanner::Location location,
- const char* type,
- bool* ok) {
- if (!is_classic_mode()) {
- ReportMessageAt(location, type, NULL);
- *ok = false;
- return;
- }
- // Delay report in case this later turns out to be strict code
- // (i.e., for function names and parameters prior to a "use strict"
- // directive).
- // It's safe to overwrite an existing violation.
- // It's either from a function that turned out to be non-strict,
- // or it's in the current function (and we just need to report
- // one error), or it's in a unclosed nesting function that wasn't
- // strict (otherwise we would already be in strict mode).
- strict_mode_violation_location_ = location;
- strict_mode_violation_type_ = type;
-}
-
-
-void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
- int end_pos,
- bool* ok) {
- Scanner::Location location = strict_mode_violation_location_;
- if (location.IsValid() &&
- location.beg_pos > beg_pos && location.end_pos < end_pos) {
- ReportMessageAt(location, strict_mode_violation_type_, NULL);
- *ok = false;
- }
-}
-
-
-void PreParser::StrictModeIdentifierViolation(Scanner::Location location,
- const char* eval_args_type,
- Identifier identifier,
- bool* ok) {
- const char* type = eval_args_type;
- if (identifier.IsFutureReserved()) {
- type = "reserved_word";
- } else if (identifier.IsFutureStrictReserved() || identifier.IsYield()) {
- type = "strict_reserved_word";
- }
- if (!is_classic_mode()) {
- ReportMessageAt(location, type, NULL);
- *ok = false;
- return;
- }
- strict_mode_violation_location_ = location;
- strict_mode_violation_type_ = type;
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (Token::IsKeyword(next)) {
- int pos = position();
- const char* keyword = Token::String(next);
- log_->LogAsciiSymbol(pos, Vector<const char>(keyword, StrLength(keyword)));
- return Identifier::Default();
- }
- if (next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD) {
- return GetIdentifierSymbol();
- }
- *ok = false;
- return Identifier::Default();
-}
-
-#undef CHECK_OK
-
-
-// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'.
-PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Identifier result = ParseIdentifierName(ok);
- if (!*ok) return Identifier::Default();
- if (scanner()->is_literal_ascii() &&
- scanner()->literal_length() == 3) {
- const char* token = scanner()->literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
-void PreParser::ObjectLiteralChecker::CheckProperty(Token::Value property,
- PropertyKind type,
- bool* ok) {
- int old;
- if (property == Token::NUMBER) {
- old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
- } else if (scanner()->is_literal_ascii()) {
- old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
- } else {
- old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
- }
- PropertyKind old_type = static_cast<PropertyKind>(old);
- if (HasConflict(old_type, type)) {
- if (IsDataDataConflict(old_type, type)) {
- // Both are data properties.
- if (language_mode_ == CLASSIC_MODE) return;
- parser()->ReportMessageAt(scanner()->location(),
- "strict_duplicate_property");
- } else if (IsDataAccessorConflict(old_type, type)) {
- // Both a data and an accessor property with the same name.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_data_property");
- } else {
- ASSERT(IsAccessorAccessorConflict(old_type, type));
- // Both accessors of the same type.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_get_set");
- }
- *ok = false;
- }
-}
-
} } // v8::internal
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index e99b4b0a18..5cb0a9d352 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -29,25 +29,33 @@
#define V8_PREPARSER_H
#include "hashmap.h"
+#include "scopes.h"
#include "token.h"
#include "scanner.h"
+#include "v8.h"
namespace v8 {
namespace internal {
// Common base class shared between parser and pre-parser.
-class ParserBase {
+template <typename Traits>
+class ParserBase : public Traits {
public:
- ParserBase(Scanner* scanner, uintptr_t stack_limit)
- : scanner_(scanner),
+ ParserBase(Scanner* scanner, uintptr_t stack_limit,
+ v8::Extension* extension,
+ typename Traits::Type::Parser this_object)
+ : Traits(this_object),
+ parenthesized_function_(false),
+ scope_(NULL),
+ function_state_(NULL),
+ extension_(extension),
+ scanner_(scanner),
stack_limit_(stack_limit),
stack_overflow_(false),
allow_lazy_(false),
allow_natives_syntax_(false),
allow_generators_(false),
allow_for_of_(false) { }
- // TODO(mstarzinger): Only virtual until message reporting has been unified.
- virtual ~ParserBase() { }
// Getters that indicate whether certain syntactical constructs are
// allowed to be parsed by this instance of the parser.
@@ -76,6 +84,102 @@ class ParserBase {
}
protected:
+ enum AllowEvalOrArgumentsAsIdentifier {
+ kAllowEvalOrArguments,
+ kDontAllowEvalOrArguments
+ };
+
+ // ---------------------------------------------------------------------------
+ // FunctionState and BlockState together implement the parser's scope stack.
+ // The parser's current scope is in scope_. BlockState and FunctionState
+ // constructors push on the scope stack and the destructors pop. They are also
+ // used to hold the parser's per-function and per-block state.
+ class BlockState BASE_EMBEDDED {
+ public:
+ BlockState(typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope)
+ : scope_stack_(scope_stack),
+ outer_scope_(*scope_stack),
+ scope_(scope) {
+ *scope_stack_ = scope_;
+ }
+ ~BlockState() { *scope_stack_ = outer_scope_; }
+
+ private:
+ typename Traits::Type::Scope** scope_stack_;
+ typename Traits::Type::Scope* outer_scope_;
+ typename Traits::Type::Scope* scope_;
+ };
+
+ class FunctionState BASE_EMBEDDED {
+ public:
+ FunctionState(
+ FunctionState** function_state_stack,
+ typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope,
+ typename Traits::Type::Zone* zone = NULL);
+ ~FunctionState();
+
+ int NextMaterializedLiteralIndex() {
+ return next_materialized_literal_index_++;
+ }
+ int materialized_literal_count() {
+ return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
+ }
+
+ int NextHandlerIndex() { return next_handler_index_++; }
+ int handler_count() { return next_handler_index_; }
+
+ void AddProperty() { expected_property_count_++; }
+ int expected_property_count() { return expected_property_count_; }
+
+ void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
+ bool is_generator() const { return is_generator_; }
+
+ void set_generator_object_variable(
+ typename Traits::Type::GeneratorVariable* variable) {
+ ASSERT(variable != NULL);
+ ASSERT(!is_generator());
+ generator_object_variable_ = variable;
+ is_generator_ = true;
+ }
+ typename Traits::Type::GeneratorVariable* generator_object_variable()
+ const {
+ return generator_object_variable_;
+ }
+
+ typename Traits::Type::Factory* factory() { return &factory_; }
+
+ private:
+ // Used to assign an index to each literal that needs materialization in
+ // the function. Includes regexp literals, and boilerplate for object and
+ // array literals.
+ int next_materialized_literal_index_;
+
+ // Used to assign a per-function index to try and catch handlers.
+ int next_handler_index_;
+
+ // Properties count estimation.
+ int expected_property_count_;
+
+ // Whether the function is a generator.
+ bool is_generator_;
+ // For generators, this variable may hold the generator object. This variable
+ // is used by yield expressions and return statements. It is not necessary
+ // for generator functions to have this variable set.
+ Variable* generator_object_variable_;
+
+ FunctionState** function_state_stack_;
+ FunctionState* outer_function_state_;
+ typename Traits::Type::Scope** scope_stack_;
+ typename Traits::Type::Scope* outer_scope_;
+ Isolate* isolate_; // Only used by ParserTraits.
+ int saved_ast_node_id_; // Only used by ParserTraits.
+ typename Traits::Type::Factory factory_;
+
+ friend class ParserTraits;
+ };
+
Scanner* scanner() const { return scanner_; }
int position() { return scanner_->location().beg_pos; }
int peek_position() { return scanner_->peek_location().beg_pos; }
@@ -125,20 +229,112 @@ class ParserBase {
}
}
- bool peek_any_identifier();
- void ExpectSemicolon(bool* ok);
- bool CheckContextualKeyword(Vector<const char> keyword);
- void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
+ void ExpectSemicolon(bool* ok) {
+ // Check for automatic semicolon insertion according to
+ // the rules given in ECMA-262, section 7.9, page 21.
+ Token::Value tok = peek();
+ if (tok == Token::SEMICOLON) {
+ Next();
+ return;
+ }
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
+ tok == Token::RBRACE ||
+ tok == Token::EOS) {
+ return;
+ }
+ Expect(Token::SEMICOLON, ok);
+ }
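The inlined ExpectSemicolon above implements the automatic semicolon insertion rules of ECMA-262 section 7.9. A small JavaScript sketch of inputs it accepts without an explicit semicolon (illustrative examples, not taken from this patch):

    var a = 1          // line break before the next token: a semicolon is inserted
    var b = 2
    function f() {
      return           // line break right after 'return': a semicolon is inserted,
      { value: 42 }    // so this block is dead code and f() returns undefined
    }
    if (a) { b = 3 }   // a closing '}' (or the end of input) also ends the statement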
+
+ bool peek_any_identifier() {
+ Token::Value next = peek();
+ return next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ next == Token::YIELD;
+ }
+
+ bool CheckContextualKeyword(Vector<const char> keyword) {
+ if (peek() == Token::IDENTIFIER &&
+ scanner()->is_next_contextual_keyword(keyword)) {
+ Consume(Token::IDENTIFIER);
+ return true;
+ }
+ return false;
+ }
+
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
+ Expect(Token::IDENTIFIER, ok);
+ if (!*ok) return;
+ if (!scanner()->is_literal_contextual_keyword(keyword)) {
+ ReportUnexpectedToken(scanner()->current_token());
+ *ok = false;
+ }
+ }
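CheckContextualKeyword and ExpectContextualKeyword treat an ordinary identifier as a keyword only in one syntactic position; 'of' in for-of loops (gated by allow_for_of_ above) is the typical client. An illustrative JavaScript sketch:

    var arr = [1, 2, 3];
    var of = 'just an identifier';   // 'of' is not reserved anywhere else
    // for (var x of arr) {}         // only in this position is 'of' matched as a
    //                               // contextual keyword (for-of is still flag-guarded here)
    for (var i = 0; i < arr.length; i++) {}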
- // Strict mode octal literal validation.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+ // Checks whether an octal literal was last seen between beg_pos and end_pos.
+ // If so, reports an error. Only called for strict mode.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ Scanner::Location octal = scanner()->octal_position();
+ if (octal.IsValid() && beg_pos <= octal.beg_pos &&
+ octal.end_pos <= end_pos) {
+ ReportMessageAt(octal, "strict_octal_literal");
+ scanner()->clear_octal_position();
+ *ok = false;
+ }
+ }
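CheckOctalLiteral backs the strict-mode ban on legacy octal literals: the scanner remembers the position of the last octal it saw, and the check fires only if that position lies inside the just-parsed range. A hedged JavaScript illustration:

    var loose = 010;        // classic (sloppy) mode: legacy octal, value 8
    function strictFn() {
      "use strict";
      // var bad = 010;     // inside strict code this is reported as "strict_octal_literal"
      return 8;             // write the value in decimal instead
    }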
// Determine precedence of given token.
- static int Precedence(Token::Value token, bool accept_IN);
+ static int Precedence(Token::Value token, bool accept_IN) {
+ if (token == Token::IN && !accept_IN)
+ return 0; // 0 precedence will terminate binary expression parsing
+ return Token::Precedence(token);
+ }
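Returning precedence 0 for Token::IN when accept_IN is false is what stops 'in' from being consumed as a relational operator inside a for-statement header. Roughly, in JavaScript terms (illustrative):

    var obj = { a: 1 };
    for (var k in obj) {}       // 'in' belongs to the for-in header, so the initializer
                                // is parsed with accept_IN == false
    var found = ('a' in obj);   // everywhere else 'in' binds with its normal
                                // relational precedence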
+
+ typename Traits::Type::Factory* factory() {
+ return function_state_->factory();
+ }
+
+ bool is_classic_mode() const { return scope_->is_classic_mode(); }
+
+ bool is_generator() const { return function_state_->is_generator(); }
// Report syntax errors.
- virtual void ReportUnexpectedToken(Token::Value token) = 0;
- virtual void ReportMessageAt(Scanner::Location loc, const char* type) = 0;
+ void ReportMessage(const char* message, Vector<const char*> args) {
+ Scanner::Location source_location = scanner()->location();
+ Traits::ReportMessageAt(source_location, message, args);
+ }
+
+ void ReportMessageAt(Scanner::Location location, const char* message) {
+ Traits::ReportMessageAt(location, message, Vector<const char*>::empty());
+ }
+
+ void ReportUnexpectedToken(Token::Value token);
+
+ // Recursive descent functions:
+
+ // Parses an identifier that is valid for the current scope; in particular, it
+ // fails on strict mode future reserved keywords in a strict scope. If
+ // allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
+ // "arguments" as an identifier even in strict mode (this is needed in cases
+ // like "var foo = eval;").
+ typename Traits::Type::Identifier ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier,
+ bool* ok);
+ // Parses an identifier or a strict mode future reserved word, and indicates
+ // whether it is strict mode future reserved.
+ typename Traits::Type::Identifier ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved,
+ bool* ok);
+ typename Traits::Type::Identifier ParseIdentifierName(bool* ok);
+ // Parses an identifier and determines whether or not it is 'get' or 'set'.
+ typename Traits::Type::Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
+
+ typename Traits::Type::Expression ParseRegExpLiteral(bool seen_equal,
+ bool* ok);
+
+ typename Traits::Type::Expression ParsePrimaryExpression(bool* ok);
// Used to detect duplicates in object literals. Each of the values
// kGetterProperty, kSetterProperty and kValueProperty represents
@@ -194,6 +390,16 @@ class ParserBase {
LanguageMode language_mode_;
};
+ // If true, the next (and immediately following) function literal is
+ // preceded by a parenthesis.
+ // Heuristically that means that the function will be called immediately,
+ // so never lazily compile it.
+ bool parenthesized_function_;
+
+ typename Traits::Type::Scope* scope_; // Scope stack.
+ FunctionState* function_state_; // Function state stack.
+ v8::Extension* extension_;
+
private:
Scanner* scanner_;
uintptr_t stack_limit_;
@@ -206,6 +412,279 @@ class ParserBase {
};
+class PreParserIdentifier {
+ public:
+ static PreParserIdentifier Default() {
+ return PreParserIdentifier(kUnknownIdentifier);
+ }
+ static PreParserIdentifier Eval() {
+ return PreParserIdentifier(kEvalIdentifier);
+ }
+ static PreParserIdentifier Arguments() {
+ return PreParserIdentifier(kArgumentsIdentifier);
+ }
+ static PreParserIdentifier FutureReserved() {
+ return PreParserIdentifier(kFutureReservedIdentifier);
+ }
+ static PreParserIdentifier FutureStrictReserved() {
+ return PreParserIdentifier(kFutureStrictReservedIdentifier);
+ }
+ static PreParserIdentifier Yield() {
+ return PreParserIdentifier(kYieldIdentifier);
+ }
+ bool IsEval() { return type_ == kEvalIdentifier; }
+ bool IsArguments() { return type_ == kArgumentsIdentifier; }
+ bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
+ bool IsYield() { return type_ == kYieldIdentifier; }
+ bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
+ bool IsFutureStrictReserved() {
+ return type_ == kFutureStrictReservedIdentifier;
+ }
+ bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
+
+ private:
+ enum Type {
+ kUnknownIdentifier,
+ kFutureReservedIdentifier,
+ kFutureStrictReservedIdentifier,
+ kYieldIdentifier,
+ kEvalIdentifier,
+ kArgumentsIdentifier
+ };
+ explicit PreParserIdentifier(Type type) : type_(type) {}
+ Type type_;
+
+ friend class PreParserExpression;
+};
+
+
+// Bits 0 and 1 are used to identify the type of expression:
+// If bit 0 is set, it's an identifier.
+// if bit 1 is set, it's a string literal.
+// If neither is set, it's no particular type, and having both set isn't
+// used yet.
+class PreParserExpression {
+ public:
+ static PreParserExpression Default() {
+ return PreParserExpression(kUnknownExpression);
+ }
+
+ static PreParserExpression FromIdentifier(PreParserIdentifier id) {
+ return PreParserExpression(kIdentifierFlag |
+ (id.type_ << kIdentifierShift));
+ }
+
+ static PreParserExpression StringLiteral() {
+ return PreParserExpression(kUnknownStringLiteral);
+ }
+
+ static PreParserExpression UseStrictStringLiteral() {
+ return PreParserExpression(kUseStrictString);
+ }
+
+ static PreParserExpression This() {
+ return PreParserExpression(kThisExpression);
+ }
+
+ static PreParserExpression ThisProperty() {
+ return PreParserExpression(kThisPropertyExpression);
+ }
+
+ static PreParserExpression StrictFunction() {
+ return PreParserExpression(kStrictFunctionExpression);
+ }
+
+ bool IsIdentifier() { return (code_ & kIdentifierFlag) != 0; }
+
+ // Only works correctly if it is actually an identifier expression.
+ PreParserIdentifier AsIdentifier() {
+ return PreParserIdentifier(
+ static_cast<PreParserIdentifier::Type>(code_ >> kIdentifierShift));
+ }
+
+ bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
+
+ bool IsUseStrictLiteral() {
+ return (code_ & kStringLiteralMask) == kUseStrictString;
+ }
+
+ bool IsThis() { return code_ == kThisExpression; }
+
+ bool IsThisProperty() { return code_ == kThisPropertyExpression; }
+
+ bool IsStrictFunction() { return code_ == kStrictFunctionExpression; }
+
+ private:
+ // First two/three bits are used as flags.
+ // Bits 0 and 1 represent identifiers or string literals, and are
+ // mutually exclusive, but can both be absent.
+ enum {
+ kUnknownExpression = 0,
+ // Identifiers
+ kIdentifierFlag = 1, // Used to detect labels.
+ kIdentifierShift = 3,
+
+ kStringLiteralFlag = 2, // Used to detect directive prologue.
+ kUnknownStringLiteral = kStringLiteralFlag,
+ kUseStrictString = kStringLiteralFlag | 8,
+ kStringLiteralMask = kUseStrictString,
+
+ // Below here applies if neither identifier nor string literal.
+ kThisExpression = 4,
+ kThisPropertyExpression = 8,
+ kStrictFunctionExpression = 12
+ };
+
+ explicit PreParserExpression(int expression_code) : code_(expression_code) {}
+
+ int code_;
+};
+
+
+class PreParserScope {
+ public:
+ explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type)
+ : scope_type_(scope_type) {
+ if (outer_scope) {
+ scope_inside_with_ =
+ outer_scope->scope_inside_with_ || is_with_scope();
+ language_mode_ = outer_scope->language_mode();
+ } else {
+ scope_inside_with_ = is_with_scope();
+ language_mode_ = CLASSIC_MODE;
+ }
+ }
+
+ bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
+ bool is_classic_mode() const {
+ return language_mode() == CLASSIC_MODE;
+ }
+ bool is_extended_mode() {
+ return language_mode() == EXTENDED_MODE;
+ }
+ bool inside_with() const {
+ return scope_inside_with_;
+ }
+
+ ScopeType type() { return scope_type_; }
+ LanguageMode language_mode() const { return language_mode_; }
+ void SetLanguageMode(LanguageMode language_mode) {
+ language_mode_ = language_mode;
+ }
+
+ private:
+ ScopeType scope_type_;
+ bool scope_inside_with_;
+ LanguageMode language_mode_;
+};
+
+
+class PreParserFactory {
+ public:
+ explicit PreParserFactory(void* extra_param) {}
+
+ PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
+ PreParserIdentifier js_flags,
+ int literal_index,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+};
+
+
+class PreParser;
+
+class PreParserTraits {
+ public:
+ struct Type {
+ typedef PreParser* Parser;
+
+ // Types used by FunctionState and BlockState.
+ typedef PreParserScope Scope;
+ typedef PreParserFactory Factory;
+ // PreParser doesn't need to store generator variables.
+ typedef void GeneratorVariable;
+ // No interaction with Zones.
+ typedef void Zone;
+
+ // Return types for traversing functions.
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ };
+
+ explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
+
+ // Custom operations executed when FunctionStates are created and
+ // destructed. (The PreParser doesn't need to do anything.)
+ template<typename FS>
+ static void SetUpFunctionState(FS* function_state, void*) {}
+ template<typename FS>
+ static void TearDownFunctionState(FS* function_state) {}
+
+ // Helper functions for recursive descent.
+ static bool IsEvalOrArguments(PreParserIdentifier identifier) {
+ return identifier.IsEvalOrArguments();
+ }
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location location,
+ const char* message,
+ Vector<const char*> args);
+ void ReportMessageAt(Scanner::Location location,
+ const char* type,
+ const char* name_opt);
+ void ReportMessageAt(int start_pos,
+ int end_pos,
+ const char* type,
+ const char* name_opt);
+
+ // "null" return type creators.
+ static PreParserIdentifier EmptyIdentifier() {
+ return PreParserIdentifier::Default();
+ }
+ static PreParserExpression EmptyExpression() {
+ return PreParserExpression::Default();
+ }
+
+ // Producing data during the recursive descent.
+ PreParserIdentifier GetSymbol(Scanner* scanner);
+ static PreParserIdentifier NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured) {
+ return PreParserIdentifier::Default();
+ }
+
+ static PreParserExpression ThisExpression(PreParserScope* scope,
+ PreParserFactory* factory) {
+ return PreParserExpression::This();
+ }
+
+ static PreParserExpression ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression ExpressionFromIdentifier(
+ PreParserIdentifier name, int pos, PreParserScope* scope,
+ PreParserFactory* factory) {
+ return PreParserExpression::FromIdentifier(name);
+ }
+
+ PreParserExpression ExpressionFromString(int pos,
+ Scanner* scanner,
+ PreParserFactory* factory = NULL);
+
+ // Temporary glue; these functions will move to ParserBase.
+ PreParserExpression ParseArrayLiteral(bool* ok);
+ PreParserExpression ParseObjectLiteral(bool* ok);
+ PreParserExpression ParseExpression(bool accept_IN, bool* ok);
+ PreParserExpression ParseV8Intrinsic(bool* ok);
+
+ private:
+ PreParser* pre_parser_;
+};
+
+
// Preparsing checks a JavaScript program and emits preparse-data that helps
// a later parsing to be faster.
// See preparse-data-format.h for the data format.
@@ -218,8 +697,11 @@ class ParserBase {
// rather it is to speed up properly written and correct programs.
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-class PreParser : public ParserBase {
+class PreParser : public ParserBase<PreParserTraits> {
public:
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+
enum PreParseResult {
kPreParseStackOverflow,
kPreParseSuccess
@@ -228,21 +710,16 @@ class PreParser : public ParserBase {
PreParser(Scanner* scanner,
ParserRecorder* log,
uintptr_t stack_limit)
- : ParserBase(scanner, stack_limit),
- log_(log),
- scope_(NULL),
- strict_mode_violation_location_(Scanner::Location::invalid()),
- strict_mode_violation_type_(NULL),
- parenthesized_function_(false) { }
-
- ~PreParser() {}
+ : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, this),
+ log_(log) {}
// Pre-parse the program from the character stream; returns kPreParseSuccess
// on success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and kPreParseStackOverflow if a stack overflow
// happened during parsing.
PreParseResult PreParseProgram() {
- Scope top_scope(&scope_, kTopLevelScope);
+ PreParserScope scope(scope_, GLOBAL_SCOPE);
+ FunctionState top_scope(&function_state_, &scope_, &scope, NULL);
bool ok = true;
int start_position = scanner()->peek_location().beg_pos;
ParseSourceElements(Token::EOS, &ok);
@@ -268,16 +745,13 @@ class PreParser : public ParserBase {
ParserRecorder* log);
private:
+ friend class PreParserTraits;
+
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
// are either being counted in the preparser data, or are important
// for throwing the correct syntax error exceptions.
- enum ScopeType {
- kTopLevelScope,
- kFunctionScope
- };
-
enum VariableDeclarationContext {
kSourceElement,
kStatement,
@@ -290,177 +764,6 @@ class PreParser : public ParserBase {
kHasNoInitializers
};
- class Expression;
-
- class Identifier {
- public:
- static Identifier Default() {
- return Identifier(kUnknownIdentifier);
- }
- static Identifier Eval() {
- return Identifier(kEvalIdentifier);
- }
- static Identifier Arguments() {
- return Identifier(kArgumentsIdentifier);
- }
- static Identifier FutureReserved() {
- return Identifier(kFutureReservedIdentifier);
- }
- static Identifier FutureStrictReserved() {
- return Identifier(kFutureStrictReservedIdentifier);
- }
- static Identifier Yield() {
- return Identifier(kYieldIdentifier);
- }
- bool IsEval() { return type_ == kEvalIdentifier; }
- bool IsArguments() { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
- bool IsYield() { return type_ == kYieldIdentifier; }
- bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
- bool IsFutureStrictReserved() {
- return type_ == kFutureStrictReservedIdentifier;
- }
- bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
-
- private:
- enum Type {
- kUnknownIdentifier,
- kFutureReservedIdentifier,
- kFutureStrictReservedIdentifier,
- kYieldIdentifier,
- kEvalIdentifier,
- kArgumentsIdentifier
- };
- explicit Identifier(Type type) : type_(type) { }
- Type type_;
-
- friend class Expression;
- };
-
- // Bits 0 and 1 are used to identify the type of expression:
- // If bit 0 is set, it's an identifier.
- // if bit 1 is set, it's a string literal.
- // If neither is set, it's no particular type, and having both set isn't
- // used yet.
- // Bit 2 is used to mark the expression as being parenthesized,
- // so "(foo)" isn't recognized as a pure identifier (and possible label).
- class Expression {
- public:
- static Expression Default() {
- return Expression(kUnknownExpression);
- }
-
- static Expression FromIdentifier(Identifier id) {
- return Expression(kIdentifierFlag | (id.type_ << kIdentifierShift));
- }
-
- static Expression StringLiteral() {
- return Expression(kUnknownStringLiteral);
- }
-
- static Expression UseStrictStringLiteral() {
- return Expression(kUseStrictString);
- }
-
- static Expression This() {
- return Expression(kThisExpression);
- }
-
- static Expression ThisProperty() {
- return Expression(kThisPropertyExpression);
- }
-
- static Expression StrictFunction() {
- return Expression(kStrictFunctionExpression);
- }
-
- bool IsIdentifier() {
- return (code_ & kIdentifierFlag) != 0;
- }
-
- // Only works correctly if it is actually an identifier expression.
- PreParser::Identifier AsIdentifier() {
- return PreParser::Identifier(
- static_cast<PreParser::Identifier::Type>(code_ >> kIdentifierShift));
- }
-
- bool IsParenthesized() {
- // If bit 0 or 1 is set, we interpret bit 2 as meaning parenthesized.
- return (code_ & 7) > 4;
- }
-
- bool IsRawIdentifier() {
- return !IsParenthesized() && IsIdentifier();
- }
-
- bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
-
- bool IsRawStringLiteral() {
- return !IsParenthesized() && IsStringLiteral();
- }
-
- bool IsUseStrictLiteral() {
- return (code_ & kStringLiteralMask) == kUseStrictString;
- }
-
- bool IsThis() {
- return code_ == kThisExpression;
- }
-
- bool IsThisProperty() {
- return code_ == kThisPropertyExpression;
- }
-
- bool IsStrictFunction() {
- return code_ == kStrictFunctionExpression;
- }
-
- Expression Parenthesize() {
- int type = code_ & 3;
- if (type != 0) {
- // Identifiers and string literals can be parenthesized.
- // They no longer work as labels or directive prologues,
- // but are still recognized in other contexts.
- return Expression(code_ | kParenthesizedExpressionFlag);
- }
- // For other types of expressions, it's not important to remember
- // the parentheses.
- return *this;
- }
-
- private:
- // First two/three bits are used as flags.
- // Bits 0 and 1 represent identifiers or string literals, and are
- // mutually exclusive, but can both be absent.
- // If bit 0 or 1 are set, bit 2 marks that the expression has
- // been wrapped in parentheses (a string literal can no longer
- // be a directive prologue, and an identifier can no longer be
- // a label).
- enum {
- kUnknownExpression = 0,
- // Identifiers
- kIdentifierFlag = 1, // Used to detect labels.
- kIdentifierShift = 3,
-
- kStringLiteralFlag = 2, // Used to detect directive prologue.
- kUnknownStringLiteral = kStringLiteralFlag,
- kUseStrictString = kStringLiteralFlag | 8,
- kStringLiteralMask = kUseStrictString,
-
- // Only if identifier or string literal.
- kParenthesizedExpressionFlag = 4,
-
- // Below here applies if neither identifier nor string literal.
- kThisExpression = 4,
- kThisPropertyExpression = 8,
- kStrictFunctionExpression = 12
- };
-
- explicit Expression(int expression_code) : code_(expression_code) { }
-
- int code_;
- };
-
class Statement {
public:
static Statement Default() {
@@ -475,13 +778,11 @@ class PreParser : public ParserBase {
// Preserves being an unparenthesized string literal, possibly
// "use strict".
static Statement ExpressionStatement(Expression expression) {
- if (!expression.IsParenthesized()) {
- if (expression.IsUseStrictLiteral()) {
- return Statement(kUseStrictExpressionStatement);
- }
- if (expression.IsStringLiteral()) {
- return Statement(kStringLiteralExpressionStatement);
- }
+ if (expression.IsUseStrictLiteral()) {
+ return Statement(kUseStrictExpressionStatement);
+ }
+ if (expression.IsStringLiteral()) {
+ return Statement(kStringLiteralExpressionStatement);
}
return Default();
}
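The string-literal statement kinds above exist so the pre-parser can recognize the directive prologue. A JavaScript sketch of the distinction being tracked (illustrative):

    function f() {
      "use strict";     // string-literal statement in the prologue: a directive
      var x = 1;        // the first non-string statement ends the prologue
      "use strict";     // from here on this is just a useless expression statement
      return x;
    }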
@@ -516,80 +817,6 @@ class PreParser : public ParserBase {
typedef int Arguments;
- class Scope {
- public:
- Scope(Scope** variable, ScopeType type)
- : variable_(variable),
- prev_(*variable),
- type_(type),
- materialized_literal_count_(0),
- expected_properties_(0),
- with_nesting_count_(0),
- language_mode_(
- (prev_ != NULL) ? prev_->language_mode() : CLASSIC_MODE),
- is_generator_(false) {
- *variable = this;
- }
- ~Scope() { *variable_ = prev_; }
- void NextMaterializedLiteralIndex() { materialized_literal_count_++; }
- void AddProperty() { expected_properties_++; }
- ScopeType type() { return type_; }
- int expected_properties() { return expected_properties_; }
- int materialized_literal_count() { return materialized_literal_count_; }
- bool IsInsideWith() { return with_nesting_count_ != 0; }
- bool is_generator() { return is_generator_; }
- void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
- bool is_classic_mode() {
- return language_mode_ == CLASSIC_MODE;
- }
- LanguageMode language_mode() {
- return language_mode_;
- }
- void set_language_mode(LanguageMode language_mode) {
- language_mode_ = language_mode;
- }
-
- class InsideWith {
- public:
- explicit InsideWith(Scope* scope) : scope_(scope) {
- scope->with_nesting_count_++;
- }
-
- ~InsideWith() { scope_->with_nesting_count_--; }
-
- private:
- Scope* scope_;
- DISALLOW_COPY_AND_ASSIGN(InsideWith);
- };
-
- private:
- Scope** const variable_;
- Scope* const prev_;
- const ScopeType type_;
- int materialized_literal_count_;
- int expected_properties_;
- int with_nesting_count_;
- LanguageMode language_mode_;
- bool is_generator_;
- };
-
- // Report syntax error
- void ReportUnexpectedToken(Token::Value token);
- void ReportMessageAt(Scanner::Location location, const char* type) {
- ReportMessageAt(location, type, NULL);
- }
- void ReportMessageAt(Scanner::Location location,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
- }
- void ReportMessageAt(int start_pos,
- int end_pos,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(start_pos, end_pos, type, name_opt);
- }
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -627,65 +854,341 @@ class PreParser : public ParserBase {
Expression ParseUnaryExpression(bool* ok);
Expression ParsePostfixExpression(bool* ok);
Expression ParseLeftHandSideExpression(bool* ok);
- Expression ParseNewExpression(bool* ok);
Expression ParseMemberExpression(bool* ok);
- Expression ParseMemberWithNewPrefixesExpression(unsigned new_count, bool* ok);
- Expression ParsePrimaryExpression(bool* ok);
+ Expression ParseMemberExpressionContinuation(PreParserExpression expression,
+ bool* ok);
+ Expression ParseMemberWithNewPrefixesExpression(bool* ok);
Expression ParseArrayLiteral(bool* ok);
Expression ParseObjectLiteral(bool* ok);
- Expression ParseRegExpLiteral(bool seen_equal, bool* ok);
Expression ParseV8Intrinsic(bool* ok);
Arguments ParseArguments(bool* ok);
- Expression ParseFunctionLiteral(bool is_generator, bool* ok);
+ Expression ParseFunctionLiteral(
+ Identifier name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ bool* ok);
void ParseLazyFunctionLiteralBody(bool* ok);
- Identifier ParseIdentifier(bool* ok);
- Identifier ParseIdentifierName(bool* ok);
- Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
// Logs the currently parsed literal as a symbol in the preparser data.
void LogSymbol();
- // Log the currently parsed identifier.
- Identifier GetIdentifierSymbol();
// Log the currently parsed string literal.
Expression GetStringSymbol();
- void set_language_mode(LanguageMode language_mode) {
- scope_->set_language_mode(language_mode);
- }
+ bool CheckInOrOf(bool accept_OF);
- bool is_classic_mode() {
- return scope_->language_mode() == CLASSIC_MODE;
+ ParserRecorder* log_;
+};
+
+
+template<class Traits>
+ParserBase<Traits>::FunctionState::FunctionState(
+ FunctionState** function_state_stack,
+ typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope,
+ typename Traits::Type::Zone* extra_param)
+ : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+ next_handler_index_(0),
+ expected_property_count_(0),
+ is_generator_(false),
+ generator_object_variable_(NULL),
+ function_state_stack_(function_state_stack),
+ outer_function_state_(*function_state_stack),
+ scope_stack_(scope_stack),
+ outer_scope_(*scope_stack),
+ isolate_(NULL),
+ saved_ast_node_id_(0),
+ factory_(extra_param) {
+ *scope_stack_ = scope;
+ *function_state_stack = this;
+ Traits::SetUpFunctionState(this, extra_param);
+}
+
+
+template<class Traits>
+ParserBase<Traits>::FunctionState::~FunctionState() {
+ *scope_stack_ = outer_scope_;
+ *function_state_stack_ = outer_function_state_;
+ Traits::TearDownFunctionState(this);
+}
+
+
+template<class Traits>
+void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
+ // We don't report stack overflows here, to avoid increasing the
+ // stack depth even further. Instead we report it after parsing is
+ // over, in ParseProgram.
+ if (token == Token::ILLEGAL && stack_overflow()) {
+ return;
+ }
+ Scanner::Location source_location = scanner()->location();
+
+ // Several of the tokens are treated specially here.
+ switch (token) {
+ case Token::EOS:
+ return ReportMessageAt(source_location, "unexpected_eos");
+ case Token::NUMBER:
+ return ReportMessageAt(source_location, "unexpected_token_number");
+ case Token::STRING:
+ return ReportMessageAt(source_location, "unexpected_token_string");
+ case Token::IDENTIFIER:
+ return ReportMessageAt(source_location, "unexpected_token_identifier");
+ case Token::FUTURE_RESERVED_WORD:
+ return ReportMessageAt(source_location, "unexpected_reserved");
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ return ReportMessageAt(source_location,
+ is_classic_mode() ? "unexpected_token_identifier"
+ : "unexpected_strict_reserved");
+ default:
+ const char* name = Token::String(token);
+ ASSERT(name != NULL);
+ Traits::ReportMessageAt(
+ source_location, "unexpected_token", Vector<const char*>(&name, 1));
+ }
+}
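A few inputs that would land in the specialised branches of ReportUnexpectedToken above (the message keys come from the switch; the example sources are assumptions):

    var badSources = [
      'var x =',         // input ends too early         -> "unexpected_eos"
      'var 42 = 1;',     // NUMBER where a name belongs  -> "unexpected_token_number"
      'var "s" = 1;',    // STRING in the same position  -> "unexpected_token_string"
      'var class = 1;'   // future reserved word         -> "unexpected_reserved"
    ];
    badSources.forEach(function(src) {
      try { eval(src); } catch (e) { /* each one is a SyntaxError */ }
    });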
+
+
+template<class Traits>
+typename Traits::Type::Identifier ParserBase<Traits>::ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ typename Traits::Type::Identifier name = this->GetSymbol(scanner());
+ if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
+ !is_classic_mode() && this->IsEvalOrArguments(name)) {
+ ReportMessageAt(scanner()->location(), "strict_eval_arguments");
+ *ok = false;
+ }
+ return name;
+ } else if (is_classic_mode() && (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !is_generator()))) {
+ return this->GetSymbol(scanner());
+ } else {
+ this->ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+}
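The kAllowEvalOrArguments / kDontAllowEvalOrArguments split above distinguishes reading the names 'eval' and 'arguments' from binding them in strict code, and classic mode additionally lets 'yield' (outside a generator) and the future strict reserved words act as plain identifiers. An illustrative JavaScript sketch (assumed behaviour, not test cases from this patch):

    var alias = eval;           // reading 'eval' as a value is fine, even in strict code
    function classic() {
      var yield = 1;            // classic mode: 'yield' outside a generator is an identifier
      var interface = 2;        // future strict reserved words as well
      return yield + interface;
    }
    function strict() {
      "use strict";
      // var eval = 1;          // binding 'eval' here -> "strict_eval_arguments"
      // var interface = 2;     // strict reserved word -> rejected
      return typeof arguments;  // using 'arguments' as a value is still allowed
    }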
+
+
+template <class Traits>
+typename Traits::Type::Identifier ParserBase<
+ Traits>::ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ *is_strict_reserved = false;
+ } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !this->is_generator())) {
+ *is_strict_reserved = true;
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+ return this->GetSymbol(scanner());
+}
+
+
+template <class Traits>
+typename Traits::Type::Identifier ParserBase<Traits>::ParseIdentifierName(
+ bool* ok) {
+ Token::Value next = Next();
+ if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
+ next != Token::FUTURE_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
+ this->ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+ return this->GetSymbol(scanner());
+}
+
+
+template <class Traits>
+typename Traits::Type::Identifier
+ParserBase<Traits>::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ typename Traits::Type::Identifier result = ParseIdentifierName(ok);
+ if (!*ok) return Traits::EmptyIdentifier();
+ if (scanner()->is_literal_ascii() &&
+ scanner()->literal_length() == 3) {
+ const char* token = scanner()->literal_ascii_string().start();
+ *is_get = strncmp(token, "get", 3) == 0;
+ *is_set = !*is_get && strncmp(token, "set", 3) == 0;
+ }
+ return result;
+}
+
+
+template <class Traits>
+typename Traits::Type::Expression
+ParserBase<Traits>::ParseRegExpLiteral(bool seen_equal, bool* ok) {
+ int pos = peek_position();
+ if (!scanner()->ScanRegExpPattern(seen_equal)) {
+ Next();
+ ReportMessage("unterminated_regexp", Vector<const char*>::empty());
+ *ok = false;
+ return Traits::EmptyExpression();
}
- bool is_extended_mode() {
- return scope_->language_mode() == EXTENDED_MODE;
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
+
+ typename Traits::Type::Identifier js_pattern =
+ this->NextLiteralString(scanner(), TENURED);
+ if (!scanner()->ScanRegExpFlags()) {
+ Next();
+ ReportMessageAt(scanner()->location(), "invalid_regexp_flags");
+ *ok = false;
+ return Traits::EmptyExpression();
}
+ typename Traits::Type::Identifier js_flags =
+ this->NextLiteralString(scanner(), TENURED);
+ Next();
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
+}
+
+
+#define CHECK_OK ok); \
+ if (!*ok) return this->EmptyExpression(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+template <class Traits>
+typename Traits::Type::Expression ParserBase<Traits>::ParsePrimaryExpression(
+ bool* ok) {
+ // PrimaryExpression ::
+ // 'this'
+ // 'null'
+ // 'true'
+ // 'false'
+ // Identifier
+ // Number
+ // String
+ // ArrayLiteral
+ // ObjectLiteral
+ // RegExpLiteral
+ // '(' Expression ')'
+
+ int pos = peek_position();
+ typename Traits::Type::Expression result = this->EmptyExpression();
+ Token::Value token = peek();
+ switch (token) {
+ case Token::THIS: {
+ Consume(Token::THIS);
+ result = this->ThisExpression(scope_, factory());
+ break;
+ }
- LanguageMode language_mode() { return scope_->language_mode(); }
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::NUMBER:
+ Next();
+ result = this->ExpressionFromLiteral(token, pos, scanner(), factory());
+ break;
+
+ case Token::IDENTIFIER:
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
+ // Using eval or arguments in this context is OK even in strict mode.
+ typename Traits::Type::Identifier name =
+ ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
+ result =
+ this->ExpressionFromIdentifier(name, pos, scope_, factory());
+ break;
+ }
- bool CheckInOrOf(bool accept_OF);
+ case Token::STRING: {
+ Consume(Token::STRING);
+ result = this->ExpressionFromString(pos, scanner(), factory());
+ break;
+ }
- void SetStrictModeViolation(Scanner::Location,
- const char* type,
- bool* ok);
+ case Token::ASSIGN_DIV:
+ result = this->ParseRegExpLiteral(true, CHECK_OK);
+ break;
+
+ case Token::DIV:
+ result = this->ParseRegExpLiteral(false, CHECK_OK);
+ break;
+
+ case Token::LBRACK:
+ result = this->ParseArrayLiteral(CHECK_OK);
+ break;
+
+ case Token::LBRACE:
+ result = this->ParseObjectLiteral(CHECK_OK);
+ break;
+
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ // Heuristically try to detect immediately called functions before
+ // seeing the call parentheses.
+ parenthesized_function_ = (peek() == Token::FUNCTION);
+ result = this->ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ break;
+
+ case Token::MOD:
+ if (allow_natives_syntax() || extension_ != NULL) {
+ result = this->ParseV8Intrinsic(CHECK_OK);
+ break;
+ }
+ // If we're not allowing special syntax, we fall through to the
+ // default case.
- void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
+ default: {
+ Next();
+ ReportUnexpectedToken(token);
+ *ok = false;
+ }
+ }
- void StrictModeIdentifierViolation(Scanner::Location,
- const char* eval_args_type,
- Identifier identifier,
- bool* ok);
+ return result;
+}
+
+#undef CHECK_OK
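The parenthesized_function_ flag set in the LPAREN case above feeds the lazy-compilation heuristic: a function literal written directly after '(' is very likely invoked immediately, so it is worth compiling eagerly. Roughly (illustrative JavaScript):

    var counter = (function() {        // '(' followed by 'function': almost certainly
      var n = 0;                       // an IIFE, so compile it right away
      return function() { return ++n; };
    })();
    var maybeUnused = function() {     // a plain function expression is a good
      return 'expensive body';         // candidate for lazy compilation
    };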
+
+
+template <typename Traits>
+void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
+ Token::Value property,
+ PropertyKind type,
+ bool* ok) {
+ int old;
+ if (property == Token::NUMBER) {
+ old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
+ } else if (scanner()->is_literal_ascii()) {
+ old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
+ } else {
+ old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
+ }
+ PropertyKind old_type = static_cast<PropertyKind>(old);
+ if (HasConflict(old_type, type)) {
+ if (IsDataDataConflict(old_type, type)) {
+ // Both are data properties.
+ if (language_mode_ == CLASSIC_MODE) return;
+ parser()->ReportMessageAt(scanner()->location(),
+ "strict_duplicate_property");
+ } else if (IsDataAccessorConflict(old_type, type)) {
+ // Both a data and an accessor property with the same name.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_data_property");
+ } else {
+ ASSERT(IsAccessorAccessorConflict(old_type, type));
+ // Both accessors of the same type.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_get_set");
+ }
+ *ok = false;
+ }
+}
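Object literals that exercise the three conflict kinds distinguished above (illustrative; the failing forms are commented out because they would be rejected, or rejected only in strict mode):

    var dupData = { x: 1, x: 2 };                // data/data: an error only in strict mode
    // "use strict"; var s = { x: 1, x: 2 };     // -> "strict_duplicate_property"
    // var mixed = { x: 1, get x() {} };         // data/accessor -> "accessor_data_property"
    // var twice = { get x() {}, get x() {} };   // two getters   -> "accessor_get_set"
    var fine = { x: 1, get y() { return 2; } };  // distinct names never conflict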
- ParserRecorder* log_;
- Scope* scope_;
- Scanner::Location strict_mode_violation_location_;
- const char* strict_mode_violation_type_;
- bool parenthesized_function_;
-};
} } // v8::internal
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 4b441b9ae4..130fec1b23 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -38,11 +38,11 @@ namespace internal {
#ifdef DEBUG
-PrettyPrinter::PrettyPrinter(Isolate* isolate) {
+PrettyPrinter::PrettyPrinter(Zone* zone) {
output_ = NULL;
size_ = 0;
pos_ = 0;
- InitializeAstVisitor(isolate);
+ InitializeAstVisitor(zone);
}
@@ -493,8 +493,8 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}
-void PrettyPrinter::PrintOut(Isolate* isolate, AstNode* node) {
- PrettyPrinter printer(isolate);
+void PrettyPrinter::PrintOut(Zone* zone, AstNode* node) {
+ PrettyPrinter printer(zone);
PrintF("%s", printer.Print(node));
}
@@ -657,7 +657,7 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-AstPrinter::AstPrinter(Isolate* isolate) : PrettyPrinter(isolate), indent_(0) {
+AstPrinter::AstPrinter(Zone* zone) : PrettyPrinter(zone), indent_(0) {
}
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index b7ff2af5fa..a792720fe8 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -38,7 +38,7 @@ namespace internal {
class PrettyPrinter: public AstVisitor {
public:
- explicit PrettyPrinter(Isolate* isolate);
+ explicit PrettyPrinter(Zone* zone);
virtual ~PrettyPrinter();
// The following routines print a node into a string.
@@ -50,7 +50,7 @@ class PrettyPrinter: public AstVisitor {
void Print(const char* format, ...);
// Print a node to stdout.
- static void PrintOut(Isolate* isolate, AstNode* node);
+ static void PrintOut(Zone* zone, AstNode* node);
// Individual nodes
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -82,7 +82,7 @@ class PrettyPrinter: public AstVisitor {
// Prints the AST structure
class AstPrinter: public PrettyPrinter {
public:
- explicit AstPrinter(Isolate* isolate);
+ explicit AstPrinter(Zone* zone);
virtual ~AstPrinter();
const char* PrintProgram(FunctionLiteral* program);
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index acf54da1c7..6bd446e0c4 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -352,9 +352,8 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
-CpuProfile::CpuProfile(const char* title, unsigned uid, bool record_samples)
+CpuProfile::CpuProfile(const char* title, bool record_samples)
: title_(title),
- uid_(uid),
record_samples_(record_samples),
start_time_(Time::NowFromSystemTime()) {
timer_.Start();
@@ -486,9 +485,8 @@ CpuProfilesCollection::~CpuProfilesCollection() {
}
-bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
+bool CpuProfilesCollection::StartProfiling(const char* title,
bool record_samples) {
- ASSERT(uid > 0);
current_profiles_semaphore_.Wait();
if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
current_profiles_semaphore_.Signal();
@@ -501,7 +499,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
return false;
}
}
- current_profiles_.Add(new CpuProfile(title, uid, record_samples));
+ current_profiles_.Add(new CpuProfile(title, record_samples));
current_profiles_semaphore_.Signal();
return true;
}
@@ -537,9 +535,8 @@ bool CpuProfilesCollection::IsLastProfile(const char* title) {
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
// Called from VM thread for a completed profile.
- unsigned uid = profile->uid();
for (int i = 0; i < finished_profiles_.length(); i++) {
- if (uid == finished_profiles_[i]->uid()) {
+ if (profile == finished_profiles_[i]) {
finished_profiles_.Remove(i);
return;
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 6e4758bece..81980bfc99 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -196,14 +196,13 @@ class ProfileTree {
class CpuProfile {
public:
- CpuProfile(const char* title, unsigned uid, bool record_samples);
+ CpuProfile(const char* title, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
void AddPath(const Vector<CodeEntry*>& path);
void CalculateTotalTicksAndSamplingRate();
const char* title() const { return title_; }
- unsigned uid() const { return uid_; }
const ProfileTree* top_down() const { return &top_down_; }
int samples_count() const { return samples_.length(); }
@@ -218,7 +217,6 @@ class CpuProfile {
private:
const char* title_;
- unsigned uid_;
bool record_samples_;
Time start_time_;
Time end_time_;
@@ -281,7 +279,7 @@ class CpuProfilesCollection {
explicit CpuProfilesCollection(Heap* heap);
~CpuProfilesCollection();
- bool StartProfiling(const char* title, unsigned uid, bool record_samples);
+ bool StartProfiling(const char* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
List<CpuProfile*>* profiles() { return &finished_profiles_; }
const char* GetName(Name* name) {
diff --git a/deps/v8/src/promise.js b/deps/v8/src/promise.js
new file mode 100644
index 0000000000..82aa99027a
--- /dev/null
+++ b/deps/v8/src/promise.js
@@ -0,0 +1,308 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Object = global.Object
+// var $WeakMap = global.WeakMap
+
+
+var $Promise = Promise;
+
+
+//-------------------------------------------------------------------
+
+// Core functionality.
+
+// Event queue format: [(value, [(handler, deferred)*])*]
+// I.e., a list of value/tasks pairs, where the value is a resolution value or
+// rejection reason, and the tasks are a respective list of handler/deferred
+// pairs waiting for notification of this value. Each handler is an onResolve or
+// onReject function provided to the same call of 'chain' that produced the
+// associated deferred.
+var promiseEvents = new InternalArray;
+
+// Status values: 0 = pending, +1 = resolved, -1 = rejected
+var promiseStatus = NEW_PRIVATE("Promise#status");
+var promiseValue = NEW_PRIVATE("Promise#value");
+var promiseOnResolve = NEW_PRIVATE("Promise#onResolve");
+var promiseOnReject = NEW_PRIVATE("Promise#onReject");
+var promiseRaw = NEW_PRIVATE("Promise#raw");
+
+function IsPromise(x) {
+ return IS_SPEC_OBJECT(x) && %HasLocalProperty(x, promiseStatus);
+}
+
+function Promise(resolver) {
+ if (resolver === promiseRaw) return;
+ if (!%_IsConstructCall()) throw MakeTypeError('not_a_promise', [this]);
+ if (typeof resolver !== 'function')
+ throw MakeTypeError('resolver_not_a_function', [resolver]);
+ var promise = PromiseInit(this);
+ try {
+ resolver(function(x) { PromiseResolve(promise, x) },
+ function(r) { PromiseReject(promise, r) });
+ } catch (e) {
+ PromiseReject(promise, e);
+ }
+}
+
+function PromiseSet(promise, status, value, onResolve, onReject) {
+ SET_PRIVATE(promise, promiseStatus, status);
+ SET_PRIVATE(promise, promiseValue, value);
+ SET_PRIVATE(promise, promiseOnResolve, onResolve);
+ SET_PRIVATE(promise, promiseOnReject, onReject);
+ return promise;
+}
+
+function PromiseInit(promise) {
+ return PromiseSet(promise, 0, UNDEFINED, new InternalArray, new InternalArray)
+}
+
+function PromiseDone(promise, status, value, promiseQueue) {
+ if (GET_PRIVATE(promise, promiseStatus) === 0) {
+ PromiseEnqueue(value, GET_PRIVATE(promise, promiseQueue));
+ PromiseSet(promise, status, value);
+ }
+}
+
+function PromiseResolve(promise, x) {
+ PromiseDone(promise, +1, x, promiseOnResolve)
+}
+
+function PromiseReject(promise, r) {
+ PromiseDone(promise, -1, r, promiseOnReject)
+}
+
+
+// Convenience.
+
+function PromiseDeferred() {
+ if (this === $Promise) {
+ // Optimized case, avoid extra closure.
+ var promise = PromiseInit(new Promise(promiseRaw));
+ return {
+ promise: promise,
+ resolve: function(x) { PromiseResolve(promise, x) },
+ reject: function(r) { PromiseReject(promise, r) }
+ };
+ } else {
+ var result = {};
+ result.promise = new this(function(resolve, reject) {
+ result.resolve = resolve;
+ result.reject = reject;
+ })
+ return result;
+ }
+}
+
+function PromiseResolved(x) {
+ if (this === $Promise) {
+ // Optimized case, avoid extra closure.
+ return PromiseSet(new Promise(promiseRaw), +1, x);
+ } else {
+ return new this(function(resolve, reject) { resolve(x) });
+ }
+}
+
+function PromiseRejected(r) {
+ if (this === $Promise) {
+ // Optimized case, avoid extra closure.
+ return PromiseSet(new Promise(promiseRaw), -1, r);
+ } else {
+ return new this(function(resolve, reject) { reject(r) });
+ }
+}
+
+
+// Simple chaining.
+
+function PromiseIdResolveHandler(x) { return x }
+function PromiseIdRejectHandler(r) { throw r }
+
+function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
+ onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
+ onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
+ var deferred = %_CallFunction(this.constructor, PromiseDeferred);
+ switch (GET_PRIVATE(this, promiseStatus)) {
+ case UNDEFINED:
+ throw MakeTypeError('not_a_promise', [this]);
+ case 0: // Pending
+ GET_PRIVATE(this, promiseOnResolve).push(onResolve, deferred);
+ GET_PRIVATE(this, promiseOnReject).push(onReject, deferred);
+ break;
+ case +1: // Resolved
+ PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onResolve, deferred]);
+ break;
+ case -1: // Rejected
+ PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onReject, deferred]);
+ break;
+ }
+ return deferred.promise;
+}
+
+function PromiseCatch(onReject) {
+ return this.chain(UNDEFINED, onReject);
+}
+
+function PromiseEnqueue(value, tasks) {
+ GetMicrotaskQueue().push(function() {
+ for (var i = 0; i < tasks.length; i += 2) {
+ PromiseHandle(value, tasks[i], tasks[i + 1])
+ }
+ });
+
+ %SetMicrotaskPending(true);
+}
+
+function PromiseHandle(value, handler, deferred) {
+ try {
+ var result = handler(value);
+ if (result === deferred.promise)
+ throw MakeTypeError('promise_cyclic', [result]);
+ else if (IsPromise(result))
+ result.chain(deferred.resolve, deferred.reject);
+ else
+ deferred.resolve(result);
+ } catch(e) {
+ // TODO(rossberg): perhaps log uncaught exceptions below.
+ try { deferred.reject(e) } catch(e) {}
+ }
+}
+
+
+// Multi-unwrapped chaining with thenable coercion.
+
+function PromiseThen(onResolve, onReject) {
+ onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
+ var that = this;
+ var constructor = this.constructor;
+ return this.chain(
+ function(x) {
+ x = PromiseCoerce(constructor, x);
+ return x === that ? onReject(MakeTypeError('promise_cyclic', [x])) :
+ IsPromise(x) ? x.then(onResolve, onReject) : onResolve(x);
+ },
+ onReject
+ );
+}
+
+PromiseCoerce.table = new $WeakMap;
+
+function PromiseCoerce(constructor, x) {
+ if (!(IsPromise(x) || IS_NULL_OR_UNDEFINED(x))) {
+ var then = x.then;
+ if (typeof then === 'function') {
+ if (PromiseCoerce.table.has(x)) {
+ return PromiseCoerce.table.get(x);
+ } else {
+ var deferred = %_CallFunction(constructor, PromiseDeferred);
+ PromiseCoerce.table.set(x, deferred.promise);
+ try {
+ %_CallFunction(x, deferred.resolve, deferred.reject, then);
+ } catch(e) {
+ deferred.reject(e);
+ }
+ return deferred.promise;
+ }
+ }
+ }
+ return x;
+}
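PromiseCoerce is what lets 'then' adopt plain thenables, and the WeakMap memoizes the wrapper promise so a given thenable is only coerced once. A rough usage sketch against the functions defined in this file (it assumes the file has been bootstrapped so that Promise, cast and then are installed):

    var thenable = {
      then: function(onResolve, onReject) { onResolve('from thenable'); }
    };
    Promise.cast(thenable)          // stored as a plain value, not yet adopted
        .then(function(v) {
          // v === 'from thenable'  // then() ran the value through PromiseCoerce
        });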
+
+
+// Combinators.
+
+function PromiseCast(x) {
+ // TODO(rossberg): cannot do better until we support @@create.
+ return IsPromise(x) ? x : this.resolve(x);
+}
+
+function PromiseAll(values) {
+ var deferred = %_CallFunction(this, PromiseDeferred);
+ var resolutions = [];
+ try {
+ var count = values.length;
+ if (count === 0) {
+ deferred.resolve(resolutions);
+ } else {
+ for (var i = 0; i < values.length; ++i) {
+ this.cast(values[i]).chain(
+ function(i, x) {
+ resolutions[i] = x;
+ if (--count === 0) deferred.resolve(resolutions);
+ }.bind(UNDEFINED, i), // TODO(rossberg): use let loop once available
+ function(r) { deferred.reject(r) }
+ );
+ }
+ }
+ } catch (e) {
+ deferred.reject(e)
+ }
+ return deferred.promise;
+}
+
+function PromiseOne(values) {
+ var deferred = %_CallFunction(this, PromiseDeferred);
+ try {
+ for (var i = 0; i < values.length; ++i) {
+ this.cast(values[i]).chain(
+ function(x) { deferred.resolve(x) },
+ function(r) { deferred.reject(r) }
+ );
+ }
+ } catch (e) {
+ deferred.reject(e)
+ }
+ return deferred.promise;
+}
+
+//-------------------------------------------------------------------
+
+function SetUpPromise() {
+ %CheckIsBootstrapping()
+ var global_receiver = %GlobalReceiver(global);
+ global_receiver.Promise = $Promise;
+ InstallFunctions($Promise, DONT_ENUM, [
+ "defer", PromiseDeferred,
+ "resolve", PromiseResolved,
+ "reject", PromiseRejected,
+ "all", PromiseAll,
+ "race", PromiseOne,
+ "cast", PromiseCast
+ ]);
+ InstallFunctions($Promise.prototype, DONT_ENUM, [
+ "chain", PromiseChain,
+ "then", PromiseThen,
+ "catch", PromiseCatch
+ ]);
+}
+
+SetUpPromise();
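A short usage sketch of the API installed by SetUpPromise above: defer, resolve, reject, all, race and cast on the constructor, plus chain, then and catch on the prototype (illustrative; results only show up once the microtask queue has been drained):

    var log = [];
    var p = new Promise(function(resolve, reject) { resolve(42); });
    p.chain(function(v) { return v + 1; })        // chain: flatMap-style, no thenable coercion
     .then(function(v) { log.push(v); },          // then: additionally coerces thenables
           function(e) { log.push('rejected: ' + e); });

    var d = Promise.defer();                      // PromiseDeferred
    Promise.all([p, d.promise, Promise.cast(7)])
        .then(function(values) { log.push(values.length); });
    d.resolve('later');
    // After the microtasks run, log is expected to contain 43 and 3.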
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 659fbd1da6..7bc553a46b 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -42,9 +42,12 @@ enum PropertyAttributes {
SEALED = DONT_DELETE,
FROZEN = SEALED | READ_ONLY,
- SYMBOLIC = 8, // Used to filter symbol names
- DONT_SHOW = DONT_ENUM | SYMBOLIC,
- ABSENT = 16 // Used in runtime to indicate a property is absent.
+ STRING = 8, // Used to filter symbols and string names
+ SYMBOLIC = 16,
+ PRIVATE_SYMBOL = 32,
+
+ DONT_SHOW = DONT_ENUM | SYMBOLIC | PRIVATE_SYMBOL,
+ ABSENT = 64 // Used in runtime to indicate a property is absent.
// ABSENT can never be stored in or returned from a descriptor's attributes
// bitfield. It is only used as a return value meaning the attributes of
// a non-existent property.
@@ -55,7 +58,9 @@ namespace v8 {
namespace internal {
class Smi;
-class Type;
+template<class> class TypeImpl;
+struct ZoneTypeConfig;
+typedef TypeImpl<ZoneTypeConfig> Type;
class TypeInfo;
// Type of properties.
@@ -82,7 +87,10 @@ class Representation {
public:
enum Kind {
kNone,
- kByte,
+ kInteger8,
+ kUInteger8,
+ kInteger16,
+ kUInteger16,
kSmi,
kInteger32,
kDouble,
@@ -96,7 +104,10 @@ class Representation {
static Representation None() { return Representation(kNone); }
static Representation Tagged() { return Representation(kTagged); }
- static Representation Byte() { return Representation(kByte); }
+ static Representation Integer8() { return Representation(kInteger8); }
+ static Representation UInteger8() { return Representation(kUInteger8); }
+ static Representation Integer16() { return Representation(kInteger16); }
+ static Representation UInteger16() { return Representation(kUInteger16); }
static Representation Smi() { return Representation(kSmi); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
@@ -105,9 +116,7 @@ class Representation {
static Representation FromKind(Kind kind) { return Representation(kind); }
- // TODO(rossberg): this should die eventually.
- static Representation FromType(TypeInfo info);
- static Representation FromType(Handle<Type> type);
+ static Representation FromType(Type* type);
bool Equals(const Representation& other) const {
return kind_ == other.kind_;
@@ -123,9 +132,15 @@ class Representation {
}
bool is_more_general_than(const Representation& other) const {
+ if (kind_ == kExternal && other.kind_ == kNone) return true;
+ if (kind_ == kExternal && other.kind_ == kExternal) return false;
+ if (kind_ == kNone && other.kind_ == kExternal) return false;
+
ASSERT(kind_ != kExternal);
ASSERT(other.kind_ != kExternal);
if (IsHeapObject()) return other.IsDouble() || other.IsNone();
+ if (kind_ == kUInteger8 && other.kind_ == kInteger8) return false;
+ if (kind_ == kUInteger16 && other.kind_ == kInteger16) return false;
return kind_ > other.kind_;
}
@@ -139,9 +154,26 @@ class Representation {
return Representation::Tagged();
}
+ int size() const {
+ ASSERT(!IsNone());
+ if (IsInteger8() || IsUInteger8()) {
+ return sizeof(uint8_t);
+ }
+ if (IsInteger16() || IsUInteger16()) {
+ return sizeof(uint16_t);
+ }
+ if (IsInteger32()) {
+ return sizeof(uint32_t);
+ }
+ return kPointerSize;
+ }
+
Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
- bool IsByte() const { return kind_ == kByte; }
+ bool IsInteger8() const { return kind_ == kInteger8; }
+ bool IsUInteger8() const { return kind_ == kUInteger8; }
+ bool IsInteger16() const { return kind_ == kInteger16; }
+ bool IsUInteger16() const { return kind_ == kUInteger16; }
bool IsTagged() const { return kind_ == kTagged; }
bool IsSmi() const { return kind_ == kSmi; }
bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); }
@@ -151,7 +183,9 @@ class Representation {
bool IsHeapObject() const { return kind_ == kHeapObject; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
- return IsByte() || IsSmi() || IsInteger32() || IsDouble();
+ return IsInteger8() || IsUInteger8() ||
+ IsInteger16() || IsUInteger16() ||
+ IsSmi() || IsInteger32() || IsDouble();
}
const char* Mnemonic() const;
@@ -165,6 +199,15 @@ class Representation {
};
+static const int kDescriptorIndexBitCount = 10;
+// The maximum number of descriptors we want in a descriptor array (should
+// fit in a page).
+static const int kMaxNumberOfDescriptors =
+ (1 << kDescriptorIndexBitCount) - 2;
+static const int kInvalidEnumCacheSentinel =
+ (1 << kDescriptorIndexBitCount) - 1;
+
+
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
@@ -190,11 +233,11 @@ class PropertyDetails BASE_EMBEDDED {
| FieldIndexField::encode(field_index);
}
- int pointer() { return DescriptorPointer::decode(value_); }
+ int pointer() const { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
- PropertyDetails CopyWithRepresentation(Representation representation) {
+ PropertyDetails CopyWithRepresentation(Representation representation) const {
return PropertyDetails(value_, representation);
}
PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) {
@@ -205,7 +248,7 @@ class PropertyDetails BASE_EMBEDDED {
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi();
+ inline Smi* AsSmi() const;
static uint8_t EncodeRepresentation(Representation representation) {
return representation.kind();
@@ -215,26 +258,26 @@ class PropertyDetails BASE_EMBEDDED {
return Representation::FromKind(static_cast<Representation::Kind>(bits));
}
- PropertyType type() { return TypeField::decode(value_); }
+ PropertyType type() const { return TypeField::decode(value_); }
PropertyAttributes attributes() const {
return AttributesField::decode(value_);
}
- int dictionary_index() {
+ int dictionary_index() const {
return DictionaryStorageField::decode(value_);
}
- Representation representation() {
+ Representation representation() const {
ASSERT(type() != NORMAL);
return DecodeRepresentation(RepresentationField::decode(value_));
}
- int field_index() {
+ int field_index() const {
return FieldIndexField::decode(value_);
}
- inline PropertyDetails AsDeleted();
+ inline PropertyDetails AsDeleted() const;
static bool IsValidIndex(int index) {
return DictionaryStorageField::is_valid(index);
@@ -255,9 +298,14 @@ class PropertyDetails BASE_EMBEDDED {
class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
// Bit fields for fast objects.
- class DescriptorPointer: public BitField<uint32_t, 6, 11> {};
- class RepresentationField: public BitField<uint32_t, 17, 3> {};
- class FieldIndexField: public BitField<uint32_t, 20, 11> {};
+ class RepresentationField: public BitField<uint32_t, 6, 4> {};
+ class DescriptorPointer: public BitField<uint32_t, 10,
+ kDescriptorIndexBitCount> {}; // NOLINT
+ class FieldIndexField: public BitField<uint32_t,
+ 10 + kDescriptorIndexBitCount,
+ kDescriptorIndexBitCount> {}; // NOLINT
+  // All bits for fast objects must fit in a smi.
+ STATIC_ASSERT(10 + kDescriptorIndexBitCount + kDescriptorIndexBitCount <= 31);
static const int kInitialIndex = 1;
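The rewritten PropertyDetails layout above widens the representation field to 4 bits (to cover the new Integer8/16 kinds) and gives the descriptor pointer and field index kDescriptorIndexBitCount bits each, so the whole fast-object encoding still fits in a 31-bit Smi payload (6 + 4 + 10 + 10 = 30 bits). A minimal, self-contained sketch of that shift-and-mask packing, using a hypothetical Field template rather than V8's real BitField class:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for a BitField-style helper: "size" bits at "shift".
template <typename T, int shift, int size>
struct Field {
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) & ((1u << size) - 1u)) << shift;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits >> shift) & ((1u << size) - 1u));
  }
};

static const int kDescriptorIndexBitCount = 10;

// Mirrors the new layout: representation at bits 6..9, descriptor pointer at
// bits 10..19, field index at bits 20..29.
typedef Field<uint32_t, 6, 4> RepresentationBits;
typedef Field<uint32_t, 10, kDescriptorIndexBitCount> DescriptorBits;
typedef Field<uint32_t, 10 + kDescriptorIndexBitCount,
              kDescriptorIndexBitCount> FieldIndexBits;

int main() {
  uint32_t packed = RepresentationBits::encode(5) |
                    DescriptorBits::encode(123) |
                    FieldIndexBits::encode(45);
  assert(RepresentationBits::decode(packed) == 5);
  assert(DescriptorBits::decode(packed) == 123);
  assert(FieldIndexBits::decode(packed) == 45);
  return 0;
}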
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 83a6a365b8..2f72eec48e 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -35,6 +35,7 @@ void LookupResult::Iterate(ObjectVisitor* visitor) {
LookupResult* current = this; // Could be NULL.
while (current != NULL) {
visitor->VisitPointer(BitCast<Object**>(&current->holder_));
+ visitor->VisitPointer(BitCast<Object**>(&current->transition_));
current = current->next_;
}
}
@@ -82,13 +83,13 @@ void LookupResult::Print(FILE* out) {
case FIELD:
PrintF(out, " -type = map transition\n");
PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
+ GetTransitionTarget()->Print(out);
PrintF(out, "\n");
return;
case CONSTANT:
PrintF(out, " -type = constant property transition\n");
PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
+ GetTransitionTarget()->Print(out);
PrintF(out, "\n");
return;
case CALLBACKS:
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 0f78ba478e..baa5a0f993 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -184,14 +184,15 @@ class LookupResult BASE_EMBEDDED {
next_(isolate->top_lookup_result()),
lookup_type_(NOT_FOUND),
holder_(NULL),
+ transition_(NULL),
cacheable_(true),
details_(NONE, NONEXISTENT, Representation::None()) {
- isolate->SetTopLookupResult(this);
+ isolate->set_top_lookup_result(this);
}
~LookupResult() {
ASSERT(isolate()->top_lookup_result() == this);
- isolate()->SetTopLookupResult(next_);
+ isolate()->set_top_lookup_result(next_);
}
Isolate* isolate() const { return isolate_; }
@@ -199,6 +200,7 @@ class LookupResult BASE_EMBEDDED {
void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
lookup_type_ = DESCRIPTOR_TYPE;
holder_ = holder;
+ transition_ = NULL;
details_ = details;
number_ = number;
}
@@ -209,16 +211,18 @@ class LookupResult BASE_EMBEDDED {
return value->FitsRepresentation(details_.representation());
}
- void TransitionResult(JSObject* holder, int number) {
+ void TransitionResult(JSObject* holder, Map* target) {
lookup_type_ = TRANSITION_TYPE;
details_ = PropertyDetails(NONE, TRANSITION, Representation::None());
holder_ = holder;
- number_ = number;
+ transition_ = target;
+ number_ = 0xAAAA;
}
void DictionaryResult(JSObject* holder, int entry) {
lookup_type_ = DICTIONARY_TYPE;
holder_ = holder;
+ transition_ = NULL;
details_ = holder->property_dictionary()->DetailsAt(entry);
number_ = entry;
}
@@ -226,6 +230,7 @@ class LookupResult BASE_EMBEDDED {
void HandlerResult(JSProxy* proxy) {
lookup_type_ = HANDLER_TYPE;
holder_ = proxy;
+ transition_ = NULL;
details_ = PropertyDetails(NONE, HANDLER, Representation::Tagged());
cacheable_ = false;
}
@@ -233,6 +238,7 @@ class LookupResult BASE_EMBEDDED {
void InterceptorResult(JSObject* holder) {
lookup_type_ = INTERCEPTOR_TYPE;
holder_ = holder;
+ transition_ = NULL;
details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::Tagged());
}
@@ -240,92 +246,93 @@ class LookupResult BASE_EMBEDDED {
lookup_type_ = NOT_FOUND;
details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
holder_ = NULL;
+ transition_ = NULL;
}
- JSObject* holder() {
+ JSObject* holder() const {
ASSERT(IsFound());
return JSObject::cast(holder_);
}
- JSProxy* proxy() {
- ASSERT(IsFound());
+ JSProxy* proxy() const {
+ ASSERT(IsHandler());
return JSProxy::cast(holder_);
}
- PropertyType type() {
+ PropertyType type() const {
ASSERT(IsFound());
return details_.type();
}
- Representation representation() {
+ Representation representation() const {
ASSERT(IsFound());
ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.representation();
}
- PropertyAttributes GetAttributes() {
+ PropertyAttributes GetAttributes() const {
ASSERT(!IsTransition());
ASSERT(IsFound());
ASSERT(details_.type() != NONEXISTENT);
return details_.attributes();
}
- PropertyDetails GetPropertyDetails() {
+ PropertyDetails GetPropertyDetails() const {
ASSERT(!IsTransition());
return details_;
}
- bool IsFastPropertyType() {
+ bool IsFastPropertyType() const {
ASSERT(IsFound());
return IsTransition() || type() != NORMAL;
}
// Property callbacks do not include transitions to callbacks.
- bool IsPropertyCallbacks() {
+ bool IsPropertyCallbacks() const {
ASSERT(!(details_.type() == CALLBACKS && !IsFound()));
return details_.type() == CALLBACKS;
}
- bool IsReadOnly() {
+ bool IsReadOnly() const {
ASSERT(IsFound());
ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.IsReadOnly();
}
- bool IsField() {
+ bool IsField() const {
ASSERT(!(details_.type() == FIELD && !IsFound()));
return details_.type() == FIELD;
}
- bool IsNormal() {
+ bool IsNormal() const {
ASSERT(!(details_.type() == NORMAL && !IsFound()));
return details_.type() == NORMAL;
}
- bool IsConstant() {
+ bool IsConstant() const {
ASSERT(!(details_.type() == CONSTANT && !IsFound()));
return details_.type() == CONSTANT;
}
- bool IsConstantFunction() {
+ bool IsConstantFunction() const {
return IsConstant() && GetValue()->IsJSFunction();
}
- bool IsDontDelete() { return details_.IsDontDelete(); }
- bool IsDontEnum() { return details_.IsDontEnum(); }
- bool IsFound() { return lookup_type_ != NOT_FOUND; }
- bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
- bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
- bool IsInterceptor() { return lookup_type_ == INTERCEPTOR_TYPE; }
+ bool IsDontDelete() const { return details_.IsDontDelete(); }
+ bool IsDontEnum() const { return details_.IsDontEnum(); }
+ bool IsFound() const { return lookup_type_ != NOT_FOUND; }
+ bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
+ bool IsHandler() const { return lookup_type_ == HANDLER_TYPE; }
+ bool IsInterceptor() const { return lookup_type_ == INTERCEPTOR_TYPE; }
// Is the result a property, excluding transitions and the null descriptor?
- bool IsProperty() {
+ bool IsProperty() const {
return IsFound() && !IsTransition();
}
- bool IsDataProperty() {
+ bool IsDataProperty() const {
switch (type()) {
case FIELD:
case NORMAL:
@@ -345,10 +352,10 @@ class LookupResult BASE_EMBEDDED {
return false;
}
- bool IsCacheable() { return cacheable_; }
+ bool IsCacheable() const { return cacheable_; }
void DisallowCaching() { cacheable_ = false; }
- Object* GetLazyValue() {
+ Object* GetLazyValue() const {
switch (type()) {
case FIELD:
return holder()->RawFastPropertyAt(GetFieldIndex().field_index());
@@ -373,88 +380,62 @@ class LookupResult BASE_EMBEDDED {
return NULL;
}
- Map* GetTransitionTarget(Map* map) {
- ASSERT(IsTransition());
- TransitionArray* transitions = map->transitions();
- return transitions->GetTarget(number_);
- }
-
- Map* GetTransitionTarget() {
- return GetTransitionTarget(holder()->map());
+ Map* GetTransitionTarget() const {
+ return transition_;
}
- PropertyDetails GetTransitionDetails(Map* map) {
+ PropertyDetails GetTransitionDetails() const {
ASSERT(IsTransition());
- TransitionArray* transitions = map->transitions();
- return transitions->GetTargetDetails(number_);
- }
-
- PropertyDetails GetTransitionDetails() {
- return GetTransitionDetails(holder()->map());
- }
-
- bool IsTransitionToField(Map* map) {
- return IsTransition() && GetTransitionDetails(map).type() == FIELD;
+ return transition_->GetLastDescriptorDetails();
}
- bool IsTransitionToConstant(Map* map) {
- return IsTransition() && GetTransitionDetails(map).type() == CONSTANT;
+ bool IsTransitionToField() const {
+ return IsTransition() && GetTransitionDetails().type() == FIELD;
}
- Map* GetTransitionMap() {
- ASSERT(IsTransition());
- return Map::cast(GetValue());
- }
-
- Map* GetTransitionMapFromMap(Map* map) {
- ASSERT(IsTransition());
- return map->transitions()->GetTarget(number_);
- }
-
- int GetTransitionIndex() {
- ASSERT(IsTransition());
- return number_;
+ bool IsTransitionToConstant() const {
+ return IsTransition() && GetTransitionDetails().type() == CONSTANT;
}
- int GetDescriptorIndex() {
+ int GetDescriptorIndex() const {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
return number_;
}
- PropertyIndex GetFieldIndex() {
+ PropertyIndex GetFieldIndex() const {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map()));
}
- int GetLocalFieldIndexFromMap(Map* map) {
+ int GetLocalFieldIndexFromMap(Map* map) const {
return GetFieldIndexFromMap(map) - map->inobject_properties();
}
- int GetDictionaryEntry() {
+ int GetDictionaryEntry() const {
ASSERT(lookup_type_ == DICTIONARY_TYPE);
return number_;
}
- JSFunction* GetConstantFunction() {
+ JSFunction* GetConstantFunction() const {
ASSERT(type() == CONSTANT);
return JSFunction::cast(GetValue());
}
- Object* GetConstantFromMap(Map* map) {
+ Object* GetConstantFromMap(Map* map) const {
ASSERT(type() == CONSTANT);
return GetValueFromMap(map);
}
- JSFunction* GetConstantFunctionFromMap(Map* map) {
+ JSFunction* GetConstantFunctionFromMap(Map* map) const {
return JSFunction::cast(GetConstantFromMap(map));
}
- Object* GetConstant() {
+ Object* GetConstant() const {
ASSERT(type() == CONSTANT);
return GetValue();
}
- Object* GetCallbackObject() {
+ Object* GetCallbackObject() const {
ASSERT(type() == CALLBACKS && !IsTransition());
return GetValue();
}
@@ -463,7 +444,7 @@ class LookupResult BASE_EMBEDDED {
void Print(FILE* out);
#endif
- Object* GetValue() {
+ Object* GetValue() const {
if (lookup_type_ == DESCRIPTOR_TYPE) {
return GetValueFromMap(holder()->map());
}
@@ -501,6 +482,7 @@ class LookupResult BASE_EMBEDDED {
} lookup_type_;
JSReceiver* holder_;
+ Map* transition_;
int number_;
bool cacheable_;
PropertyDetails details_;
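The LookupResult changes above trade the transition index for a cached Map* transition_, which is why the matching property.cc hunk also teaches LookupResult::Iterate to visit that pointer: any raw heap pointer the result caches has to be reported to the GC so a moving collector can update it. A minimal sketch of that visitor pattern, using simplified stand-in types rather than V8's real classes:

#include <cstdio>

struct Map { const char* name; };  // Hypothetical stand-in for v8::internal::Map.

struct ObjectVisitor {
  virtual void VisitPointer(Map** slot) = 0;
  virtual ~ObjectVisitor() {}
};

struct LookupResult {
  Map* holder_;
  Map* transition_;  // Cached transition target instead of an index into the map.

  // Every cached raw pointer must be handed to the visitor; forgetting
  // transition_ here would leave it dangling after a moving GC.
  void Iterate(ObjectVisitor* visitor) {
    visitor->VisitPointer(&holder_);
    visitor->VisitPointer(&transition_);
  }
};

// A visitor that only reports which slots would be updated.
struct PrintingVisitor : ObjectVisitor {
  virtual void VisitPointer(Map** slot) {
    std::printf("visiting slot holding: %s\n", (*slot)->name);
  }
};

int main() {
  Map holder = { "holder map" };
  Map target = { "transition target map" };
  LookupResult result = { &holder, &target };
  PrintingVisitor visitor;
  result.Iterate(&visitor);
  return 0;
}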
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index 4c03f21538..06be087c71 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -72,7 +72,8 @@ function ProxyCreateFunction(handler, callTrap, constructTrap) {
function SetUpProxy() {
%CheckIsBootstrapping()
- global.Proxy = $Proxy;
+ var global_receiver = %GlobalReceiver(global);
+ global_receiver.Proxy = $Proxy;
// Set up non-enumerable properties of the Proxy object.
InstallFunctions($Proxy, DONT_ENUM, [
diff --git a/deps/v8/src/qnx-math.h b/deps/v8/src/qnx-math.h
new file mode 100644
index 0000000000..bd8474599b
--- /dev/null
+++ b/deps/v8/src/qnx-math.h
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_QNX_MATH_H_
+#define V8_QNX_MATH_H_
+
+#include <cmath>
+
+#undef fpclassify
+#undef isfinite
+#undef isinf
+#undef isnan
+#undef isnormal
+#undef signbit
+
+using std::lrint;
+
+#endif // V8_QNX_MATH_H_
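qnx-math.h pulls in <cmath> and then #undefs the C99 classification names (fpclassify, isnan, and friends), presumably because the QNX headers leave them defined as macros and they would otherwise collide with the std:: declarations used elsewhere in V8. A small, self-contained sketch of the same shim pattern on a hypothetical platform:

// Include the standard header first, then drop any lingering libc macros so
// that qualified std:: calls resolve to real functions instead of macros.
#include <cmath>
#include <cstdio>
#include <limits>

#ifdef isnan
#undef isnan
#endif
#ifdef isinf
#undef isinf
#endif

int main() {
  double qnan = std::numeric_limits<double>::quiet_NaN();
  double pinf = std::numeric_limits<double>::infinity();
  std::printf("isnan: %d, isinf: %d\n",
              std::isnan(qnan) ? 1 : 0, std::isinf(pinf) ? 1 : 0);
  return 0;
}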
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index 1ce1fa4b24..bf289186e2 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -38,8 +38,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler(assembler->zone()),
assembler_(assembler) {
unsigned int type = assembler->Implementation();
- ASSERT(type < 5);
- const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
+ ASSERT(type < 6);
+ const char* impl_names[] = {"IA32", "ARM", "A64", "MIPS", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
@@ -427,7 +427,7 @@ RegExpMacroAssembler::IrregexpImplementation
Handle<HeapObject> RegExpMacroAssemblerTracer::GetCode(Handle<String> source) {
- PrintF(" GetCode(%s);\n", *(source->ToCString()));
+ PrintF(" GetCode(%s);\n", source->ToCString().get());
return assembler_->GetCode(source);
}
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index 1ff8bd9797..2ac9c86d82 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -53,6 +53,7 @@ class RegExpMacroAssembler {
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
+ kA64Implementation,
kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 70b362fd7d..2b68ed1c90 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -43,8 +43,8 @@ class Processor: public AstVisitor {
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(zone->isolate(), zone) {
- InitializeAstVisitor(zone->isolate());
+ factory_(zone) {
+ InitializeAstVisitor(zone);
}
virtual ~Processor() { }
@@ -207,11 +207,6 @@ void Processor::VisitSwitchStatement(SwitchStatement* node) {
}
-void Processor::VisitCaseClause(CaseClause* clause) {
- UNREACHABLE();
-}
-
-
void Processor::VisitContinueStatement(ContinueStatement* node) {
is_set_ = false;
}
@@ -263,7 +258,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
Variable* result = scope->NewTemporary(
- info->isolate()->factory()->result_string());
+ info->isolate()->factory()->dot_result_string());
Processor processor(result, info->zone());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 7c900b37d9..5784e4d5aa 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -45,24 +45,6 @@ namespace v8 {
namespace internal {
-// Optimization sampler constants.
-static const int kSamplerFrameCount = 2;
-
-// Constants for statistical profiler.
-static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
-
-static const int kSamplerTicksBetweenThresholdAdjustment = 32;
-
-static const int kSamplerThresholdInit = 3;
-static const int kSamplerThresholdMin = 1;
-static const int kSamplerThresholdDelta = 1;
-
-static const int kSamplerThresholdSizeFactorInit = 3;
-
-static const int kSizeLimit = 1500;
-
-// Constants for counter based profiler.
-
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
@@ -84,7 +66,7 @@ static const int kOSRCodeSizeAllowanceBase =
100 * FullCodeGenerator::kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowancePerTick =
- 3 * FullCodeGenerator::kCodeSizeMultiplier;
+ 4 * FullCodeGenerator::kCodeSizeMultiplier;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
@@ -94,14 +76,7 @@ static const int kMaxSizeEarlyOpt =
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
: isolate_(isolate),
- sampler_threshold_(kSamplerThresholdInit),
- sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
- sampler_ticks_until_threshold_adjustment_(
- kSamplerTicksBetweenThresholdAdjustment),
- sampler_window_position_(0),
- any_ic_changed_(false),
- code_generated_(false) {
- ClearSampleBuffer();
+ any_ic_changed_(false) {
}
@@ -139,8 +114,9 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
}
- if (FLAG_concurrent_recompilation && !isolate_->bootstrapper()->IsActive()) {
- if (FLAG_concurrent_osr &&
+ if (isolate_->concurrent_recompilation_enabled() &&
+ !isolate_->bootstrapper()->IsActive()) {
+ if (isolate_->concurrent_osr_enabled() &&
isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
// Do not attempt regular recompilation if we already queued this for OSR.
// TODO(yangguo): This is necessary so that we don't install optimized
@@ -148,11 +124,11 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
// recompilation race. This goes away as soon as OSR becomes one-shot.
return;
}
- ASSERT(!function->IsInRecompileQueue());
- function->MarkForConcurrentRecompilation();
+ ASSERT(!function->IsInOptimizationQueue());
+ function->MarkForConcurrentOptimization();
} else {
// The next call to the function will trigger optimization.
- function->MarkForLazyRecompilation();
+ function->MarkForOptimization();
}
}
@@ -188,38 +164,6 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
}
-void RuntimeProfiler::ClearSampleBuffer() {
- memset(sampler_window_, 0, sizeof(sampler_window_));
- memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
-}
-
-
-int RuntimeProfiler::LookupSample(JSFunction* function) {
- int weight = 0;
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* sample = sampler_window_[i];
- if (sample != NULL) {
- bool fits = FLAG_lookup_sample_by_shared
- ? (function->shared() == JSFunction::cast(sample)->shared())
- : (function == JSFunction::cast(sample));
- if (fits) {
- weight += sampler_window_weight_[i];
- }
- }
- }
- return weight;
-}
-
-
-void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
- ASSERT(IsPowerOf2(kSamplerWindowSize));
- sampler_window_[sampler_window_position_] = function;
- sampler_window_weight_[sampler_window_position_] = weight;
- sampler_window_position_ = (sampler_window_position_ + 1) &
- (kSamplerWindowSize - 1);
-}
-
-
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
@@ -230,39 +174,19 @@ void RuntimeProfiler::OptimizeNow() {
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
// (eagerly or lazily).
- JSFunction* samples[kSamplerFrameCount];
- int sample_count = 0;
int frame_count = 0;
- int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
- : kSamplerFrameCount;
+ int frame_count_limit = FLAG_frame_count;
for (JavaScriptFrameIterator it(isolate_);
frame_count++ < frame_count_limit && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
JSFunction* function = frame->function();
- if (!FLAG_watch_ic_patching) {
- // Adjust threshold each time we have processed
- // a certain number of ticks.
- if (sampler_ticks_until_threshold_adjustment_ > 0) {
- sampler_ticks_until_threshold_adjustment_--;
- if (sampler_ticks_until_threshold_adjustment_ <= 0) {
- // If the threshold is not already at the minimum
- // modify and reset the ticks until next adjustment.
- if (sampler_threshold_ > kSamplerThresholdMin) {
- sampler_threshold_ -= kSamplerThresholdDelta;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
- }
- }
- }
- }
-
SharedFunctionInfo* shared = function->shared();
Code* shared_code = shared->code();
if (shared_code->kind() != Code::FUNCTION) continue;
- if (function->IsInRecompileQueue()) continue;
+ if (function->IsInOptimizationQueue()) continue;
if (FLAG_always_osr &&
shared_code->allow_osr_at_loop_nesting_level() == 0) {
@@ -274,8 +198,8 @@ void RuntimeProfiler::OptimizeNow() {
}
// Fall through and do a normal optimized compile as well.
} else if (!frame->is_optimized() &&
- (function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForConcurrentRecompilation() ||
+ (function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized())) {
// Attempt OSR if we are still running unoptimized code even though
// the function has long been marked or even already been optimized.
@@ -321,116 +245,36 @@ void RuntimeProfiler::OptimizeNow() {
}
if (!function->IsOptimizable()) continue;
- if (FLAG_watch_ic_patching) {
- int ticks = shared_code->profiler_ticks();
+ int ticks = shared_code->profiler_ticks();
- if (ticks >= kProfilerTicksBeforeOptimization) {
- int typeinfo, total, percentage;
- GetICCounts(shared_code, &typeinfo, &total, &percentage);
- if (percentage >= FLAG_type_info_threshold) {
- // If this particular function hasn't had any ICs patched for enough
- // ticks, optimize it now.
- Optimize(function, "hot and stable");
- } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
- Optimize(function, "not much type info but very hot");
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
- if (FLAG_trace_opt_verbose) {
- PrintF("[not yet optimizing ");
- function->PrintName();
- PrintF(", not enough type info: %d/%d (%d%%)]\n",
- typeinfo, total, percentage);
- }
- }
- } else if (!any_ic_changed_ &&
- shared_code->instruction_size() < kMaxSizeEarlyOpt) {
- // If no IC was patched since the last tick and this function is very
- // small, optimistically optimize it now.
- Optimize(function, "small function");
+ if (ticks >= kProfilerTicksBeforeOptimization) {
+ int typeinfo, total, percentage;
+ GetICCounts(shared_code, &typeinfo, &total, &percentage);
+ if (percentage >= FLAG_type_info_threshold) {
+ // If this particular function hasn't had any ICs patched for enough
+ // ticks, optimize it now.
+ Optimize(function, "hot and stable");
+ } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
+ Optimize(function, "not much type info but very hot");
} else {
shared_code->set_profiler_ticks(ticks + 1);
+ if (FLAG_trace_opt_verbose) {
+ PrintF("[not yet optimizing ");
+ function->PrintName();
+ PrintF(", not enough type info: %d/%d (%d%%)]\n",
+ typeinfo, total, percentage);
+ }
}
- } else { // !FLAG_watch_ic_patching
- samples[sample_count++] = function;
-
- int function_size = function->shared()->SourceSize();
- int threshold_size_factor = (function_size > kSizeLimit)
- ? sampler_threshold_size_factor_
- : 1;
-
- int threshold = sampler_threshold_ * threshold_size_factor;
-
- if (LookupSample(function) >= threshold) {
- Optimize(function, "sampler window lookup");
- }
- }
- }
- if (FLAG_watch_ic_patching) {
- any_ic_changed_ = false;
- } else { // !FLAG_watch_ic_patching
- // Add the collected functions as samples. It's important not to do
- // this as part of collecting them because this will interfere with
- // the sample lookup in case of recursive functions.
- for (int i = 0; i < sample_count; i++) {
- AddSample(samples[i], kSamplerFrameWeight[i]);
- }
- }
-}
-
-
-void RuntimeProfiler::SetUp() {
- if (!FLAG_watch_ic_patching) {
- ClearSampleBuffer();
- }
-}
-
-
-void RuntimeProfiler::Reset() {
- if (!FLAG_watch_ic_patching) {
- sampler_threshold_ = kSamplerThresholdInit;
- sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
- }
-}
-
-
-void RuntimeProfiler::TearDown() {
- // Nothing to do.
-}
-
-
-// Update the pointers in the sampler window after a GC.
-void RuntimeProfiler::UpdateSamplesAfterScavenge() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window_[i];
- if (function != NULL && isolate_->heap()->InNewSpace(function)) {
- MapWord map_word = HeapObject::cast(function)->map_word();
- if (map_word.IsForwardingAddress()) {
- sampler_window_[i] = map_word.ToForwardingAddress();
- } else {
- sampler_window_[i] = NULL;
- }
+ } else if (!any_ic_changed_ &&
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+ // If no IC was patched since the last tick and this function is very
+ // small, optimistically optimize it now.
+ Optimize(function, "small function");
+ } else {
+ shared_code->set_profiler_ticks(ticks + 1);
}
}
-}
-
-
-void RuntimeProfiler::RemoveDeadSamples() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window_[i];
- if (function != NULL &&
- !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
- sampler_window_[i] = NULL;
- }
- }
-}
-
-
-void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- visitor->VisitPointer(&sampler_window_[i]);
- }
+ any_ic_changed_ = false;
}
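With the sampler window gone, the profiler above is purely tick driven: once a function has accumulated enough profiler ticks it is optimized if its IC type feedback crosses the --type-info-threshold percentage, or anyway once it is very hot; separately, a tiny function is optimized early if no IC changed since the last tick. A standalone sketch of that decision, with placeholder constants standing in for the real flag values:

#include <cstdio>

// Placeholder values; the real thresholds come from runtime-profiler.cc and
// the --type-info-threshold flag.
static const int kProfilerTicksBeforeOptimization = 2;
static const int kTicksWhenNotEnoughTypeInfo = 16;
static const int kTypeInfoThresholdPercent = 25;
static const int kMaxSizeEarlyOpt = 1000;

// Returns the reason a function would be optimized, or a "wait" marker.
const char* DecideOptimization(int ticks, int type_info_percentage,
                               int instruction_size, bool any_ic_changed) {
  if (ticks >= kProfilerTicksBeforeOptimization) {
    if (type_info_percentage >= kTypeInfoThresholdPercent) {
      return "hot and stable";
    }
    if (ticks >= kTicksWhenNotEnoughTypeInfo) {
      return "not much type info but very hot";
    }
    return "wait (bump profiler ticks)";
  }
  if (!any_ic_changed && instruction_size < kMaxSizeEarlyOpt) {
    return "small function";
  }
  return "wait (bump profiler ticks)";
}

int main() {
  std::printf("%s\n", DecideOptimization(3, 60, 5000, true));
  std::printf("%s\n", DecideOptimization(20, 5, 5000, true));
  std::printf("%s\n", DecideOptimization(0, 0, 200, false));
  return 0;
}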
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index 28d6d322fd..efd9b50eb5 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -45,47 +45,18 @@ class RuntimeProfiler {
void OptimizeNow();
- void SetUp();
- void Reset();
- void TearDown();
-
void NotifyICChanged() { any_ic_changed_ = true; }
- // Rate limiting support.
-
- void UpdateSamplesAfterScavenge();
- void RemoveDeadSamples();
- void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
-
void AttemptOnStackReplacement(JSFunction* function);
private:
- static const int kSamplerWindowSize = 16;
-
void Optimize(JSFunction* function, const char* reason);
- void ClearSampleBuffer();
-
- void ClearSampleBufferNewSpaceEntries();
-
- int LookupSample(JSFunction* function);
-
- void AddSample(JSFunction* function, int weight);
-
bool CodeSizeOKForOSR(Code* shared_code);
Isolate* isolate_;
- int sampler_threshold_;
- int sampler_threshold_size_factor_;
- int sampler_ticks_until_threshold_adjustment_;
-
- Object* sampler_window_[kSamplerWindowSize];
- int sampler_window_position_;
- int sampler_window_weight_[kSamplerWindowSize];
-
bool any_ic_changed_;
- bool code_generated_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index adef16cdd1..50621a997d 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -259,9 +259,11 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
constant_properties,
&is_result_from_cache);
+ PretenureFlag pretenure_flag =
+ isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
+
Handle<JSObject> boilerplate =
- isolate->factory()->NewJSObjectFromMap(
- map, isolate->heap()->GetPretenureMode());
+ isolate->factory()->NewJSObjectFromMap(map, pretenure_flag);
// Normalize the elements of the boilerplate to save space if needed.
if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
@@ -367,9 +369,11 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<JSFunction> constructor(
JSFunction::NativeContextFromLiterals(*literals)->array_function());
+ PretenureFlag pretenure_flag =
+ isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
+
Handle<JSArray> object = Handle<JSArray>::cast(
- isolate->factory()->NewJSObject(
- constructor, isolate->heap()->GetPretenureMode()));
+ isolate->factory()->NewJSObject(constructor, pretenure_flag));
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
@@ -554,50 +558,52 @@ static Handle<AllocationSite> GetLiteralAllocationSite(
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
-
+static MaybeObject* CreateArrayLiteralImpl(Isolate* isolate,
+ Handle<FixedArray> literals,
+ int literals_index,
+ Handle<FixedArray> elements,
+ int flags) {
Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals,
literals_index, elements);
RETURN_IF_EMPTY_HANDLE(isolate, site);
+ bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0;
Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()));
- AllocationSiteUsageContext usage_context(isolate, site, true);
+ AllocationSiteUsageContext usage_context(isolate, site, enable_mementos);
usage_context.EnterNewScope();
- Handle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context);
+ JSObject::DeepCopyHints hints = (flags & ArrayLiteral::kShallowElements) == 0
+ ? JSObject::kNoHints
+ : JSObject::kObjectIsShallowArray;
+ Handle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context,
+ hints);
usage_context.ExitScope(site, boilerplate);
RETURN_IF_EMPTY_HANDLE(isolate, copy);
return *copy;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
- Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals,
- literals_index, elements);
- RETURN_IF_EMPTY_HANDLE(isolate, site);
+ return CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
+ flags);
+}
- JSObject* boilerplate = JSObject::cast(site->transition_info());
- if (boilerplate->elements()->map() ==
- isolate->heap()->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
- }
- if (AllocationSite::GetMode(boilerplate->GetElementsKind()) ==
- TRACK_ALLOCATION_SITE) {
- return isolate->heap()->CopyJSObject(boilerplate, *site);
- }
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralStubBailout) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
- return isolate->heap()->CopyJSObject(boilerplate);
+ return CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
+ ArrayLiteral::kShallowElements);
}
@@ -614,6 +620,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateSymbol) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreatePrivateSymbol) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ Handle<Object> name(args[0], isolate);
+ RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
+ Symbol* symbol;
+ MaybeObject* maybe = isolate->heap()->AllocatePrivateSymbol();
+ if (!maybe->To(&symbol)) return maybe;
+ if (name->IsString()) symbol->set_name(*name);
+ return symbol;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -622,6 +641,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolIsPrivate) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Symbol, symbol, 0);
+ return isolate->heap()->ToBoolean(symbol->is_private());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -699,13 +726,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
void Runtime::FreeArrayBuffer(Isolate* isolate,
JSArrayBuffer* phantom_array_buffer) {
+ if (phantom_array_buffer->should_be_freed()) {
+ ASSERT(phantom_array_buffer->is_external());
+ free(phantom_array_buffer->backing_store());
+ }
if (phantom_array_buffer->is_external()) return;
size_t allocated_length = NumberToSize(
isolate, phantom_array_buffer->byte_length());
isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<intptr_t>(allocated_length));
+ -static_cast<int64_t>(allocated_length));
CHECK(V8::ArrayBufferAllocator() != NULL);
V8::ArrayBufferAllocator()->Free(
phantom_array_buffer->backing_store(),
@@ -836,58 +867,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferIsView) {
}
-enum TypedArrayId {
- // arrayIds below should be synchromized with typedarray.js natives.
- ARRAY_ID_UINT8 = 1,
- ARRAY_ID_INT8 = 2,
- ARRAY_ID_UINT16 = 3,
- ARRAY_ID_INT16 = 4,
- ARRAY_ID_UINT32 = 5,
- ARRAY_ID_INT32 = 6,
- ARRAY_ID_FLOAT32 = 7,
- ARRAY_ID_FLOAT64 = 8,
- ARRAY_ID_UINT8C = 9
-};
-
-static void ArrayIdToTypeAndSize(
+void Runtime::ArrayIdToTypeAndSize(
int arrayId, ExternalArrayType* array_type, size_t* element_size) {
switch (arrayId) {
- case ARRAY_ID_UINT8:
- *array_type = kExternalUnsignedByteArray;
- *element_size = 1;
- break;
- case ARRAY_ID_INT8:
- *array_type = kExternalByteArray;
- *element_size = 1;
- break;
- case ARRAY_ID_UINT16:
- *array_type = kExternalUnsignedShortArray;
- *element_size = 2;
- break;
- case ARRAY_ID_INT16:
- *array_type = kExternalShortArray;
- *element_size = 2;
- break;
- case ARRAY_ID_UINT32:
- *array_type = kExternalUnsignedIntArray;
- *element_size = 4;
- break;
- case ARRAY_ID_INT32:
- *array_type = kExternalIntArray;
- *element_size = 4;
- break;
- case ARRAY_ID_FLOAT32:
- *array_type = kExternalFloatArray;
- *element_size = 4;
- break;
- case ARRAY_ID_FLOAT64:
- *array_type = kExternalDoubleArray;
- *element_size = 8;
- break;
- case ARRAY_ID_UINT8C:
- *array_type = kExternalPixelArray;
- *element_size = 1;
+#define ARRAY_ID_CASE(Type, type, TYPE, ctype, size) \
+ case ARRAY_ID_##TYPE: \
+ *array_type = kExternal##Type##Array; \
+ *element_size = size; \
break;
+
+ TYPED_ARRAYS(ARRAY_ID_CASE)
+#undef ARRAY_ID_CASE
+
default:
UNREACHABLE();
}
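The hunk above replaces the hand-written id-to-type switch with an X-macro expansion: TYPED_ARRAYS(ARRAY_ID_CASE) invokes the case-generating macro once per typed-array kind, so the list of kinds lives in one place. A minimal, self-contained sketch of the same pattern, using a made-up MY_TYPED_ARRAYS list rather than V8's real one:

#include <cstdio>

// Hypothetical list macro in the spirit of TYPED_ARRAYS(V):
// each entry expands V(Type, lower_case, UPPER_CASE, element_size).
#define MY_TYPED_ARRAYS(V) \
  V(Uint8, uint8, UINT8, 1) \
  V(Int32, int32, INT32, 4) \
  V(Float64, float64, FLOAT64, 8)

enum ArrayId {
#define ARRAY_ID_ENUM(Type, type, TYPE, size) ARRAY_ID_##TYPE,
  MY_TYPED_ARRAYS(ARRAY_ID_ENUM)
#undef ARRAY_ID_ENUM
};

// The dispatch is generated from the same list, so adding a new kind to the
// macro updates both the enum and the switch without further edits.
int ElementSize(ArrayId id) {
  switch (id) {
#define ARRAY_ID_CASE(Type, type, TYPE, size) \
    case ARRAY_ID_##TYPE:                     \
      return size;
    MY_TYPED_ARRAYS(ARRAY_ID_CASE)
#undef ARRAY_ID_CASE
  }
  return 0;
}

int main() {
  std::printf("%d %d %d\n", ElementSize(ARRAY_ID_UINT8),
              ElementSize(ARRAY_ID_INT32), ElementSize(ARRAY_ID_FLOAT64));
  return 0;
}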
@@ -909,9 +900,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
holder->SetInternalField(i, Smi::FromInt(0));
}
- ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
+ ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
holder->set_buffer(*buffer);
holder->set_byte_offset(*byte_offset_object);
@@ -961,9 +952,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
holder->SetInternalField(i, Smi::FromInt(0));
}
- ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
+ ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
size_t length = NumberToSize(isolate, *length_obj);
@@ -1217,7 +1208,10 @@ inline static bool DataViewGetValue(
Handle<Object> byte_offset_obj,
bool is_little_endian,
T* result) {
- size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+ size_t byte_offset = 0;
+ if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+ return false;
+ }
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
size_t data_view_byte_offset =
@@ -1258,7 +1252,10 @@ static bool DataViewSetValue(
Handle<Object> byte_offset_obj,
bool is_little_endian,
T data) {
- size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+ size_t byte_offset = 0;
+ if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+ return false;
+ }
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
size_t data_view_byte_offset =
@@ -1421,7 +1418,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- table = ObjectHashSetAdd(table, key);
+ table = ObjectHashSet::Add(table, key);
holder->set_table(*table);
return isolate->heap()->undefined_value();
}
@@ -1443,7 +1440,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- table = ObjectHashSetRemove(table, key);
+ table = ObjectHashSet::Remove(table, key);
holder->set_table(*table);
return isolate->heap()->undefined_value();
}
@@ -1498,7 +1495,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) {
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
Handle<Object> lookup(table->Lookup(*key), isolate);
Handle<ObjectHashTable> new_table =
- PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
+ ObjectHashTable::Put(table, key, isolate->factory()->the_hole_value());
holder->set_table(*new_table);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -1511,7 +1508,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
+ Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
holder->set_table(*new_table);
return isolate->heap()->undefined_value();
}
@@ -1577,7 +1574,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionDelete) {
weak_collection->table()));
Handle<Object> lookup(table->Lookup(*key), isolate);
Handle<ObjectHashTable> new_table =
- PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
+ ObjectHashTable::Put(table, key, isolate->factory()->the_hole_value());
weak_collection->set_table(*new_table);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -1591,7 +1588,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionSet) {
Handle<Object> value(args[2], isolate);
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
+ Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
weak_collection->set_table(*new_table);
return isolate->heap()->undefined_value();
}
@@ -1654,7 +1651,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
Handle<Object> new_value(
GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
if (!new_value->SameValue(*old_value)) {
- JSObject::EnqueueChangeRecord(obj, "prototype",
+ JSObject::EnqueueChangeRecord(obj, "setPrototype",
isolate->factory()->proto_string(),
old_value);
}
@@ -2929,7 +2926,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
Handle<SharedFunctionInfo> target_shared(target->shared());
Handle<SharedFunctionInfo> source_shared(source->shared());
- if (!JSFunction::EnsureCompiled(source, KEEP_EXCEPTION)) {
+ if (!Compiler::EnsureCompiled(source, KEEP_EXCEPTION)) {
return Failure::Exception();
}
@@ -3008,30 +3005,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- JSFunction* function = frame->function();
+ Handle<JSFunction> function(frame->function());
RUNTIME_ASSERT(function->shared()->is_generator());
- JSGeneratorObject* generator;
+ Handle<JSGeneratorObject> generator;
if (frame->IsConstructor()) {
- generator = JSGeneratorObject::cast(frame->receiver());
+ generator = handle(JSGeneratorObject::cast(frame->receiver()));
} else {
- MaybeObject* maybe_generator =
- isolate->heap()->AllocateJSGeneratorObject(function);
- if (!maybe_generator->To(&generator)) return maybe_generator;
+ generator = isolate->factory()->NewJSGeneratorObject(function);
}
- generator->set_function(function);
+ generator->set_function(*function);
generator->set_context(Context::cast(frame->context()));
generator->set_receiver(frame->receiver());
generator->set_continuation(0);
generator->set_operand_stack(isolate->heap()->empty_fixed_array());
generator->set_stack_handler_index(-1);
- return generator;
+ return *generator;
}
@@ -3098,8 +3093,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
ASSERT_EQ(frame->function(), generator_object->function());
ASSERT(frame->function()->is_compiled());
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
Address pc = generator_object->function()->code()->instruction_start();
int offset = generator_object->continuation();
@@ -4423,10 +4418,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(String, value, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
int start, end;
// We have a fast integer-only case here to avoid a conversion to double in
// the common case where from and to are Smis.
@@ -4443,13 +4438,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
}
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
- RUNTIME_ASSERT(end <= value->length());
+ RUNTIME_ASSERT(end <= string->length());
isolate->counters()->sub_string_runtime()->Increment();
- if (end - start == 1) {
- return isolate->heap()->LookupSingleCharacterStringFromCode(
- value->Get(start));
- }
- return value->SubString(start, end);
+
+ return *isolate->factory()->NewSubString(string, start, end);
}
@@ -4826,6 +4818,19 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
}
+static Handle<Name> ToName(Isolate* isolate, Handle<Object> key) {
+ if (key->IsName()) {
+ return Handle<Name>::cast(key);
+ } else {
+ bool has_pending_exception = false;
+ Handle<Object> converted =
+ Execution::ToString(isolate, key, &has_pending_exception);
+ if (has_pending_exception) return Handle<Name>();
+ return Handle<Name>::cast(converted);
+ }
+}
+
+
MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
Handle<JSReceiver> object,
Handle<Object> key) {
@@ -4838,16 +4843,8 @@ MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
}
// Convert the key to a name - possibly by calling back into JavaScript.
- Handle<Name> name;
- if (key->IsName()) {
- name = Handle<Name>::cast(key);
- } else {
- bool has_pending_exception = false;
- Handle<Object> converted =
- Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- name = Handle<Name>::cast(converted);
- }
+ Handle<Name> name = ToName(isolate, key);
+ RETURN_IF_EMPTY_HANDLE(isolate, name);
return isolate->heap()->ToBoolean(JSReceiver::HasProperty(object, name));
}
@@ -4880,16 +4877,8 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
}
// Convert the key to a name - possibly by calling back into JavaScript.
- Handle<Name> name;
- if (key->IsName()) {
- name = Handle<Name>::cast(key);
- } else {
- bool has_pending_exception = false;
- Handle<Object> converted =
- Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- name = Handle<Name>::cast(converted);
- }
+ Handle<Name> name = ToName(isolate, key);
+ RETURN_IF_EMPTY_HANDLE(isolate, name);
// Check if the name is trivially convertible to an index and get
// the element if so.
@@ -5067,12 +5056,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
- LookupResult result(isolate);
- js_object->LocalLookupRealNamedProperty(*name, &result);
+ LookupResult lookup(isolate);
+ js_object->LocalLookupRealNamedProperty(*name, &lookup);
// Special case for callback properties.
- if (result.IsPropertyCallbacks()) {
- Object* callback = result.GetCallbackObject();
+ if (lookup.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup.GetCallbackObject(), isolate);
// To be compatible with Safari we do not change the value on API objects
// in Object.defineProperty(). Firefox disagrees here, and actually changes
// the value.
@@ -5083,13 +5072,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// setter to update the value instead.
// TODO(mstarzinger): So far this only works if property attributes don't
// change; this should be fixed once we clean up the underlying code.
- if (callback->IsForeign() && result.GetAttributes() == attr) {
+ if (callback->IsForeign() && lookup.GetAttributes() == attr) {
Handle<Object> result_object =
JSObject::SetPropertyWithCallback(js_object,
- handle(callback, isolate),
+ callback,
name,
obj_value,
- handle(result.holder()),
+ handle(lookup.holder()),
kStrictMode);
RETURN_IF_EMPTY_HANDLE(isolate, result_object);
return *result_object;
@@ -5102,8 +5091,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// map. The current version of SetObjectProperty does not handle attributes
// correctly in the case where a property is a field and is reset with
// new attributes.
- if (result.IsFound() &&
- (attr != result.GetAttributes() || result.IsPropertyCallbacks())) {
+ if (lookup.IsFound() &&
+ (attr != lookup.GetAttributes() || lookup.IsPropertyCallbacks())) {
// New attributes - normalize to avoid writing to instance descriptor
if (js_object->IsJSGlobalProxy()) {
// Since the result is a property, the prototype will exist so
@@ -5119,11 +5108,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
return *result;
}
- return Runtime::ForceSetObjectProperty(isolate,
- js_object,
- name,
- obj_value,
- attr);
+ Handle<Object> result = Runtime::ForceSetObjectProperty(isolate, js_object,
+ name,
+ obj_value,
+ attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -5157,49 +5147,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
}
-MaybeObject* Runtime::SetObjectPropertyOrFail(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate,
- SetObjectProperty(isolate, object, key, value, attr, strict_mode));
-}
-
-
-MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode) {
+Handle<Object> Runtime::SetObjectProperty(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr,
+ StrictModeFlag strict_mode) {
SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
- HandleScope scope(isolate);
if (object->IsUndefined() || object->IsNull()) {
Handle<Object> args[2] = { key, object };
Handle<Object> error =
isolate->factory()->NewTypeError("non_object_property_store",
HandleVector(args, 2));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
if (object->IsJSProxy()) {
bool has_pending_exception = false;
Handle<Object> name_object = key->IsSymbol()
? key : Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ if (has_pending_exception) return Handle<Object>(); // exception
Handle<Name> name = Handle<Name>::cast(name_object);
- Handle<Object> result = JSReceiver::SetProperty(
- Handle<JSProxy>::cast(object), name, value, attr, strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ return JSReceiver::SetProperty(Handle<JSProxy>::cast(object), name, value,
+ attr,
+ strict_mode);
}
// If the object isn't a JavaScript object, we ignore the store.
- if (!object->IsJSObject()) return *value;
+ if (!object->IsJSObject()) return value;
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
@@ -5214,24 +5191,26 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
// string does nothing with the assignment then we can ignore such
// assignments.
if (js_object->IsStringObjectWithCharacterAt(index)) {
- return *value;
+ return value;
}
js_object->ValidateElements();
- if (js_object->HasExternalArrayElements()) {
+ if (js_object->HasExternalArrayElements() ||
+ js_object->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number =
Execution::ToNumber(isolate, value, &has_exception);
- if (has_exception) return Failure::Exception();
+ if (has_exception) return Handle<Object>(); // exception
value = number;
}
}
- MaybeObject* result = js_object->SetElement(
- index, *value, attr, strict_mode, true, set_mode);
+ Handle<Object> result = JSObject::SetElement(js_object, index, value, attr,
+ strict_mode,
+ true,
+ set_mode);
js_object->ValidateElements();
- if (result->IsFailure()) return result;
- return *value;
+ return result.is_null() ? result : value;
}
if (key->IsName()) {
@@ -5242,48 +5221,41 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
bool has_exception;
Handle<Object> number =
Execution::ToNumber(isolate, value, &has_exception);
- if (has_exception) return Failure::Exception();
+ if (has_exception) return Handle<Object>(); // exception
value = number;
}
}
- MaybeObject* result = js_object->SetElement(
- index, *value, attr, strict_mode, true, set_mode);
- if (result->IsFailure()) return result;
+ return JSObject::SetElement(js_object, index, value, attr, strict_mode,
+ true,
+ set_mode);
} else {
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- Handle<Object> result =
- JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
}
- return *value;
}
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
Handle<Object> converted =
Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ if (has_pending_exception) return Handle<Object>(); // exception
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(
- index, *value, attr, strict_mode, true, set_mode);
+ return JSObject::SetElement(js_object, index, value, attr, strict_mode,
+ true,
+ set_mode);
} else {
- Handle<Object> result =
- JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
}
}
-MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
- Handle<JSObject> js_object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr) {
- HandleScope scope(isolate);
-
+Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
+ Handle<JSObject> js_object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr) {
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
@@ -5295,24 +5267,24 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
// string does nothing with the assignment then we can ignore such
// assignments.
if (js_object->IsStringObjectWithCharacterAt(index)) {
- return *value;
+ return value;
}
- return js_object->SetElement(
- index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ false,
+ DEFINE_PROPERTY);
}
if (key->IsName()) {
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(
- index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ false,
+ DEFINE_PROPERTY);
} else {
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
- js_object, name, value, attr);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ return JSObject::SetLocalPropertyIgnoreAttributes(js_object, name,
+ value, attr);
}
}
@@ -5320,17 +5292,16 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
bool has_pending_exception = false;
Handle<Object> converted =
Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ if (has_pending_exception) return Handle<Object>(); // exception
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(
- index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ false,
+ DEFINE_PROPERTY);
} else {
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
- js_object, name, value, attr);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ return JSObject::SetLocalPropertyIgnoreAttributes(js_object, name, value,
+ attr);
}
}
@@ -5379,12 +5350,12 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
RUNTIME_ASSERT(
(unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -5398,12 +5369,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
strict_mode = strict_mode_flag;
}
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- attributes,
- strict_mode);
+ Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
+ value,
+ attributes,
+ strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
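// (Annotation, not part of the patch.) The hunks above replace the old
// MaybeObject*/Failure::Exception() error signalling with the handle
// convention used elsewhere in the runtime: an empty handle means "an
// exception is already pending on the isolate". A minimal sketch of that
// convention, using a hypothetical helper name:
//
//   Handle<Object> ToNumberOrEmpty(Isolate* isolate, Handle<Object> value) {
//     bool has_exception = false;
//     Handle<Object> number =
//         Execution::ToNumber(isolate, value, &has_exception);
//     if (has_exception) return Handle<Object>();  // caller tests is_null()
//     return number;
//   }
//
// Callers unwrap with RETURN_IF_EMPTY_HANDLE(isolate, result) before
// dereferencing, exactly as Runtime_SetProperty now does above.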
@@ -5424,11 +5395,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, object, 0);
+
+ if (object->IsJSFunction()) {
+ JSFunction* func = JSFunction::cast(object);
+ func->shared()->set_native(true);
+ }
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInlineBuiltinFlag) {
+ SealHandleScope shs(isolate);
+ RUNTIME_ASSERT(args.length() == 1);
+
Handle<Object> object = args.at<Object>(0);
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(*object);
- func->shared()->set_native(true);
+ func->shared()->set_inline_builtin(true);
}
return isolate->heap()->undefined_value();
}
@@ -5729,6 +5714,7 @@ static int LocalPrototypeChainLength(JSObject* obj) {
// Return the names of the local named properties.
// args[0]: object
+// args[1]: PropertyAttributes as int
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -5736,8 +5722,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
return isolate->heap()->undefined_value();
}
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(include_symbols, 1);
- PropertyAttributes filter = include_symbols ? NONE : SYMBOLIC;
+ CONVERT_SMI_ARG_CHECKED(filter_value, 1);
+ PropertyAttributes filter = static_cast<PropertyAttributes>(filter_value);
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
@@ -5786,32 +5772,55 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
// Get the property names.
jsproto = obj;
- int proto_with_hidden_properties = 0;
int next_copy_index = 0;
+ int hidden_strings = 0;
for (int i = 0; i < length; i++) {
jsproto->GetLocalPropertyNames(*names, next_copy_index, filter);
+ if (i > 0) {
+ // Names from hidden prototypes may already have been added
+ // for inherited function template instances. Count the duplicates
+ // and stub them out; the final copy pass at the end ignores holes.
+ for (int j = next_copy_index;
+ j < next_copy_index + local_property_count[i];
+ j++) {
+ Object* name_from_hidden_proto = names->get(j);
+ for (int k = 0; k < next_copy_index; k++) {
+ if (names->get(k) != isolate->heap()->hidden_string()) {
+ Object* name = names->get(k);
+ if (name_from_hidden_proto == name) {
+ names->set(j, isolate->heap()->hidden_string());
+ hidden_strings++;
+ break;
+ }
+ }
+ }
+ }
+ }
next_copy_index += local_property_count[i];
if (jsproto->HasHiddenProperties()) {
- proto_with_hidden_properties++;
+ hidden_strings++;
}
if (i < length - 1) {
jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
}
}
- // Filter out name of hidden properties object.
- if (proto_with_hidden_properties > 0) {
+ // Filter out name of hidden properties object and
+ // hidden prototype duplicates.
+ if (hidden_strings > 0) {
Handle<FixedArray> old_names = names;
names = isolate->factory()->NewFixedArray(
- names->length() - proto_with_hidden_properties);
+ names->length() - hidden_strings);
int dest_pos = 0;
for (int i = 0; i < total_property_count; i++) {
Object* name = old_names->get(i);
if (name == isolate->heap()->hidden_string()) {
+ hidden_strings--;
continue;
}
names->set(dest_pos++, name);
}
+ ASSERT_EQ(0, hidden_strings);
}
return *isolate->factory()->NewJSArrayWithElements(names);
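// (Annotation, not part of the patch.) The duplicate filtering above is a
// mark-then-compact pattern: names repeated by hidden prototypes are
// overwritten with the hidden_string sentinel, and the final pass copies
// everything else into a right-sized array. Standalone sketch with plain
// arrays (hypothetical helper, not V8 code):
//
//   int CompactWithoutSentinel(Object** names, int length, Object* sentinel,
//                              Object** out) {
//     int dest = 0;
//     for (int i = 0; i < length; i++) {
//       if (names[i] == sentinel) continue;  // skip stubbed-out duplicates
//       out[dest++] = names[i];
//     }
//     return dest;  // number of surviving names
//   }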
@@ -6487,7 +6496,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
if (!maybe_o->ToObject(&o)) return maybe_o;
}
SeqOneByteString* result = SeqOneByteString::cast(o);
- bool has_changed_character;
+ bool has_changed_character = false;
bool is_ascii = FastAsciiConvert<Converter>(
reinterpret_cast<char*>(result->GetChars()),
reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
@@ -6532,36 +6541,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
}
-static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
- return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(String, s, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
- s->TryFlatten();
- int length = s->length();
+ string = FlattenGetString(string);
+ int length = string->length();
int left = 0;
+ UnicodeCache* unicode_cache = isolate->unicode_cache();
if (trimLeft) {
- while (left < length && IsTrimWhiteSpace(s->Get(left))) {
+ while (left < length &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
left++;
}
}
int right = length;
if (trimRight) {
- while (right > left && IsTrimWhiteSpace(s->Get(right - 1))) {
+ while (right > left &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(
+ string->Get(right - 1))) {
right--;
}
}
- return s->SubString(left, right);
+
+ return *isolate->factory()->NewSubString(string, left, right);
}
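// (Annotation, not part of the patch.) Behavioral note on the trim change:
// the removed helper, copied from the deleted lines above, was
//
//   static inline bool OldIsTrimWhiteSpace(unibrow::uchar c) {
//     return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
//   }
//
// The UnicodeCache predicate now used covers the spec WhiteSpace and
// LineTerminator sets, so U+200B ZERO WIDTH SPACE (in neither set) is no
// longer trimmed, while U+FEFF remains trimmable.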
@@ -6776,8 +6785,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- return isolate->heap()->NumberToString(
- number, false, isolate->heap()->GetPretenureMode());
+ return isolate->heap()->NumberToString(number, false);
}
@@ -6964,12 +6972,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, str1, 0);
- CONVERT_ARG_CHECKED(String, str2, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
isolate->counters()->string_add_runtime()->Increment();
- return isolate->heap()->AllocateConsString(str1, str2);
+ return *isolate->factory()->NewConsString(str1, str2);
}
@@ -7254,7 +7262,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
@@ -7314,8 +7322,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
}
}
if (overflow) {
- // Throw OutOfMemory exception for creating too large a string.
- V8::FatalProcessOutOfMemory("Array join result too large.");
+ // Throw an exception if the resulting string is too large. See
+ // https://code.google.com/p/chromium/issues/detail?id=336820
+ // for details.
+ return isolate->Throw(*isolate->factory()->
+ NewRangeError("invalid_string_length",
+ HandleVector<Object>(NULL, 0)));
}
if (is_ascii) {
@@ -7641,7 +7653,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
isolate->counters()->math_acos()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ACOS, x);
+ return isolate->heap()->AllocateHeapNumber(std::acos(x));
}
@@ -7651,7 +7663,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
isolate->counters()->math_asin()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ASIN, x);
+ return isolate->heap()->AllocateHeapNumber(std::asin(x));
}
@@ -7661,7 +7673,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
isolate->counters()->math_atan()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::ATAN, x);
+ return isolate->heap()->AllocateHeapNumber(std::atan(x));
}
@@ -7685,32 +7697,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
if (y < 0) multiplier *= 3;
result = multiplier * kPiDividedBy4;
} else {
- result = atan2(x, y);
+ result = std::atan2(x, y);
}
return isolate->heap()->AllocateHeapNumber(result);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_ceil()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->NumberFromDouble(ceiling(x));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_cos()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::COS, x);
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -7728,7 +7720,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
isolate->counters()->math_floor()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->NumberFromDouble(floor(x));
+ return isolate->heap()->NumberFromDouble(std::floor(x));
}
@@ -7738,7 +7730,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
isolate->counters()->math_log()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
+ return isolate->heap()->AllocateHeapNumber(std::log(x));
}
@@ -7823,17 +7815,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
// Do not call NumberFromDouble() to avoid extra checks.
- return isolate->heap()->AllocateHeapNumber(floor(value + 0.5));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_sin()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::SIN, x);
+ return isolate->heap()->AllocateHeapNumber(std::floor(value + 0.5));
}
@@ -7847,13 +7829,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_fround) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- isolate->counters()->math_tan()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::TAN, x);
+ float xf = static_cast<float>(x);
+ return isolate->heap()->AllocateHeapNumber(xf);
}
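// (Annotation, not part of the patch.) Runtime_Math_fround above implements
// ES6 Math.fround: round the argument to the nearest float32 value and
// return it widened back to a double. Minimal sketch of the semantics:
//
//   double Fround(double x) {
//     return static_cast<double>(static_cast<float>(x));
//   }
//   // e.g. Fround(1.1) == 1.100000023841858, the nearest float32 to 1.1.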
@@ -8080,23 +8062,22 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
if (functions.length() > 1) {
int inlined_jsframe_index = functions.length() - 1;
JSFunction* inlined_function = functions[inlined_jsframe_index];
- Vector<SlotRef> args_slots =
- SlotRef::ComputeSlotMappingForArguments(
- frame,
- inlined_jsframe_index,
- inlined_function->shared()->formal_parameter_count());
+ SlotRefValueBuilder slot_refs(
+ frame,
+ inlined_jsframe_index,
+ inlined_function->shared()->formal_parameter_count());
- int args_count = args_slots.length();
+ int args_count = slot_refs.args_length();
*total_argc = prefix_argc + args_count;
SmartArrayPointer<Handle<Object> > param_data(
NewArray<Handle<Object> >(*total_argc));
+ slot_refs.Prepare(isolate);
for (int i = 0; i < args_count; i++) {
- Handle<Object> val = args_slots[i].GetValue(isolate);
+ Handle<Object> val = slot_refs.GetNext(isolate, 0);
param_data[prefix_argc + i] = val;
}
-
- args_slots.Dispose();
+ slot_refs.Finish(isolate);
return param_data;
} else {
@@ -8230,7 +8211,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
bool exception = false;
Handle<Object> result =
Execution::New(Handle<JSFunction>::cast(bound_function),
- total_argc, *param_data, &exception);
+ total_argc, param_data.get(), &exception);
if (exception) {
return Failure::Exception();
}
@@ -8291,7 +8272,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
// The function should be compiled for the optimization hints to be
// available.
- JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION);
+ Compiler::EnsureCompiled(function, CLEAR_EXCEPTION);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
if (!function->has_initial_map() &&
@@ -8323,42 +8304,53 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileUnoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<JSFunction> function = args.at<JSFunction>(0);
#ifdef DEBUG
if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
- PrintF("[lazy: ");
+ PrintF("[unoptimized: ");
function->PrintName();
PrintF("]\n");
}
#endif
// Compile the target function.
- ASSERT(!function->is_compiled());
- if (!JSFunction::CompileLazy(function, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
+ ASSERT(function->shared()->allows_lazy_compilation());
+
+ Handle<Code> code = Compiler::GetUnoptimizedCode(function);
+ RETURN_IF_EMPTY_HANDLE(isolate, code);
+ function->ReplaceCode(*code);
// All done. Return the compiled code.
ASSERT(function->is_compiled());
- return function->code();
+ ASSERT(function->code()->kind() == Code::FUNCTION ||
+ (FLAG_always_opt &&
+ function->code()->kind() == Code::OPTIMIZED_FUNCTION));
+ return *code;
}
-bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) {
- // If the function is not compiled ignore the lazy
- // recompilation. This can happen if the debugger is activated and
- // the function is returned to the not compiled state.
- if (!function->shared()->is_compiled()) return false;
-
- // If the function is not optimizable or debugger is active continue using the
- // code from the full compiler.
- if (!isolate->use_crankshaft() ||
- function->shared()->optimization_disabled() ||
- isolate->DebuggerHasBreakPoints()) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileOptimized) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ Handle<JSFunction> function = args.at<JSFunction>(0);
+ CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
+
+ Handle<Code> unoptimized(function->shared()->code());
+ if (!function->shared()->is_compiled()) {
+ // If the function is not compiled, do not optimize.
+ // This can happen if the debugger is activated and
+ // the function is returned to the not compiled state.
+ // TODO(yangguo): reconsider this.
+ function->ReplaceCode(function->shared()->code());
+ } else if (!isolate->use_crankshaft() ||
+ function->shared()->optimization_disabled() ||
+ isolate->DebuggerHasBreakPoints()) {
+ // If the function is not optimizable or debugger is active continue
+ // using the code from the full compiler.
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
function->PrintName();
@@ -8366,52 +8358,21 @@ bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) {
function->shared()->optimization_disabled() ? "F" : "T",
isolate->DebuggerHasBreakPoints() ? "T" : "F");
}
- return false;
+ function->ReplaceCode(*unoptimized);
+ } else {
+ Compiler::ConcurrencyMode mode = concurrent ? Compiler::CONCURRENT
+ : Compiler::NOT_CONCURRENT;
+ Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized, mode);
+ function->ReplaceCode(code.is_null() ? *unoptimized : *code);
}
- return true;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- Handle<JSFunction> function = args.at<JSFunction>(0);
- if (!AllowOptimization(isolate, function)) {
- function->ReplaceCode(function->shared()->code());
- return function->code();
- }
- function->shared()->code()->set_profiler_ticks(0);
- if (JSFunction::CompileOptimized(function, CLEAR_EXCEPTION)) {
- return function->code();
- }
- if (FLAG_trace_opt) {
- PrintF("[failed to optimize ");
- function->PrintName();
- PrintF(": optimized compilation failed]\n");
- }
- function->ReplaceCode(function->shared()->code());
+ ASSERT(function->code()->kind() == Code::FUNCTION ||
+ function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
+ function->IsInOptimizationQueue());
return function->code();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ConcurrentRecompile) {
- HandleScope handle_scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (!AllowOptimization(isolate, function)) {
- function->ReplaceCode(function->shared()->code());
- return isolate->heap()->undefined_value();
- }
- function->shared()->code()->set_profiler_ticks(0);
- ASSERT(FLAG_concurrent_recompilation);
- if (!Compiler::RecompileConcurrent(function)) {
- function->ReplaceCode(function->shared()->code());
- }
- return isolate->heap()->undefined_value();
-}
-
-
class ActivationsFinder : public ThreadVisitor {
public:
Code* code_;
@@ -8523,7 +8484,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
Code* unoptimized = function->shared()->code();
if (unoptimized->kind() == Code::FUNCTION) {
unoptimized->ClearInlineCaches();
- unoptimized->ClearTypeFeedbackCells(isolate->heap());
+ unoptimized->ClearTypeFeedbackInfo(isolate->heap());
}
return isolate->heap()->undefined_value();
}
@@ -8541,7 +8502,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConcurrentRecompilationSupported) {
HandleScope scope(isolate);
- return FLAG_concurrent_recompilation
+ return isolate->concurrent_recompilation_enabled()
? isolate->heap()->true_value() : isolate->heap()->false_value();
}
@@ -8551,8 +8512,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (!function->IsOptimizable()) return isolate->heap()->undefined_value();
- function->MarkForLazyRecompilation();
+ if (!function->IsOptimizable() &&
+ !function->IsMarkedForConcurrentOptimization() &&
+ !function->IsInOptimizationQueue()) {
+ return isolate->heap()->undefined_value();
+ }
+
+ function->MarkForOptimization();
Code* unoptimized = function->shared()->code();
if (args.length() == 2 &&
@@ -8566,8 +8532,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
unoptimized->set_allow_osr_at_loop_nesting_level(i);
isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
}
- } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent"))) {
- function->MarkForConcurrentRecompilation();
+ } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent")) &&
+ isolate->concurrent_recompilation_enabled()) {
+ function->MarkForConcurrentOptimization();
}
}
@@ -8599,8 +8566,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_concurrent_recompilation && sync_with_compiler_thread) {
- while (function->IsInRecompileQueue()) {
+ if (isolate->concurrent_recompilation_enabled() &&
+ sync_with_compiler_thread) {
+ while (function->IsInOptimizationQueue()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
}
@@ -8636,9 +8604,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
static bool IsSuitableForOnStackReplacement(Isolate* isolate,
Handle<JSFunction> function,
- Handle<Code> unoptimized) {
+ Handle<Code> current_code) {
// Keep track of whether we've succeeded in optimizing.
- if (!isolate->use_crankshaft() || !unoptimized->optimizable()) return false;
+ if (!isolate->use_crankshaft() || !current_code->optimizable()) return false;
// If we are trying to do OSR when there are already optimized
// activations of the function, it means (a) the function is directly or
// indirectly recursive and (b) an optimized invocation has been
@@ -8655,74 +8623,81 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
HandleScope scope(isolate);
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, pc_offset, Uint32, args[1]);
- Handle<Code> unoptimized(function->shared()->code(), isolate);
+ Handle<Code> caller_code(function->shared()->code());
-#ifdef DEBUG
+ // We're not prepared to handle a function with arguments object.
+ ASSERT(!function->shared()->uses_arguments());
+
+ // Passing the PC in the javascript frame from the caller directly is
+ // not GC safe, so we walk the stack to get it.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- ASSERT_EQ(frame->function(), *function);
- ASSERT_EQ(frame->LookupCode(), *unoptimized);
- ASSERT(unoptimized->contains(frame->pc()));
+ if (!caller_code->contains(frame->pc())) {
+ // Code on the stack may not be the code object referenced by the shared
+ // function info. It may have been replaced to include deoptimization data.
+ caller_code = Handle<Code>(frame->LookupCode());
+ }
+
+ uint32_t pc_offset = static_cast<uint32_t>(
+ frame->pc() - caller_code->instruction_start());
- ASSERT(pc_offset ==
- static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start()));
+#ifdef DEBUG
+ ASSERT_EQ(frame->function(), *function);
+ ASSERT_EQ(frame->LookupCode(), *caller_code);
+ ASSERT(caller_code->contains(frame->pc()));
#endif // DEBUG
- // We're not prepared to handle a function with arguments object.
- ASSERT(!function->shared()->uses_arguments());
+ BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
+ ASSERT(!ast_id.IsNone());
+
+ Compiler::ConcurrencyMode mode = isolate->concurrent_osr_enabled()
+ ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
Handle<Code> result = Handle<Code>::null();
- BailoutId ast_id = BailoutId::None();
- if (FLAG_concurrent_osr) {
- if (isolate->optimizing_compiler_thread()->
- IsQueuedForOSR(function, pc_offset)) {
- // Still waiting for the optimizing compiler thread to finish. Carry on.
+ OptimizedCompileJob* job = NULL;
+ if (mode == Compiler::CONCURRENT) {
+ // Gate the OSR entry with a stack check.
+ BackEdgeTable::AddStackCheck(caller_code, pc_offset);
+ // Poll already queued compilation jobs.
+ OptimizingCompilerThread* thread = isolate->optimizing_compiler_thread();
+ if (thread->IsQueuedForOSR(function, ast_id)) {
if (FLAG_trace_osr) {
- PrintF("[COSR - polling recompile tasks for ");
+ PrintF("[OSR - Still waiting for queued: ");
function->PrintName();
- PrintF("]\n");
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
}
return NULL;
}
- RecompileJob* job = isolate->optimizing_compiler_thread()->
- FindReadyOSRCandidate(function, pc_offset);
+ job = thread->FindReadyOSRCandidate(function, ast_id);
+ }
- if (job == NULL) {
- if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
- Compiler::RecompileConcurrent(function, pc_offset)) {
- if (function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForConcurrentRecompilation()) {
- // Prevent regular recompilation if we queue this for OSR.
- // TODO(yangguo): remove this as soon as OSR becomes one-shot.
- function->ReplaceCode(*unoptimized);
- }
- return NULL;
- }
- // Fall through to the end in case of failure.
- } else {
- // TODO(titzer): don't install the OSR code into the function.
- ast_id = job->info()->osr_ast_id();
- result = Compiler::InstallOptimizedCode(job);
+ if (job != NULL) {
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - Found ready: ");
+ function->PrintName();
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- } else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
- ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
- ASSERT(!ast_id.IsNone());
+ result = Compiler::GetConcurrentlyOptimizedCode(job);
+ } else if (result.is_null() &&
+ IsSuitableForOnStackReplacement(isolate, function, caller_code)) {
if (FLAG_trace_osr) {
- PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
+ PrintF("[OSR - Compiling: ");
function->PrintName();
- PrintF("]\n");
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
+ }
+ result = Compiler::GetOptimizedCode(function, caller_code, mode, ast_id);
+ if (result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
+ // Optimization is queued. Return to check later.
+ return NULL;
}
- // Attempt OSR compilation.
- result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
}
// Revert the patched back edge table, regardless of whether OSR succeeds.
- BackEdgeTable::Revert(isolate, *unoptimized);
+ BackEdgeTable::Revert(isolate, *caller_code);
// Check whether we ended up with usable optimized code.
if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
@@ -8732,38 +8707,48 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
if (data->OsrPcOffset()->value() >= 0) {
ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
if (FLAG_trace_osr) {
- PrintF("[OSR - entry at AST id %d, offset %d in optimized code]\n",
+ PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n",
ast_id.ToInt(), data->OsrPcOffset()->value());
}
// TODO(titzer): this is a massive hack to make the deopt counts
// match. Fix heuristics for reenabling optimizations!
function->shared()->increment_deopt_count();
+
+ // TODO(titzer): Do not install code into the function.
+ function->ReplaceCode(*result);
return *result;
}
}
+ // Failed.
if (FLAG_trace_osr) {
- PrintF("[OSR - optimization failed for ");
+ PrintF("[OSR - Failed: ");
function->PrintName();
- PrintF("]\n");
+ PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- if (function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForConcurrentRecompilation()) {
- function->ReplaceCode(function->shared()->code());
- }
+ function->ReplaceCode(function->shared()->code());
return NULL;
}
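// (Annotation, not part of the patch.) The OSR entry above is now located by
// walking the stack for the caller's pc and translating it into an AST id
// through the back edge table, instead of trusting a pc offset passed in
// from generated code. A small wrapper sketch of that lookup, built from the
// two calls that appear in the hunk (the helper name is hypothetical):
//
//   static BailoutId OsrAstIdForFrame(JavaScriptFrame* frame,
//                                     Handle<Code> caller_code) {
//     uint32_t pc_offset = static_cast<uint32_t>(
//         frame->pc() - caller_code->instruction_start());
//     return caller_code->TranslatePcOffsetToAstId(pc_offset);
//   }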
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAllocationTimeout) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 2 || args.length() == 3);
#ifdef DEBUG
CONVERT_SMI_ARG_CHECKED(interval, 0);
CONVERT_SMI_ARG_CHECKED(timeout, 1);
isolate->heap()->set_allocation_timeout(timeout);
FLAG_gc_interval = interval;
+ if (args.length() == 3) {
+ // Enable/disable inline allocation if requested.
+ CONVERT_BOOLEAN_ARG_CHECKED(inline_allocation, 2);
+ if (inline_allocation) {
+ isolate->heap()->EnableInlineAllocation();
+ } else {
+ isolate->heap()->DisableInlineAllocation();
+ }
+ }
#endif
return isolate->heap()->undefined_value();
}
@@ -8883,7 +8868,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
ASSERT(function->context() == isolate->context());
ASSERT(function->context()->global_object() == result->global_object());
- isolate->set_context(result);
result->global_object()->set_global_context(result);
return result; // non-failure
@@ -8896,14 +8880,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
CONVERT_ARG_CHECKED(JSFunction, function, 0);
int length = function->shared()->scope_info()->ContextLength();
- Context* result;
- MaybeObject* maybe_result =
- isolate->heap()->AllocateFunctionContext(length, function);
- if (!maybe_result->To(&result)) return maybe_result;
-
- isolate->set_context(result);
-
- return result; // non-failure
+ return isolate->heap()->AllocateFunctionContext(length, function);
}
@@ -9188,9 +9165,8 @@ static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
if (constructor != context_extension_function) return holder;
// Fall back to using the global object as the implicit receiver if
// the property turns out to be a local variable allocated in a
- // context extension object - introduced via eval. Implicit global
- // receivers are indicated with the hole value.
- return isolate->heap()->the_hole_value();
+ // context extension object - introduced via eval.
+ return isolate->heap()->undefined_value();
}
@@ -9224,11 +9200,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
ASSERT(holder->IsContext());
// If the "property" we were looking for is a local variable, the
// receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
- //
- // Use the hole as the receiver to signal that the receiver is implicit
- // and that the global receiver should be used (as distinguished from an
- // explicit receiver that happens to be a global object).
- Handle<Object> receiver = isolate->factory()->the_hole_value();
+ Handle<Object> receiver = isolate->factory()->undefined_value();
Object* value = Context::cast(*holder)->get(index);
// Check for uninitialized bindings.
switch (binding_flags) {
@@ -9263,7 +9235,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
// GetProperty below can cause GC.
Handle<Object> receiver_handle(
object->IsGlobalObject()
- ? GlobalObject::cast(*object)->global_receiver()
+ ? Object::cast(isolate->heap()->undefined_value())
: object->IsJSProxy() ? static_cast<Object*>(*object)
: ComputeReceiverForNonGlobal(isolate, JSObject::cast(*object)),
isolate);
@@ -9428,6 +9400,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowMessage) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ const char* message = GetBailoutReason(
+ static_cast<BailoutReason>(message_id));
+ Handle<Name> message_handle =
+ isolate->factory()->NewStringFromAscii(CStrVector(message));
+ return isolate->Throw(*message_handle);
+}
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
SealHandleScope shs(isolate);
@@ -9442,7 +9425,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallRecompiledCode) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallOptimizedCode) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -9557,7 +9540,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
// the number in a Date object representing a particular instant in
// time is milliseconds. Therefore, we floor the result of getting
// the OS time.
- double millis = floor(OS::TimeCurrentMillis());
+ double millis = std::floor(OS::TimeCurrentMillis());
return isolate->heap()->NumberFromDouble(millis);
}
@@ -9630,6 +9613,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAttachedGlobal) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ Object* global = args[0];
+ if (!global->IsJSGlobalObject()) return isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean(
+ !JSGlobalObject::cast(global)->IsDetached());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
HandleScope scope(isolate);
ASSERT_EQ(1, args.length());
@@ -9691,13 +9684,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
// Compile source string in the native context.
ParseRestriction restriction = function_literal_only
? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION;
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source, context, true, CLASSIC_MODE, restriction, RelocInfo::kNoPosition);
- RETURN_IF_EMPTY_HANDLE(isolate, shared);
- Handle<JSFunction> fun =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
- context,
- NOT_TENURED);
+ Handle<JSFunction> fun = Compiler::GetFunctionFromEval(
+ source, context, CLASSIC_MODE, restriction, RelocInfo::kNoPosition);
+ RETURN_IF_EMPTY_HANDLE(isolate, fun);
return *fun;
}
@@ -9723,18 +9712,11 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
// Deal with a normal eval call with a string argument. Compile it
// and return the compiled function bound in the local context.
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source,
- context,
- context->IsNativeContext(),
- language_mode,
- NO_PARSE_RESTRICTION,
- scope_position);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, shared,
+ static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
+ Handle<JSFunction> compiled = Compiler::GetFunctionFromEval(
+ source, context, language_mode, restriction, scope_position);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, compiled,
MakePair(Failure::Exception(), NULL));
- Handle<JSFunction> compiled =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, NOT_TENURED);
return MakePair(*compiled, *receiver);
}
@@ -9752,7 +9734,7 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
// the first argument without doing anything).
if (*callee != isolate->native_context()->global_eval_fun() ||
!args[1]->IsString()) {
- return MakePair(*callee, isolate->heap()->the_hole_value());
+ return MakePair(*callee, isolate->heap()->undefined_value());
}
CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
@@ -9765,54 +9747,45 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
}
+// Allocate a block of memory in the given space (filled with a filler).
+// Used as a fall-back for generated code when the space is full.
static MaybeObject* Allocate(Isolate* isolate,
int size,
+ bool double_align,
AllocationSpace space) {
- // Allocate a block of memory in the given space (filled with a filler).
- // Use as fallback for allocation in generated code when the space
- // is full.
- SealHandleScope shs(isolate);
+ Heap* heap = isolate->heap();
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
- Heap* heap = isolate->heap();
- RUNTIME_ASSERT(size <= heap->MaxRegularSpaceAllocationSize());
- Object* allocation;
- { MaybeObject* maybe_allocation;
- if (space == NEW_SPACE) {
- maybe_allocation = heap->new_space()->AllocateRaw(size);
- } else {
- ASSERT(space == OLD_POINTER_SPACE || space == OLD_DATA_SPACE);
- maybe_allocation = heap->paged_space(space)->AllocateRaw(size);
- }
- if (maybe_allocation->ToObject(&allocation)) {
- heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
- }
- return maybe_allocation;
+ RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
+ HeapObject* allocation;
+ { MaybeObject* maybe_allocation = heap->AllocateRaw(size, space, space);
+ if (!maybe_allocation->To(&allocation)) return maybe_allocation;
}
+#ifdef DEBUG
+ MemoryChunk* chunk = MemoryChunk::FromAddress(allocation->address());
+ ASSERT(chunk->owner()->identity() == space);
+#endif
+ heap->CreateFillerObjectAt(allocation->address(), size);
+ return allocation;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- return Allocate(isolate, size_smi->value(), NEW_SPACE);
+ CONVERT_SMI_ARG_CHECKED(size, 0);
+ return Allocate(isolate, size, false, NEW_SPACE);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldPointerSpace) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInTargetSpace) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- return Allocate(isolate, size_smi->value(), OLD_POINTER_SPACE);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldDataSpace) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- return Allocate(isolate, size_smi->value(), OLD_DATA_SPACE);
+ ASSERT(args.length() == 2);
+ CONVERT_SMI_ARG_CHECKED(size, 0);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
+ bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ AllocationSpace space = AllocateTargetSpace::decode(flags);
+ return Allocate(isolate, size, double_align, space);
}
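// (Annotation, not part of the patch.) Runtime_AllocateInTargetSpace folds
// the old per-space entry points into one, with the target space and the
// double-alignment request packed into a Smi by BitField-style helpers. A
// sketch of that encoding (the exact field layout here is an assumption):
//
//   class AllocateDoubleAlignFlag : public BitField<bool, 0, 1> {};
//   class AllocateTargetSpace : public BitField<AllocationSpace, 1, 3> {};
//   int flags = AllocateDoubleAlignFlag::encode(true) |
//               AllocateTargetSpace::encode(OLD_POINTER_SPACE);
//
// The runtime function then recovers both values with the ::decode calls
// shown above.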
@@ -9820,22 +9793,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldDataSpace) {
// array. Returns true if the element was pushed on the stack and
// false otherwise.
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
- CONVERT_ARG_CHECKED(JSReceiver, element, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, element, 1);
RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
for (int i = 0; i < length; i++) {
- if (elements->get(i) == element) return isolate->heap()->false_value();
+ if (elements->get(i) == *element) return isolate->heap()->false_value();
}
- Object* obj;
+
// Strict not needed. Used for cycle detection in Array join implementation.
- { MaybeObject* maybe_obj =
- array->SetFastElement(length, element, kNonStrictMode, true);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ RETURN_IF_EMPTY_HANDLE(isolate, JSObject::SetFastElement(array, length,
+ element,
+ kNonStrictMode,
+ true));
return isolate->heap()->true_value();
}
@@ -9954,8 +9927,7 @@ class ArrayConcatVisitor {
}
inline void clear_storage() {
- isolate_->global_handles()->Destroy(
- Handle<Object>::cast(storage_).location());
+ GlobalHandles::Destroy(Handle<Object>::cast(storage_).location());
}
inline void set_storage(FixedArray* storage) {
@@ -10021,15 +9993,12 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
break;
}
case NON_STRICT_ARGUMENTS_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS: \
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
// External arrays are always dense.
return length;
}
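// (Annotation, not part of the patch.) TYPED_ARRAY_CASE relies on the
// TYPED_ARRAYS X-macro, which applies a macro V once per typed-array kind
// with a (Type, type, TYPE, ctype, size) tuple. Illustrative subset of what
// such a list looks like (not the full V8 definition):
//
//   #define TYPED_ARRAYS_SKETCH(V)           \
//     V(Uint8, uint8, UINT8, uint8_t, 1)     \
//     V(Int8, int8, INT8, int8_t, 1)         \
//     V(Float64, float64, FLOAT64, double, 8)
//
// Expanding TYPED_ARRAY_CASE over it yields one pair of case labels
// (EXTERNAL_UINT8_ELEMENTS / UINT8_ELEMENTS, ...) per kind, replacing the
// hand-written case lists removed in the surrounding hunks.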
@@ -10137,51 +10106,16 @@ static void CollectElementIndices(Handle<JSObject> object,
default: {
int dense_elements_length;
switch (kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- dense_elements_length =
- ExternalPixelArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalByteArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedByteArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalShortArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedShortArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_INT_ELEMENTS: {
- dense_elements_length =
- ExternalIntArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedIntArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS: {
- dense_elements_length =
- ExternalFloatArray::cast(object->elements())->length();
- break;
- }
- case EXTERNAL_DOUBLE_ELEMENTS: {
- dense_elements_length =
- ExternalDoubleArray::cast(object->elements())->length();
- break;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: { \
+ dense_elements_length = \
+ External##Type##Array::cast(object->elements())->length(); \
+ break; \
}
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
default:
UNREACHABLE();
dense_elements_length = 0;
@@ -10298,8 +10232,8 @@ static bool IterateElements(Isolate* isolate,
}
break;
}
- case EXTERNAL_PIXEL_ELEMENTS: {
- Handle<ExternalPixelArray> pixels(ExternalPixelArray::cast(
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS: {
+ Handle<ExternalUint8ClampedArray> pixels(ExternalUint8ClampedArray::cast(
receiver->elements()));
for (uint32_t j = 0; j < length; j++) {
Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
@@ -10307,43 +10241,43 @@ static bool IterateElements(Isolate* isolate,
}
break;
}
- case EXTERNAL_BYTE_ELEMENTS: {
- IterateExternalArrayElements<ExternalByteArray, int8_t>(
+ case EXTERNAL_INT8_ELEMENTS: {
+ IterateExternalArrayElements<ExternalInt8Array, int8_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
+ case EXTERNAL_UINT8_ELEMENTS: {
+ IterateExternalArrayElements<ExternalUint8Array, uint8_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_SHORT_ELEMENTS: {
- IterateExternalArrayElements<ExternalShortArray, int16_t>(
+ case EXTERNAL_INT16_ELEMENTS: {
+ IterateExternalArrayElements<ExternalInt16Array, int16_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
+ case EXTERNAL_UINT16_ELEMENTS: {
+ IterateExternalArrayElements<ExternalUint16Array, uint16_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_INT_ELEMENTS: {
- IterateExternalArrayElements<ExternalIntArray, int32_t>(
+ case EXTERNAL_INT32_ELEMENTS: {
+ IterateExternalArrayElements<ExternalInt32Array, int32_t>(
isolate, receiver, true, false, visitor);
break;
}
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
+ case EXTERNAL_UINT32_ELEMENTS: {
+ IterateExternalArrayElements<ExternalUint32Array, uint32_t>(
isolate, receiver, true, false, visitor);
break;
}
- case EXTERNAL_FLOAT_ELEMENTS: {
- IterateExternalArrayElements<ExternalFloatArray, float>(
+ case EXTERNAL_FLOAT32_ELEMENTS: {
+ IterateExternalArrayElements<ExternalFloat32Array, float>(
isolate, receiver, false, false, visitor);
break;
}
- case EXTERNAL_DOUBLE_ELEMENTS: {
- IterateExternalArrayElements<ExternalDoubleArray, double>(
+ case EXTERNAL_FLOAT64_ELEMENTS: {
+ IterateExternalArrayElements<ExternalFloat64Array, double>(
isolate, receiver, false, false, visitor);
break;
}
@@ -11354,11 +11288,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// by creating correct wrapper object based on the calling frame's
// native context.
it.Advance();
- Handle<Context> calling_frames_native_context(
- Context::cast(Context::cast(it.frame()->context())->native_context()));
- ASSERT(!receiver->IsUndefined() && !receiver->IsNull());
- receiver =
- isolate->factory()->ToObject(receiver, calling_frames_native_context);
+ if (receiver->IsUndefined()) {
+ Context* context = function->context();
+ receiver = handle(context->global_object()->global_receiver());
+ } else {
+ ASSERT(!receiver->IsNull());
+ Context* context = Context::cast(it.frame()->context());
+ Handle<Context> native_context(Context::cast(context->native_context()));
+ receiver = isolate->factory()->ToObject(receiver, native_context);
+ }
}
details->set(kFrameDetailsReceiverIndex, *receiver);
@@ -11379,6 +11317,12 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ Handle<String> name(scope_info->ParameterName(i));
+ VariableMode mode;
+ InitializationFlag init_flag;
+ // Do not materialize the parameter if it is shadowed by a context local.
+ if (scope_info->ContextSlotIndex(*name, &mode, &init_flag) != -1) continue;
+
Handle<Object> value(i < frame_inspector->GetParametersCount()
? frame_inspector->GetParameter(i)
: isolate->heap()->undefined_value(),
@@ -11387,28 +11331,21 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- target,
- Handle<String>(scope_info->ParameterName(i)),
- value,
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(
+ isolate, target, name, value, NONE, kNonStrictMode),
Handle<JSObject>());
}
// Second fill all stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ Handle<String> name(scope_info->StackLocalName(i));
Handle<Object> value(frame_inspector->GetExpression(i), isolate);
if (value->IsTheHole()) continue;
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- target,
- Handle<String>(scope_info->StackLocalName(i)),
- value,
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(
+ isolate, target, name, value, NONE, kNonStrictMode),
Handle<JSObject>());
}
@@ -11486,12 +11423,12 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- target,
- key,
- GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(isolate,
+ target,
+ key,
+ GetProperty(isolate, ext, key),
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
}
}
@@ -11591,12 +11528,9 @@ static bool SetLocalVariableValue(Isolate* isolate,
if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing
// property value.
- SetProperty(isolate,
- ext,
- variable_name,
- new_value,
- NONE,
- kNonStrictMode);
+ Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
+ NONE,
+ kNonStrictMode);
return true;
}
}
@@ -11642,12 +11576,10 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- closure_scope,
- key,
- GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(isolate, closure_scope, key,
+ GetProperty(isolate, ext, key),
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
}
}
@@ -11678,12 +11610,9 @@ static bool SetClosureVariableValue(Isolate* isolate,
Handle<JSObject> ext(JSObject::cast(context->extension()));
if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing property value.
- SetProperty(isolate,
- ext,
- variable_name,
- new_value,
- NONE,
- kNonStrictMode);
+ Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
+ NONE,
+ kNonStrictMode);
return true;
}
}
@@ -11704,12 +11633,9 @@ static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
isolate->factory()->NewJSObject(isolate->object_function());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- catch_scope,
- name,
- thrown_object,
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(isolate, catch_scope, name, thrown_object,
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
return catch_scope;
}
@@ -12588,7 +12514,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
&source_position,
alignment)) {
- return isolate->heap()->undefined_value();
+ return isolate->heap()->undefined_value();
}
return Smi::FromInt(source_position);
@@ -12727,12 +12653,11 @@ static Handle<JSObject> MaterializeArgumentsObject(
// FunctionGetArguments can't throw an exception.
Handle<JSObject> arguments = Handle<JSObject>::cast(
Accessors::FunctionGetArguments(function));
- SetProperty(isolate,
- target,
- isolate->factory()->arguments_string(),
- arguments,
- ::NONE,
- kNonStrictMode);
+ Runtime::SetObjectProperty(isolate, target,
+ isolate->factory()->arguments_string(),
+ arguments,
+ ::NONE,
+ kNonStrictMode);
return target;
}
@@ -12749,18 +12674,14 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
context = isolate->factory()->NewWithContext(closure, context, extension);
}
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
- source,
- context,
- context->IsNativeContext(),
- CLASSIC_MODE,
- NO_PARSE_RESTRICTION,
- RelocInfo::kNoPosition);
- RETURN_IF_EMPTY_HANDLE(isolate, shared);
-
Handle<JSFunction> eval_fun =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, NOT_TENURED);
+ Compiler::GetFunctionFromEval(source,
+ context,
+ CLASSIC_MODE,
+ NO_PARSE_RESTRICTION,
+ RelocInfo::kNoPosition);
+ RETURN_IF_EMPTY_HANDLE(isolate, eval_fun);
+
bool pending_exception;
Handle<Object> result = Execution::Call(
isolate, eval_fun, receiver, 0, NULL, &pending_exception);
@@ -13176,7 +13097,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
+ if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
return Failure::Exception();
}
func->code()->PrintLn();
@@ -13191,7 +13112,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
+ if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
return Failure::Exception();
}
func->shared()->construct_stub()->PrintLn();
@@ -13564,7 +13485,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
CONVERT_ARG_CHECKED(String, arg, 0);
SmartArrayPointer<char> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- FlagList::SetFlagsFromString(*flags, StrLength(*flags));
+ FlagList::SetFlagsFromString(flags.get(), StrLength(flags.get()));
return isolate->heap()->undefined_value();
}
@@ -13822,8 +13743,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateDateTimeFormat) {
// Make object handle weak so we can delete the data format once GC kicks in.
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
- NULL,
+ GlobalHandles::MakeWeak(wrapper.location(),
+ reinterpret_cast<void*>(wrapper.location()),
DateFormat::DeleteDateFormat);
return *local_object;
}
@@ -13926,8 +13847,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateNumberFormat) {
NONE));
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
- NULL,
+ GlobalHandles::MakeWeak(wrapper.location(),
+ reinterpret_cast<void*>(wrapper.location()),
NumberFormat::DeleteNumberFormat);
return *local_object;
}
@@ -14038,8 +13959,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCollator) {
NONE));
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
- NULL,
+ GlobalHandles::MakeWeak(wrapper.location(),
+ reinterpret_cast<void*>(wrapper.location()),
Collator::DeleteCollator);
return *local_object;
}
@@ -14073,6 +13994,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalCompare) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringNormalize) {
+ HandleScope scope(isolate);
+ static const UNormalizationMode normalizationForms[] =
+ { UNORM_NFC, UNORM_NFD, UNORM_NFKC, UNORM_NFKD };
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, stringValue, 0);
+ CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]);
+
+ v8::String::Value string_value(v8::Utils::ToLocal(stringValue));
+ const UChar* u_value = reinterpret_cast<const UChar*>(*string_value);
+
+ // TODO(mnita): check Normalizer2 (not available in ICU 46)
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString result;
+ icu::Normalizer::normalize(u_value, normalizationForms[form_id], 0,
+ result, status);
+ if (U_FAILURE(status)) {
+ return isolate->heap()->undefined_value();
+ }
+
+ return *isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length()));
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateBreakIterator) {
HandleScope scope(isolate);
@@ -14114,8 +14064,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateBreakIterator) {
// Make object handle weak so we can delete the break iterator once GC kicks
// in.
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
- NULL,
+ GlobalHandles::MakeWeak(wrapper.location(),
+ reinterpret_cast<void*>(wrapper.location()),
BreakIterator::DeleteBreakIterator);
return *local_object;
}
@@ -14325,9 +14275,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 2);
- OS::PrintError("abort: %s\n",
- reinterpret_cast<char*>(args[0]) + args.smi_at(1));
+ ASSERT(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ const char* message = GetBailoutReason(
+ static_cast<BailoutReason>(message_id));
+ OS::PrintError("abort: %s\n", message);
+ isolate->PrintStack(stderr);
+ OS::Abort();
+ UNREACHABLE();
+ return NULL;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AbortJS) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
+ OS::PrintError("abort: %s\n", message->ToCString().get());
isolate->PrintStack(stderr);
OS::Abort();
UNREACHABLE();
@@ -14352,14 +14316,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyContextDisposed) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MigrateInstance) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
if (!object->IsJSObject()) return Smi::FromInt(0);
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
if (!js_object->map()->is_deprecated()) return Smi::FromInt(0);
- JSObject::MigrateInstance(js_object);
+ // This call must not cause lazy deopts, because it's called from deferred
+ // code where we can't handle lazy deopts for lack of a suitable bailout
+ // ID. So we just try migration and signal failure if necessary,
+ // which will also trigger a deopt.
+ Handle<Object> result = JSObject::TryMigrateInstance(js_object);
+ if (result.is_null()) return Smi::FromInt(0);
return *object;
}
@@ -14557,22 +14526,24 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(NonStrictArgumentsElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalByteElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedByteElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalShortElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedShortElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalIntElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedIntElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalFloatElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
// Properties test sitting with elements tests - not fooling anyone.
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
+#define TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, size) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasExternal##Type##Elements) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj->HasExternal##Type##Elements()); \
+ }
+
+TYPED_ARRAYS(TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
+
+#undef TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -14627,14 +14598,31 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetMicrotaskPending) {
SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_BOOLEAN_ARG_CHECKED(new_state, 0);
+ bool old_state = isolate->microtask_pending();
+ isolate->set_microtask_pending(new_state);
+ return isolate->heap()->ToBoolean(old_state);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RunMicrotasks) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
- isolate->set_observer_delivery_pending(true);
+ Execution::RunMicrotasks(isolate);
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetMicrotaskState) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
+ return isolate->heap()->microtask_state();
+}
+
+
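
Runtime_SetMicrotaskPending swaps a per-isolate flag and hands back the previous value, while Runtime_RunMicrotasks drains whatever has been queued. A toy flag-and-drain queue showing the same contract (plain C++, not the V8 microtask machinery):

```cpp
#include <cstdio>
#include <functional>
#include <vector>

struct MicrotaskQueue {
  bool pending = false;
  std::vector<std::function<void()>> tasks;

  // Mirrors Runtime_SetMicrotaskPending: set the new state, return the old one.
  bool SetPending(bool new_state) {
    bool old_state = pending;
    pending = new_state;
    return old_state;
  }

  // Mirrors Runtime_RunMicrotasks: drain everything queued so far.
  void Run() {
    for (auto& task : tasks) task();
    tasks.clear();
    pending = false;
  }
};

int main() {
  MicrotaskQueue q;
  q.tasks.push_back([] { std::puts("microtask"); });
  bool was_pending = q.SetPending(true);
  if (!was_pending) q.Run();  // only the first enqueuer schedules the drain
}
```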
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
@@ -14673,7 +14661,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
- ASSERT(object->IsAccessCheckNeeded());
+ ASSERT(object->map()->is_access_check_needed());
Handle<Object> key = args.at<Object>(2);
SaveContext save(isolate);
isolate->set_context(observer->context());
@@ -14698,7 +14686,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
Handle<JSFunction> constructor,
- Handle<Object> type_info,
+ Handle<AllocationSite> site,
Arguments* caller_args) {
bool holey = false;
bool can_use_type_feedback = true;
@@ -14720,14 +14708,7 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
JSArray* array;
MaybeObject* maybe_array;
- if (!type_info.is_null() &&
- *type_info != isolate->heap()->undefined_value() &&
- Cell::cast(*type_info)->value()->IsAllocationSite() &&
- can_use_type_feedback) {
- Handle<Cell> cell = Handle<Cell>::cast(type_info);
- Handle<AllocationSite> site = Handle<AllocationSite>(
- AllocationSite::cast(cell->value()), isolate);
- ASSERT(!site->SitePointsToLiteral());
+ if (!site.is_null() && can_use_type_feedback) {
ElementsKind to_kind = site->GetElementsKind();
if (holey && !IsFastHoleyElementsKind(to_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
@@ -14753,8 +14734,17 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
DONT_INITIALIZE_ARRAY_ELEMENTS);
if (maybe_array->IsFailure()) return maybe_array;
+ ElementsKind old_kind = array->GetElementsKind();
maybe_array = ArrayConstructInitializeElements(array, caller_args);
if (maybe_array->IsFailure()) return maybe_array;
+ if (!site.is_null() &&
+ (old_kind != array->GetElementsKind() ||
+ !can_use_type_feedback)) {
+      // The arguments passed in caused a transition. This kind of complexity
+      // can't be dealt with in the inlined hydrogen array constructor case.
+      // We must mark the allocation site as not inlinable.
+ site->SetDoNotInlineCall();
+ }
return array;
}
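
The new comment explains why an argument-driven elements-kind transition makes the allocation site opt out of the inlined constructor path. A sketch of the record-kind, initialize, compare, then SetDoNotInlineCall pattern with made-up types (not V8's ElementsKind machinery):

```cpp
#include <cstdio>

enum class Kind { Smi, Double };

struct Site {
  bool inline_call = true;
  void SetDoNotInlineCall() { inline_call = false; }
};

struct Array {
  Kind kind = Kind::Smi;
  void Initialize(double value) {
    // Storing a fractional value forces a kind transition, much like argument
    // initialization can during array construction.
    if (value != static_cast<long long>(value)) kind = Kind::Double;
  }
};

int main() {
  Site site;
  Array array;
  Kind old_kind = array.kind;
  array.Initialize(1.5);
  if (old_kind != array.kind) {
    // The inlined fast path could not have predicted this; disable it.
    site.SetDoNotInlineCall();
  }
  std::printf("inline allowed: %d\n", site.inline_call);
}
```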
@@ -14762,21 +14752,37 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
HandleScope scope(isolate);
// If we get 2 arguments then they are the stub parameters (constructor, type
- // info). If we get 3, then the first one is a pointer to the arguments
- // passed by the caller.
+ // info). If we get 4, then the first one is a pointer to the arguments
+  // passed by the caller, and the last one is the number of arguments
+  // passed by the caller (redundant, but useful for the deoptimizer to
+  // check with an assert).
Arguments empty_args(0, NULL);
bool no_caller_args = args.length() == 2;
- ASSERT(no_caller_args || args.length() == 3);
+ ASSERT(no_caller_args || args.length() == 4);
int parameters_start = no_caller_args ? 0 : 1;
Arguments* caller_args = no_caller_args
? &empty_args
: reinterpret_cast<Arguments*>(args[0]);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
CONVERT_ARG_HANDLE_CHECKED(Object, type_info, parameters_start + 1);
+#ifdef DEBUG
+ if (!no_caller_args) {
+ CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 2);
+ ASSERT(arg_count == caller_args->length());
+ }
+#endif
+
+ Handle<AllocationSite> site;
+ if (!type_info.is_null() &&
+ *type_info != isolate->heap()->null_value() &&
+ *type_info != isolate->heap()->undefined_value()) {
+ site = Handle<AllocationSite>::cast(type_info);
+ ASSERT(!site->SitePointsToLiteral());
+ }
return ArrayConstructorCommon(isolate,
constructor,
- type_info,
+ site,
caller_args);
}
@@ -14785,16 +14791,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalArrayConstructor) {
HandleScope scope(isolate);
Arguments empty_args(0, NULL);
bool no_caller_args = args.length() == 1;
- ASSERT(no_caller_args || args.length() == 2);
+ ASSERT(no_caller_args || args.length() == 3);
int parameters_start = no_caller_args ? 0 : 1;
Arguments* caller_args = no_caller_args
? &empty_args
: reinterpret_cast<Arguments*>(args[0]);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
-
+#ifdef DEBUG
+ if (!no_caller_args) {
+ CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 1);
+ ASSERT(arg_count == caller_args->length());
+ }
+#endif
return ArrayConstructorCommon(isolate,
constructor,
- Handle<Object>::null(),
+ Handle<AllocationSite>::null(),
caller_args);
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index d67f781e69..6e79dbed30 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -85,10 +85,9 @@ namespace internal {
F(GetConstructorDelegate, 1, 1) \
F(NewArgumentsFast, 3, 1) \
F(NewStrictArgumentsFast, 3, 1) \
- F(LazyCompile, 1, 1) \
- F(LazyRecompile, 1, 1) \
- F(ConcurrentRecompile, 1, 1) \
- F(TryInstallRecompiledCode, 1, 1) \
+ F(CompileUnoptimized, 1, 1) \
+ F(CompileOptimized, 2, 1) \
+ F(TryInstallOptimizedCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyStubFailure, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
@@ -100,17 +99,17 @@ namespace internal {
F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
F(UnblockConcurrentRecompilation, 0, 1) \
- F(CompileForOnStackReplacement, 2, 1) \
- F(SetAllocationTimeout, 2, 1) \
+ F(CompileForOnStackReplacement, 1, 1) \
+ F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(AllocateInNewSpace, 1, 1) \
- F(AllocateInOldPointerSpace, 1, 1) \
- F(AllocateInOldDataSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
F(SetNativeFlag, 1, 1) \
+ F(SetInlineBuiltinFlag, 1, 1) \
F(StoreArrayLiteralElement, 5, 1) \
F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
F(FlattenString, 1, 1) \
- F(MigrateInstance, 1, 1) \
+ F(TryMigrateInstance, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
F(MaxSmi, 0, 1) \
\
@@ -179,17 +178,14 @@ namespace internal {
F(Math_asin, 1, 1) \
F(Math_atan, 1, 1) \
F(Math_atan2, 2, 1) \
- F(Math_ceil, 1, 1) \
- F(Math_cos, 1, 1) \
F(Math_exp, 1, 1) \
F(Math_floor, 1, 1) \
F(Math_log, 1, 1) \
F(Math_pow, 2, 1) \
F(Math_pow_cfunction, 2, 1) \
F(RoundNumber, 1, 1) \
- F(Math_sin, 1, 1) \
F(Math_sqrt, 1, 1) \
- F(Math_tan, 1, 1) \
+ F(Math_fround, 1, 1) \
\
/* Regular expressions */ \
F(RegExpCompile, 3, 1) \
@@ -278,6 +274,7 @@ namespace internal {
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
+ F(IsAttachedGlobal, 1, 1) \
F(ResolvePossiblyDirectEval, 5, 2) \
\
F(SetProperty, -1 /* 4 or 5 */, 1) \
@@ -300,8 +297,8 @@ namespace internal {
/* Literals */ \
F(MaterializeRegExpLiteral, 4, 1)\
F(CreateObjectLiteral, 4, 1) \
- F(CreateArrayLiteral, 3, 1) \
- F(CreateArrayLiteralShallow, 3, 1) \
+ F(CreateArrayLiteral, 4, 1) \
+ F(CreateArrayLiteralStubBailout, 3, 1) \
\
/* Harmony generators */ \
F(CreateJSGeneratorObject, 0, 1) \
@@ -312,12 +309,17 @@ namespace internal {
/* ES5 */ \
F(ObjectFreeze, 1, 1) \
\
+ /* Harmony Microtasks */ \
+ F(GetMicrotaskState, 0, 1) \
+ \
/* Harmony modules */ \
F(IsJSModule, 1, 1) \
\
/* Harmony symbols */ \
F(CreateSymbol, 1, 1) \
+ F(CreatePrivateSymbol, 1, 1) \
F(SymbolName, 1, 1) \
+ F(SymbolIsPrivate, 1, 1) \
\
/* Harmony proxies */ \
F(CreateJSProxy, 2, 1) \
@@ -351,10 +353,13 @@ namespace internal {
F(WeakCollectionDelete, 2, 1) \
F(WeakCollectionSet, 3, 1) \
\
+ /* Harmony events */ \
+ F(SetMicrotaskPending, 1, 1) \
+ F(RunMicrotasks, 0, 1) \
+ \
/* Harmony observe */ \
F(IsObserved, 1, 1) \
F(SetIsObserved, 1, 1) \
- F(SetObserverDeliveryPending, 0, 1) \
F(GetObservationState, 0, 1) \
F(ObservationWeakMapCreate, 0, 1) \
F(UnwrapGlobalProxy, 1, 1) \
@@ -406,6 +411,7 @@ namespace internal {
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
F(ThrowNotDateError, 0, 1) \
+ F(ThrowMessage, 1, 1) \
F(StackGuard, 0, 1) \
F(Interrupt, 0, 1) \
F(PromoteScheduledException, 0, 1) \
@@ -436,7 +442,8 @@ namespace internal {
F(DebugTrace, 0, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
- F(Abort, 2, 1) \
+ F(Abort, 1, 1) \
+ F(AbortJS, 1, 1) \
/* Logging */ \
F(Log, 2, 1) \
/* ES5 */ \
@@ -459,16 +466,16 @@ namespace internal {
F(HasFastHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
F(HasNonStrictArgumentsElements, 1, 1) \
- F(HasExternalPixelElements, 1, 1) \
+ F(HasExternalUint8ClampedElements, 1, 1) \
F(HasExternalArrayElements, 1, 1) \
- F(HasExternalByteElements, 1, 1) \
- F(HasExternalUnsignedByteElements, 1, 1) \
- F(HasExternalShortElements, 1, 1) \
- F(HasExternalUnsignedShortElements, 1, 1) \
- F(HasExternalIntElements, 1, 1) \
- F(HasExternalUnsignedIntElements, 1, 1) \
- F(HasExternalFloatElements, 1, 1) \
- F(HasExternalDoubleElements, 1, 1) \
+ F(HasExternalInt8Elements, 1, 1) \
+ F(HasExternalUint8Elements, 1, 1) \
+ F(HasExternalInt16Elements, 1, 1) \
+ F(HasExternalUint16Elements, 1, 1) \
+ F(HasExternalInt32Elements, 1, 1) \
+ F(HasExternalUint32Elements, 1, 1) \
+ F(HasExternalFloat32Elements, 1, 1) \
+ F(HasExternalFloat64Elements, 1, 1) \
F(HasFastProperties, 1, 1) \
F(TransitionElementsKind, 2, 1) \
F(HaveSameMap, 2, 1) \
@@ -566,6 +573,9 @@ namespace internal {
F(CreateCollator, 3, 1) \
F(InternalCompare, 3, 1) \
\
+ /* String.prototype.normalize. */ \
+ F(StringNormalize, 2, 1) \
+ \
/* Break iterator. */ \
F(CreateBreakIterator, 3, 1) \
F(BreakIteratorAdoptText, 2, 1) \
@@ -620,19 +630,15 @@ namespace internal {
F(OneByteSeqStringSetChar, 3, 1) \
F(TwoByteSeqStringSetChar, 3, 1) \
F(ObjectEquals, 2, 1) \
- F(RandomHeapNumber, 0, 1) \
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \
F(IsSpecObject, 1, 1) \
F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
F(MathPow, 2, 1) \
- F(MathSin, 1, 1) \
- F(MathCos, 1, 1) \
- F(MathTan, 1, 1) \
F(MathSqrt, 1, 1) \
F(MathLog, 1, 1) \
- F(IsRegExpEquivalent, 2, 1) \
+ F(IsMinusZero, 1, 1) \
F(HasCachedArrayIndex, 1, 1) \
F(GetCachedArrayIndex, 1, 1) \
F(FastAsciiArrayJoin, 2, 1) \
@@ -778,15 +784,7 @@ class Runtime : public AllStatic {
Handle<Object> object,
uint32_t index);
- MUST_USE_RESULT static MaybeObject* SetObjectProperty(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT static MaybeObject* SetObjectPropertyOrFail(
+ static Handle<Object> SetObjectProperty(
Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -794,7 +792,7 @@ class Runtime : public AllStatic {
PropertyAttributes attr,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty(
+ static Handle<Object> ForceSetObjectProperty(
Isolate* isolate,
Handle<JSObject> object,
Handle<Object> key,
@@ -838,6 +836,22 @@ class Runtime : public AllStatic {
Isolate* isolate,
JSArrayBuffer* phantom_array_buffer);
+ enum TypedArrayId {
+    // arrayIds below should be synchronized with typedarray.js natives.
+ ARRAY_ID_UINT8 = 1,
+ ARRAY_ID_INT8 = 2,
+ ARRAY_ID_UINT16 = 3,
+ ARRAY_ID_INT16 = 4,
+ ARRAY_ID_UINT32 = 5,
+ ARRAY_ID_INT32 = 6,
+ ARRAY_ID_FLOAT32 = 7,
+ ARRAY_ID_FLOAT64 = 8,
+ ARRAY_ID_UINT8_CLAMPED = 9
+ };
+
+ static void ArrayIdToTypeAndSize(int array_id,
+ ExternalArrayType *type, size_t *element_size);
+
  // Helper functions used by stubs.
static void PerformGC(Object* result, Isolate* isolate);
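
ArrayIdToTypeAndSize maps one of the ARRAY_ID_* constants (kept in sync with typedarray.js) to an external array type and element width. A hedged sketch of such a mapping; the element sizes are the standard typed-array widths, but the real function also reports an ExternalArrayType:

```cpp
#include <cstddef>
#include <cstdio>

enum ArrayId {
  ARRAY_ID_UINT8 = 1, ARRAY_ID_INT8, ARRAY_ID_UINT16, ARRAY_ID_INT16,
  ARRAY_ID_UINT32, ARRAY_ID_INT32, ARRAY_ID_FLOAT32, ARRAY_ID_FLOAT64,
  ARRAY_ID_UINT8_CLAMPED
};

// Illustrative element sizes keyed by the array id.
size_t ElementSize(ArrayId id) {
  switch (id) {
    case ARRAY_ID_UINT8: case ARRAY_ID_INT8: case ARRAY_ID_UINT8_CLAMPED:
      return 1;
    case ARRAY_ID_UINT16: case ARRAY_ID_INT16:
      return 2;
    case ARRAY_ID_UINT32: case ARRAY_ID_INT32: case ARRAY_ID_FLOAT32:
      return 4;
    case ARRAY_ID_FLOAT64:
      return 8;
  }
  return 0;
}

int main() {
  std::printf("Float64 elements are %zu bytes wide\n",
              ElementSize(ARRAY_ID_FLOAT64));
}
```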
@@ -852,6 +866,9 @@ class Runtime : public AllStatic {
//---------------------------------------------------------------------------
// Constants used by interface to runtime functions.
+class AllocateDoubleAlignFlag: public BitField<bool, 0, 1> {};
+class AllocateTargetSpace: public BitField<AllocationSpace, 1, 3> {};
+
class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
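
AllocateDoubleAlignFlag and AllocateTargetSpace pack the double-alignment bit and a 3-bit allocation space into one small integer argument for Runtime_AllocateInTargetSpace. A self-contained re-statement of that BitField encoding under the same layout (bit 0 for alignment, bits 1-3 for the space); the space enum values here are illustrative:

```cpp
#include <cstdint>
#include <cstdio>

// Simplified BitField: 'shift' is the first bit, 'size' the field width.
template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t bits) { return static_cast<T>((bits & kMask) >> shift); }
};

enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE, CODE_SPACE };

using AllocateDoubleAlignFlag = BitField<bool, 0, 1>;
using AllocateTargetSpace = BitField<AllocationSpace, 1, 3>;

int main() {
  uint32_t flags = AllocateDoubleAlignFlag::encode(true) |
                   AllocateTargetSpace::encode(OLD_DATA_SPACE);
  std::printf("double aligned: %d, space: %d\n",
              AllocateDoubleAlignFlag::decode(flags),
              AllocateTargetSpace::decode(flags));
}
```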
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index ce11c37079..2a949ae8d1 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -48,7 +48,6 @@ var $Number = global.Number;
var $Function = global.Function;
var $Boolean = global.Boolean;
var $NaN = %GetRootNaN();
-var builtins = this;
// ECMA-262 Section 11.9.3.
function EQUALS(y) {
@@ -361,7 +360,7 @@ function IN(x) {
function INSTANCE_OF(F) {
var V = this;
if (!IS_SPEC_FUNCTION(F)) {
- throw %MakeTypeError('instanceof_function_expected', [V]);
+ throw %MakeTypeError('instanceof_function_expected', [F]);
}
// If V is not an object, return false.
@@ -606,7 +605,9 @@ function SameValue(x, y) {
if (IS_NUMBER(x)) {
if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
// x is +0 and y is -0 or vice versa.
- if (x === 0 && y === 0 && (1 / x) != (1 / y)) return false;
+ if (x === 0 && y === 0 && %_IsMinusZero(x) != %_IsMinusZero(y)) {
+ return false;
+ }
}
return x === y;
}
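
SameValue now asks the runtime whether a zero is negative instead of computing 1/x. Outside of V8, the same +0 versus -0 distinction can be drawn from the sign bit, for example:

```cpp
#include <cmath>
#include <cstdio>

// SameValue-style zero handling: +0 and -0 compare equal under ===,
// but SameValue must tell them apart.
bool SameValueZeroCase(double x, double y) {
  if (x == 0.0 && y == 0.0) return std::signbit(x) == std::signbit(y);
  return x == y;
}

int main() {
  std::printf("%d %d\n", SameValueZeroCase(0.0, -0.0),   // 0
                         SameValueZeroCase(0.0, 0.0));   // 1
}
```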
@@ -663,7 +664,7 @@ function DefaultString(x) {
function ToPositiveInteger(x, rangeErrorName) {
var i = TO_INTEGER(x);
- if (i < 0) throw %MakeRangeError(rangeErrorName);
+ if (i < 0) throw MakeRangeError(rangeErrorName);
return i;
}
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index b56556572e..beecb27582 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -83,7 +83,7 @@ SafepointEntry SafepointTable::FindEntry(Address pc) const {
}
-void SafepointTable::PrintEntry(unsigned index) const {
+void SafepointTable::PrintEntry(unsigned index, FILE* out) const {
disasm::NameConverter converter;
SafepointEntry entry = GetEntry(index);
uint8_t* bits = entry.bits();
@@ -93,25 +93,25 @@ void SafepointTable::PrintEntry(unsigned index) const {
ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
int last = entry_size_ - 1;
- for (int i = first; i < last; i++) PrintBits(bits[i], kBitsPerByte);
+ for (int i = first; i < last; i++) PrintBits(out, bits[i], kBitsPerByte);
int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
- PrintBits(bits[last], last_bits);
+ PrintBits(out, bits[last], last_bits);
// Print the registers (if any).
if (!entry.HasRegisters()) return;
for (int j = 0; j < kNumSafepointRegisters; j++) {
if (entry.HasRegisterAt(j)) {
- PrintF(" | %s", converter.NameOfCPURegister(j));
+ PrintF(out, " | %s", converter.NameOfCPURegister(j));
}
}
}
}
-void SafepointTable::PrintBits(uint8_t byte, int digits) {
+void SafepointTable::PrintBits(FILE* out, uint8_t byte, int digits) {
ASSERT(digits >= 0 && digits <= kBitsPerByte);
for (int i = 0; i < digits; i++) {
- PrintF("%c", ((byte & (1 << i)) == 0) ? '0' : '1');
+ PrintF(out, "%c", ((byte & (1 << i)) == 0) ? '0' : '1');
}
}
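
The safepoint-table change only threads a FILE* through PrintEntry and PrintBits so the dump can be redirected away from stdout. For reference, the bit dump is a least-significant-bit-first loop; a standalone restatement:

```cpp
#include <cstdint>
#include <cstdio>

// Print the low 'digits' bits of 'byte', least significant first,
// to an arbitrary stream instead of always to stdout.
static void PrintBits(FILE* out, uint8_t byte, int digits) {
  for (int i = 0; i < digits; i++) {
    std::fputc((byte & (1 << i)) ? '1' : '0', out);
  }
}

int main() {
  PrintBits(stdout, 0xB2, 8);  // prints 01001101
  std::fputc('\n', stdout);
}
```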
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index fc8bf7a411..ea35253ff8 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -126,7 +126,7 @@ class SafepointTable BASE_EMBEDDED {
// Returns the entry for the given pc.
SafepointEntry FindEntry(Address pc) const;
- void PrintEntry(unsigned index) const;
+ void PrintEntry(unsigned index, FILE* out = stdout) const;
private:
static const uint8_t kNoRegisters = 0xFF;
@@ -149,7 +149,7 @@ class SafepointTable BASE_EMBEDDED {
return GetPcOffsetLocation(index) + kPcSize;
}
- static void PrintBits(uint8_t byte, int digits);
+ static void PrintBits(FILE* out, uint8_t byte, int digits);
DisallowHeapAllocation no_allocation_;
Code* code_;
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index 684ef486c7..33f46c7701 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -35,7 +35,10 @@
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>
+
+#if !V8_OS_QNX
#include <sys/syscall.h>
+#endif
#if V8_OS_MACOSX
#include <mach/mach.h>
@@ -45,12 +48,14 @@
&& !V8_OS_OPENBSD
#include <ucontext.h>
#endif
+
#include <unistd.h>
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
@@ -93,6 +98,18 @@ typedef struct ucontext {
// Other fields are not used by V8, don't define them here.
} ucontext_t;
+#elif defined(__aarch64__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
@@ -222,13 +239,27 @@ class SimulatorHelper {
}
inline void FillRegisters(RegisterState* state) {
+#if V8_TARGET_ARCH_ARM
state->pc = reinterpret_cast<Address>(simulator_->get_pc());
state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
-#if V8_TARGET_ARCH_ARM
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::r11));
+#elif V8_TARGET_ARCH_A64
+ if (simulator_->sp() == 0 || simulator_->fp() == 0) {
+    // It is possible that the simulator is interrupted while it is updating
+    // the sp or fp register. The A64 simulator does this in two steps:
+    // first setting it to zero and then setting it to the new value.
+    // Bail out if sp/fp doesn't contain the new value.
+ return;
+ }
+ state->pc = reinterpret_cast<Address>(simulator_->pc());
+ state->sp = reinterpret_cast<Address>(simulator_->sp());
+ state->fp = reinterpret_cast<Address>(simulator_->fp());
#elif V8_TARGET_ARCH_MIPS
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#endif
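
The A64 branch refuses to record a sample when the simulated sp or fp reads back as zero, because the simulator writes those registers in two steps. A sketch of that guard with a stand-in register file (the types and names here are hypothetical):

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

struct SimRegs {
  std::atomic<uintptr_t> sp{0};
  std::atomic<uintptr_t> fp{0};
};

struct Sample { uintptr_t sp = 0, fp = 0; bool valid = false; };

// Mirrors FillRegisters' A64 path: refuse to record a sample if either
// register is mid-update (temporarily zero).
Sample TakeSample(const SimRegs& regs) {
  Sample s;
  uintptr_t sp = regs.sp.load();
  uintptr_t fp = regs.fp.load();
  if (sp == 0 || fp == 0) return s;  // bail out, keep s.valid == false
  s.sp = sp;
  s.fp = fp;
  s.valid = true;
  return s;
}

int main() {
  SimRegs regs;
  regs.sp = 0x7fff0000;
  std::printf("sample valid: %d\n", TakeSample(regs).valid);  // fp still 0 -> 0
}
```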
@@ -266,7 +297,11 @@ class SignalHandler : public AllStatic {
struct sigaction sa;
sa.sa_sigaction = &HandleProfilerSignal;
sigemptyset(&sa.sa_mask);
+#if V8_OS_QNX
+ sa.sa_flags = SA_SIGINFO;
+#else
sa.sa_flags = SA_RESTART | SA_SIGINFO;
+#endif
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
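
On QNX the profiler handler is installed with SA_SIGINFO only, dropping SA_RESTART. A minimal standalone installation of a SIGPROF handler using the same flag split (POSIX; error handling trimmed):

```cpp
#include <csignal>
#include <cstdio>

static void HandleProfilerSignal(int, siginfo_t*, void*) {
  // Sampling work would go here; keep it async-signal-safe.
}

int main() {
  struct sigaction sa{};
  sa.sa_sigaction = &HandleProfilerSignal;
  sigemptyset(&sa.sa_mask);
#if defined(__QNX__)
  sa.sa_flags = SA_SIGINFO;               // QNX: no SA_RESTART
#else
  sa.sa_flags = SA_RESTART | SA_SIGINFO;  // elsewhere: restart interrupted syscalls
#endif
  bool installed = sigaction(SIGPROF, &sa, nullptr) == 0;
  std::printf("handler installed: %d\n", installed);
}
```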
@@ -321,6 +356,11 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
SimulatorHelper helper;
if (!helper.Init(sampler, isolate)) return;
helper.FillRegisters(&state);
+  // It is possible that the simulator is interrupted while it is updating
+  // the sp or fp register. The A64 simulator does this in two steps:
+  // first setting it to zero and then setting it to the new value.
+  // Bail out if sp/fp doesn't contain the new value.
+ if (state.sp == 0 || state.fp == 0) return;
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -350,6 +390,11 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#elif V8_HOST_ARCH_A64
+ state.pc = reinterpret_cast<Address>(mcontext.pc);
+ state.sp = reinterpret_cast<Address>(mcontext.sp);
+ // FP is an alias for x29.
+ state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
@@ -415,7 +460,17 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-#endif // V8_OS_SOLARIS
+#elif V8_OS_QNX
+#if V8_HOST_ARCH_IA32
+ state.pc = reinterpret_cast<Address>(mcontext.cpu.eip);
+ state.sp = reinterpret_cast<Address>(mcontext.cpu.esp);
+ state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp);
+#elif V8_HOST_ARCH_ARM
+ state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]);
+ state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
+ state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
+#endif // V8_HOST_ARCH_*
+#endif // V8_OS_QNX
#endif // USE_SIMULATOR
sampler->SampleStack(state);
#endif // V8_OS_NACL
diff --git a/deps/v8/src/scanner-character-streams.cc b/deps/v8/src/scanner-character-streams.cc
index fb503459f7..cbef3f95bb 100644
--- a/deps/v8/src/scanner-character-streams.cc
+++ b/deps/v8/src/scanner-character-streams.cc
@@ -213,11 +213,11 @@ unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position,
static const byte kUtf8MultiByteMask = 0xC0;
-static const byte kUtf8MultiByteCharStart = 0xC0;
static const byte kUtf8MultiByteCharFollower = 0x80;
#ifdef DEBUG
+static const byte kUtf8MultiByteCharStart = 0xC0;
static bool IsUtf8MultiCharacterStart(byte first_byte) {
return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
}
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 26f840b23a..27768547fb 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -246,7 +246,8 @@ Token::Value Scanner::Next() {
}
-static inline bool IsByteOrderMark(uc32 c) {
+// TODO(yangguo): check whether this is actually necessary.
+static inline bool IsLittleEndianByteOrderMark(uc32 c) {
// The Unicode value U+FFFE is guaranteed never to be assigned as a
// Unicode character; this implies that in a Unicode context the
// 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
@@ -254,7 +255,7 @@ static inline bool IsByteOrderMark(uc32 c) {
// not be a U+FFFE character expressed in big-endian byte
// order). Nevertheless, we check for it to be compatible with
// Spidermonkey.
- return c == 0xFEFF || c == 0xFFFE;
+ return c == 0xFFFE;
}
@@ -262,14 +263,14 @@ bool Scanner::SkipWhiteSpace() {
int start_position = source_pos();
while (true) {
- // We treat byte-order marks (BOMs) as whitespace for better
- // compatibility with Spidermonkey and other JavaScript engines.
- while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
- // IsWhiteSpace() includes line terminators!
+ while (true) {
+      // Advance as long as the character is a WhiteSpace or LineTerminator.
+ // Remember if the latter is the case.
if (unicode_cache_->IsLineTerminator(c0_)) {
- // Ignore line terminators, but remember them. This is necessary
- // for automatic semicolon insertion.
has_line_terminator_before_next_ = true;
+ } else if (!unicode_cache_->IsWhiteSpace(c0_) &&
+ !IsLittleEndianByteOrderMark(c0_)) {
+ break;
}
Advance();
}
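
The rewritten loop advances while the character is whitespace, a line terminator (remembered for automatic semicolon insertion), or the 0xFFFE byte-order-mark pattern. A compact ASCII-only sketch of the same control flow, with std::isspace standing in for the Unicode predicates:

```cpp
#include <cctype>
#include <cstdio>
#include <string>

// Skip whitespace and line terminators, remembering whether a terminator was
// seen; mirrors the structure of Scanner::SkipWhiteSpace.
size_t SkipWhiteSpace(const std::string& src, size_t pos, bool* saw_terminator) {
  *saw_terminator = false;
  while (pos < src.size()) {
    char c = src[pos];
    if (c == '\n' || c == '\r') {
      *saw_terminator = true;          // needed for automatic semicolon insertion
    } else if (!std::isspace(static_cast<unsigned char>(c))) {
      break;                           // neither whitespace nor a terminator
    }
    ++pos;
  }
  return pos;
}

int main() {
  bool saw = false;
  size_t next = SkipWhiteSpace("  \n  foo", 0, &saw);
  std::printf("next=%zu terminator=%d\n", next, saw);  // next=5 terminator=1
}
```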
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 3cefc833ac..b08692b3ae 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -139,12 +139,17 @@ class UnicodeCache {
bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
+ bool IsWhiteSpaceOrLineTerminator(unibrow::uchar c) {
+ return kIsWhiteSpaceOrLineTerminator.get(c);
+ }
private:
unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+ unibrow::Predicate<WhiteSpace, 128> kIsWhiteSpace;
+ unibrow::Predicate<WhiteSpaceOrLineTerminator, 128>
+ kIsWhiteSpaceOrLineTerminator;
StaticResource<Utf8Decoder> utf8_decoder_;
DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index f1ae876ca3..03e69bf384 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -32,8 +32,6 @@
#include "scopeinfo.h"
#include "scopes.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
@@ -374,15 +372,14 @@ bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
int end = start + local_count;
for (int i = start; i < end; ++i) {
int context_index = Context::MIN_CONTEXT_SLOTS + i - start;
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ Handle<Object> result = Runtime::SetObjectProperty(
isolate,
- SetProperty(isolate,
- scope_object,
- Handle<String>(String::cast(scope_info->get(i))),
- Handle<Object>(context->get(context_index), isolate),
- ::NONE,
- kNonStrictMode),
- false);
+ scope_object,
+ Handle<String>(String::cast(scope_info->get(i))),
+ Handle<Object>(context->get(context_index), isolate),
+ ::NONE,
+ kNonStrictMode);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, false);
}
return true;
}
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index ee327fb79f..650f57c616 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -35,8 +35,6 @@
#include "messages.h"
#include "scopeinfo.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
@@ -292,8 +290,7 @@ bool Scope::Analyze(CompilationInfo* info) {
// Allocate the variables.
{
- AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate(),
- info->zone());
+ AstNodeFactory<AstNullVisitor> ast_node_factory(info->zone());
if (!top->AllocateVariables(info, &ast_node_factory)) return false;
}
@@ -310,7 +307,7 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
- info->SetScope(scope);
+ info->PrepareForCompilation(scope);
return true;
}
@@ -802,7 +799,7 @@ static void Indent(int n, const char* str) {
static void PrintName(Handle<String> name) {
SmartArrayPointer<char> s = name->ToCString(DISALLOW_NULLS);
- PrintF("%s", *s);
+ PrintF("%s", s.get());
}
@@ -1302,7 +1299,7 @@ void Scope::AllocateParameterLocals() {
void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
- ASSERT(!var->IsVariable(isolate_->factory()->result_string()) ||
+ ASSERT(!var->IsVariable(isolate_->factory()->dot_result_string()) ||
!var->IsStackLocal());
if (var->IsUnallocated() && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 7ed36665e2..14b1b9cc96 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -297,15 +297,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_ENTRY,
1,
"Runtime::PerformGC");
- Add(ExternalReference::fill_heap_number_with_random_function(
- isolate).address(),
- RUNTIME_ENTRY,
- 2,
- "V8::FillHeapNumberWithRandom");
- Add(ExternalReference::random_uint32_function(isolate).address(),
- RUNTIME_ENTRY,
- 3,
- "V8::Random");
Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
RUNTIME_ENTRY,
4,
@@ -325,8 +316,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
7,
"IncrementalMarking::RecordWrite");
-
-
// Miscellaneous
Add(ExternalReference::roots_array_start(isolate).address(),
UNCLASSIFIED,
@@ -389,30 +378,10 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
17,
"Debug::step_in_fp_addr()");
#endif
- Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
- UNCLASSIFIED,
- 18,
- "add_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
- UNCLASSIFIED,
- 19,
- "sub_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
- UNCLASSIFIED,
- 20,
- "mul_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
- UNCLASSIFIED,
- 21,
- "div_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
+ Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
UNCLASSIFIED,
22,
"mod_two_doubles");
- Add(ExternalReference::compare_doubles(isolate).address(),
- UNCLASSIFIED,
- 23,
- "compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
UNCLASSIFIED,
@@ -440,10 +409,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
29,
"KeyedLookupCache::field_offsets()");
- Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
- UNCLASSIFIED,
- 30,
- "TranscendentalCache::caches()");
Add(ExternalReference::handle_scope_next_address(isolate).address(),
UNCLASSIFIED,
31,
@@ -536,20 +501,20 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
53,
"Runtime::AllocateInNewSpace");
+ Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
+ UNCLASSIFIED,
+ 54,
+ "Runtime::AllocateInTargetSpace");
Add(ExternalReference::old_pointer_space_allocation_top_address(
isolate).address(),
UNCLASSIFIED,
- 54,
+ 55,
"Heap::OldPointerSpaceAllocationTopAddress");
Add(ExternalReference::old_pointer_space_allocation_limit_address(
isolate).address(),
UNCLASSIFIED,
- 55,
- "Heap::OldPointerSpaceAllocationLimitAddress");
- Add(ExternalReference(Runtime::kAllocateInOldPointerSpace, isolate).address(),
- UNCLASSIFIED,
56,
- "Runtime::AllocateInOldPointerSpace");
+ "Heap::OldPointerSpaceAllocationLimitAddress");
Add(ExternalReference::old_data_space_allocation_top_address(
isolate).address(),
UNCLASSIFIED,
@@ -560,30 +525,22 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
58,
"Heap::OldDataSpaceAllocationLimitAddress");
- Add(ExternalReference(Runtime::kAllocateInOldDataSpace, isolate).address(),
- UNCLASSIFIED,
- 59,
- "Runtime::AllocateInOldDataSpace");
Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
address(),
UNCLASSIFIED,
- 60,
+ 59,
"Heap::NewSpaceAllocationLimitAddress");
Add(ExternalReference::allocation_sites_list_address(isolate).address(),
UNCLASSIFIED,
- 61,
+ 60,
"Heap::allocation_sites_list_address()");
- Add(ExternalReference::record_object_allocation_function(isolate).address(),
- UNCLASSIFIED,
- 62,
- "HeapProfiler::RecordObjectAllocationFromMasm");
Add(ExternalReference::address_of_uint32_bias().address(),
UNCLASSIFIED,
- 63,
+ 61,
"uint32_bias");
Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
UNCLASSIFIED,
- 64,
+ 62,
"Code::MarkCodeAsExecuted");
// Add a small set of deopt entry addresses to encoder without generating the
@@ -595,7 +552,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 64 + entry, "lazy_deopt");
+ Add(address, LAZY_DEOPTIMIZATION, entry, "lazy_deopt");
}
}
@@ -813,6 +770,15 @@ Deserializer::Deserializer(SnapshotByteSource* source)
}
+void Deserializer::FlushICacheForNewCodeObjects() {
+ PageIterator it(isolate_->heap()->code_space());
+ while (it.has_next()) {
+ Page* p = it.next();
+ CPU::FlushICache(p->area_start(), p->area_end() - p->area_start());
+ }
+}
+
+
void Deserializer::Deserialize(Isolate* isolate) {
isolate_ = isolate;
ASSERT(isolate_ != NULL);
@@ -823,6 +789,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
+ isolate_->heap()->IterateSmiRoots(this);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
@@ -849,6 +816,8 @@ void Deserializer::Deserialize(Isolate* isolate) {
}
}
+ FlushICacheForNewCodeObjects();
+
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate_, LogCodeObjects());
LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
@@ -1285,7 +1254,6 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
: isolate_(isolate),
sink_(sink),
- current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0) {
// The serializer is meant to be used only to generate initial heap images
@@ -1311,7 +1279,7 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
// We don't support serializing installed extensions.
CHECK(!isolate->has_installed_extensions());
-
+ isolate->heap()->IterateSmiRoots(this);
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
@@ -1668,86 +1636,71 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
- Object** current = rinfo->target_object_address();
-
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
- HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- serializer_->SerializeObject(*current, representation, kStartOfObject, skip);
+ HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ Object* object = rinfo->target_object();
+ serializer_->SerializeObject(object, how_to_code, kStartOfObject, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
- Address references_start = reinterpret_cast<Address>(p);
- int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
-
+ int skip = OutputRawData(reinterpret_cast<Address>(p),
+ kCanReturnSkipInsteadOfSkipping);
sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
sink_->PutInt(skip, "SkipB4ExternalRef");
- int reference_id = serializer_->EncodeExternalReference(*p);
- sink_->PutInt(reference_id, "reference id");
+ Address target = *p;
+ sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
bytes_processed_so_far_ += kPointerSize;
}
void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
- Address references_start = rinfo->target_address_address();
- int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
-
- Address* current = rinfo->target_reference_address();
- int representation = rinfo->IsCodedSpecially() ?
- kFromCode + kStartOfObject : kPlain + kStartOfObject;
- sink_->Put(kExternalReference + representation, "ExternalRef");
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
sink_->PutInt(skip, "SkipB4ExternalRef");
- int reference_id = serializer_->EncodeExternalReference(*current);
- sink_->PutInt(reference_id, "reference id");
+ Address target = rinfo->target_reference();
+ sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
- Address target_start = rinfo->target_address_address();
- int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
- Address target = rinfo->target_address();
- uint32_t encoding = serializer_->EncodeExternalReference(target);
- CHECK(target == NULL ? encoding == 0 : encoding != 0);
- int representation;
- // Can't use a ternary operator because of gcc.
- if (rinfo->IsCodedSpecially()) {
- representation = kStartOfObject + kFromCode;
- } else {
- representation = kStartOfObject + kPlain;
- }
- sink_->Put(kExternalReference + representation, "ExternalReference");
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
sink_->PutInt(skip, "SkipB4ExternalRef");
- sink_->PutInt(encoding, "reference id");
+ Address target = rinfo->target_address();
+ sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
- CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Address target_start = rinfo->target_address_address();
- int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(target, kFromCode, kInnerPointer, skip);
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
- serializer_->SerializeObject(target, kPlain, kInnerPointer, skip);
+ Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
bytes_processed_so_far_ += kPointerSize;
}
void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::CELL);
- Cell* cell = Cell::cast(rinfo->target_cell());
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
- serializer_->SerializeObject(cell, kPlain, kInnerPointer, skip);
+ Cell* object = Cell::cast(rinfo->target_cell());
+ serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
}
@@ -1776,10 +1729,29 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
}
+static Code* CloneCodeObject(HeapObject* code) {
+ Address copy = new byte[code->Size()];
+ OS::MemCopy(copy, code->address(), code->Size());
+ return Code::cast(HeapObject::FromAddress(copy));
+}
+
+
+static void WipeOutRelocations(Code* code) {
+ int mode_mask =
+ RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ it.rinfo()->WipeOut();
+ }
+}
+
+
int Serializer::ObjectSerializer::OutputRawData(
Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
Address object_start = object_->address();
- Address base = object_start + bytes_processed_so_far_;
+ int base = bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
int to_skip = up_to_offset - bytes_processed_so_far_;
int bytes_to_output = to_skip;
@@ -1809,10 +1781,22 @@ int Serializer::ObjectSerializer::OutputRawData(
sink_->Put(kRawData, "RawData");
sink_->PutInt(bytes_to_output, "length");
}
+
+ // To make snapshots reproducible, we need to wipe out all pointers in code.
+ if (code_object_) {
+ Code* code = CloneCodeObject(object_);
+ WipeOutRelocations(code);
+ // We need to wipe out the header fields *after* wiping out the
+ // relocations, because some of these fields are needed for the latter.
+ code->WipeOutHeader();
+ object_start = code->address();
+ }
+
+ const char* description = code_object_ ? "Code" : "Byte";
for (int i = 0; i < bytes_to_output; i++) {
- unsigned int data = base[i];
- sink_->PutSection(data, "Byte");
+ sink_->PutSection(object_start[base + i], description);
}
+ if (code_object_) delete[] object_start;
}
if (to_skip != 0 && return_skip == kIgnoringReturn) {
sink_->Put(kSkip, "Skip");
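
OutputRawData now clones each code object, wipes relocations and header fields in the clone, and serializes the clone's bytes so snapshots are byte-for-byte reproducible. A generic clone-scrub-emit sketch with a toy record; none of these types are V8's:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct Record {
  uint32_t payload[4];
  uint64_t cache_address;  // address-dependent, must not leak into the snapshot
};

std::vector<uint8_t> Serialize(const Record& r) {
  Record copy;                                   // 1. clone
  std::memcpy(&copy, &r, sizeof(copy));
  copy.cache_address = 0;                        // 2. scrub the non-deterministic field
  std::vector<uint8_t> out(sizeof(copy));
  std::memcpy(out.data(), &copy, sizeof(copy));  // 3. emit the scrubbed bytes
  return out;
}

int main() {
  Record a{{1, 2, 3, 4}, reinterpret_cast<uint64_t>(&a)};
  Record b{{1, 2, 3, 4}, reinterpret_cast<uint64_t>(&b)};
  // Identical logical content now serializes identically, regardless of addresses.
  std::printf("equal: %d\n", Serialize(a) == Serialize(b));
}
```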
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 47627ac2dd..2ad9bb17ed 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -364,7 +364,7 @@ class Deserializer: public SerializerDeserializer {
high_water_[space_index] = address + size;
HeapProfiler* profiler = isolate_->heap_profiler();
if (profiler->is_tracking_allocations()) {
- profiler->NewObjectEvent(address, size);
+ profiler->AllocationEvent(address, size);
}
return address;
}
@@ -377,6 +377,7 @@ class Deserializer: public SerializerDeserializer {
return HeapObject::FromAddress(high_water_[space] - offset);
}
+ void FlushICacheForNewCodeObjects();
// Cached current isolate.
Isolate* isolate_;
@@ -578,7 +579,6 @@ class Serializer : public SerializerDeserializer {
// relative addresses for back references.
int fullness_[LAST_SPACE + 1];
SnapshotByteSink* sink_;
- int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index 485e930645..c16e7fffdc 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -32,6 +32,8 @@
#include "ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/simulator-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/smart-pointers.h b/deps/v8/src/smart-pointers.h
index 7c35b2aff2..7203c16ba9 100644
--- a/deps/v8/src/smart-pointers.h
+++ b/deps/v8/src/smart-pointers.h
@@ -36,35 +36,31 @@ template<typename Deallocator, typename T>
class SmartPointerBase {
public:
// Default constructor. Constructs an empty scoped pointer.
- inline SmartPointerBase() : p_(NULL) {}
+ SmartPointerBase() : p_(NULL) {}
// Constructs a scoped pointer from a plain one.
- explicit inline SmartPointerBase(T* ptr) : p_(ptr) {}
+ explicit SmartPointerBase(T* ptr) : p_(ptr) {}
// Copy constructor removes the pointer from the original to avoid double
// freeing.
- inline SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
+ SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
: p_(rhs.p_) {
const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
}
- // When the destructor of the scoped pointer is executed the plain pointer
- // is deleted using DeleteArray. This implies that you must allocate with
- // NewArray.
- inline ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
+ T* operator->() const { return p_; }
- inline T* operator->() const { return p_; }
+ T& operator*() const { return *p_; }
- // You can get the underlying pointer out with the * operator.
- inline T* operator*() { return p_; }
+ T* get() const { return p_; }
// You can use [n] to index as if it was a plain pointer.
- inline T& operator[](size_t i) {
+ T& operator[](size_t i) {
return p_[i];
}
// You can use [n] to index as if it was a plain pointer.
- const inline T& operator[](size_t i) const {
+ const T& operator[](size_t i) const {
return p_[i];
}
@@ -76,13 +72,14 @@ class SmartPointerBase {
// If you want to take out the plain pointer and don't want it automatically
// deleted then call Detach(). Afterwards, the smart pointer is empty
// (NULL).
- inline T* Detach() {
+ T* Detach() {
T* temp = p_;
p_ = NULL;
return temp;
}
- inline void Reset(T* new_value) {
+ void Reset(T* new_value) {
+ ASSERT(p_ == NULL || p_ != new_value);
if (p_) Deallocator::Delete(p_);
p_ = new_value;
}
@@ -90,7 +87,7 @@ class SmartPointerBase {
// Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.
- inline SmartPointerBase<Deallocator, T>& operator=(
+ SmartPointerBase<Deallocator, T>& operator=(
const SmartPointerBase<Deallocator, T>& rhs) {
ASSERT(is_empty());
T* tmp = rhs.p_; // swap to handle self-assignment
@@ -99,7 +96,13 @@ class SmartPointerBase {
return *this;
}
- inline bool is_empty() { return p_ == NULL; }
+ bool is_empty() const { return p_ == NULL; }
+
+ protected:
+ // When the destructor of the scoped pointer is executed the plain pointer
+ // is deleted using DeleteArray. This implies that you must allocate with
+ // NewArray.
+ ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
private:
T* p_;
@@ -119,10 +122,10 @@ struct ArrayDeallocator {
template<typename T>
class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
public:
- inline SmartArrayPointer() { }
- explicit inline SmartArrayPointer(T* ptr)
+ SmartArrayPointer() { }
+ explicit SmartArrayPointer(T* ptr)
: SmartPointerBase<ArrayDeallocator<T>, T>(ptr) { }
- inline SmartArrayPointer(const SmartArrayPointer<T>& rhs)
+ SmartArrayPointer(const SmartArrayPointer<T>& rhs)
: SmartPointerBase<ArrayDeallocator<T>, T>(rhs) { }
};
@@ -138,10 +141,10 @@ struct ObjectDeallocator {
template<typename T>
class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
public:
- inline SmartPointer() { }
- explicit inline SmartPointer(T* ptr)
+ SmartPointer() { }
+ explicit SmartPointer(T* ptr)
: SmartPointerBase<ObjectDeallocator<T>, T>(ptr) { }
- inline SmartPointer(const SmartPointer<T>& rhs)
+ SmartPointer(const SmartPointer<T>& rhs)
: SmartPointerBase<ObjectDeallocator<T>, T>(rhs) { }
};
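
The cleanup keeps the class's unusual ownership rule, copying steals the pointer from the source, and moves the destructor to protected so only the concrete subclasses can delete. A tiny standalone analogue of that contract to make the semantics concrete (a sketch, not the V8 class):

```cpp
#include <cstdio>
#include <cstring>

// Minimal stand-in with the same ownership rules as SmartArrayPointer:
// copying transfers ownership, get() observes, Detach() releases, Reset() swaps.
template <typename T>
class ScopedArray {
 public:
  ScopedArray() : p_(nullptr) {}
  explicit ScopedArray(T* ptr) : p_(ptr) {}
  ScopedArray(const ScopedArray& rhs) : p_(rhs.p_) {
    const_cast<ScopedArray&>(rhs).p_ = nullptr;  // steal to avoid double free
  }
  ~ScopedArray() { delete[] p_; }
  T* get() const { return p_; }
  T* Detach() { T* tmp = p_; p_ = nullptr; return tmp; }
  void Reset(T* new_value) { delete[] p_; p_ = new_value; }
  bool is_empty() const { return p_ == nullptr; }
 private:
  T* p_;
};

int main() {
  char* raw = new char[8];
  std::strcpy(raw, "hello");
  ScopedArray<char> a(raw);
  ScopedArray<char> b(a);  // ownership moves to b, a becomes empty
  std::printf("a empty: %d, b holds: %s\n", a.is_empty(), b.get());
}
```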
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index d5c114c5b0..778cb18d72 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -165,7 +165,7 @@ Page* Page::Initialize(Heap* heap,
Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
- ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
+ ASSERT(page->area_size() <= kMaxRegularHeapObjectSize);
ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
@@ -274,18 +274,12 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
// Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
- AllocationType event) {
- HeapProfiler* profiler = heap()->isolate()->heap_profiler();
-
+MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
- if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
- profiler->NewObjectEvent(object->address(), size_in_bytes);
- }
return object;
}
@@ -298,9 +292,6 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
- if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
- profiler->NewObjectEvent(object->address(), size_in_bytes);
- }
return object;
}
@@ -309,9 +300,6 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
- if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
- profiler->NewObjectEvent(object->address(), size_in_bytes);
- }
return object;
}
@@ -349,11 +337,6 @@ MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- HeapProfiler* profiler = heap()->isolate()->heap_profiler();
- if (profiler != NULL && profiler->is_tracking_allocations()) {
- profiler->NewObjectEvent(obj->address(), size_in_bytes);
- }
-
return obj;
}
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index fe5eeb5e43..6c03daa75a 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -483,7 +483,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->parallel_sweeping_ = 0;
+ chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
@@ -560,21 +560,12 @@ bool MemoryChunk::CommitArea(size_t requested) {
void MemoryChunk::InsertAfter(MemoryChunk* other) {
- next_chunk_ = other->next_chunk_;
- prev_chunk_ = other;
+ MemoryChunk* other_next = other->next_chunk();
- // This memory barrier is needed since concurrent sweeper threads may iterate
- // over the list of pages while a new page is inserted.
- // TODO(hpayer): find a cleaner way to guarantee that the page list can be
- // expanded concurrently
- MemoryBarrier();
-
- // The following two write operations can take effect in arbitrary order
- // since pages are always iterated by the sweeper threads in LIFO order, i.e,
- // the inserted page becomes visible for the sweeper threads after
- // other->next_chunk_ = this;
- other->next_chunk_->prev_chunk_ = this;
- other->next_chunk_ = this;
+ set_next_chunk(other_next);
+ set_prev_chunk(other);
+ other_next->set_prev_chunk(this);
+ other->set_next_chunk(this);
}
@@ -583,10 +574,12 @@ void MemoryChunk::Unlink() {
heap_->decrement_scan_on_scavenge_pages();
ClearFlag(SCAN_ON_SCAVENGE);
}
- next_chunk_->prev_chunk_ = prev_chunk_;
- prev_chunk_->next_chunk_ = next_chunk_;
- prev_chunk_ = NULL;
- next_chunk_ = NULL;
+ MemoryChunk* next_element = next_chunk();
+ MemoryChunk* prev_element = prev_chunk();
+ next_element->set_prev_chunk(prev_element);
+ prev_element->set_next_chunk(next_element);
+ set_prev_chunk(NULL);
+ set_next_chunk(NULL);
}
@@ -1079,12 +1072,7 @@ intptr_t PagedSpace::SizeOfFirstPage() {
// upgraded to handle small pages.
size = AreaSize();
} else {
-#if V8_TARGET_ARCH_MIPS
- // TODO(plind): Investigate larger code stubs size on MIPS.
size = 480 * KB;
-#else
- size = 416 * KB;
-#endif
}
break;
default:
@@ -1122,6 +1110,11 @@ void PagedSpace::ResetFreeListStatistics() {
}
+void PagedSpace::IncreaseCapacity(int size) {
+ accounting_stats_.ExpandSpace(size);
+}
+
+
void PagedSpace::ReleasePage(Page* page, bool unlink) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
@@ -1142,6 +1135,11 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
DecreaseUnsweptFreeBytes(page);
}
+  // TODO(hpayer): This check is just used for debugging purposes and
+ // should be removed or turned into an assert after investigating the
+ // crash in concurrent sweeping.
+ CHECK(!free_list_.ContainsPageFreeListItems(page));
+
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
allocation_info_.set_top(NULL);
allocation_info_.set_limit(NULL);
@@ -1345,7 +1343,6 @@ void NewSpace::Shrink() {
}
}
}
- allocation_info_.set_limit(to_space_.page_high());
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1354,14 +1351,7 @@ void NewSpace::UpdateAllocationInfo() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.set_top(to_space_.page_low());
allocation_info_.set_limit(to_space_.page_high());
-
- // Lower limit during incremental marking.
- if (heap()->incremental_marking()->IsMarking() &&
- inline_allocation_limit_step() != 0) {
- Address new_limit =
- allocation_info_.top() + inline_allocation_limit_step();
- allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
- }
+ UpdateInlineAllocationLimit(0);
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1378,6 +1368,26 @@ void NewSpace::ResetAllocationInfo() {
}
+void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
+ if (heap()->inline_allocation_disabled()) {
+ // Lowest limit when linear allocation was disabled.
+    // Lowest limit when linear allocation is disabled.
+ Address new_top = allocation_info_.top() + size_in_bytes;
+ allocation_info_.set_limit(Min(new_top, high));
+ } else if (inline_allocation_limit_step() == 0) {
+ // Normal limit is the end of the current page.
+ allocation_info_.set_limit(to_space_.page_high());
+ } else {
+ // Lower limit during incremental marking.
+ Address high = to_space_.page_high();
+ Address new_top = allocation_info_.top() + size_in_bytes;
+ Address new_limit = new_top + inline_allocation_limit_step_;
+ allocation_info_.set_limit(Min(new_limit, high));
+ }
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
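
UpdateInlineAllocationLimit picks one of three limits: just past the requested allocation when inline allocation is disabled, the page end when there is no incremental-marking step, and top plus size plus the step otherwise. The same decision restated as a pure function over integer addresses:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

uint64_t ComputeLimit(uint64_t top, uint64_t page_high, int size_in_bytes,
                      int step, bool inline_allocation_disabled) {
  if (inline_allocation_disabled) {
    // Lowest limit: just enough room for this one allocation.
    return std::min(top + size_in_bytes, page_high);
  }
  if (step == 0) {
    // Normal limit: the end of the current page.
    return page_high;
  }
  // Incremental marking: stop after roughly 'step' more bytes.
  return std::min(top + size_in_bytes + step, page_high);
}

int main() {
  std::printf("%llu %llu %llu\n",
              (unsigned long long)ComputeLimit(1000, 5000, 64, 0, true),     // 1064
              (unsigned long long)ComputeLimit(1000, 5000, 64, 0, false),    // 5000
              (unsigned long long)ComputeLimit(1000, 5000, 64, 256, false)); // 1320
}
```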
+
+
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
if (NewSpacePage::IsAtStart(top)) {
@@ -1412,18 +1422,16 @@ bool NewSpace::AddFreshPage() {
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top();
- Address new_top = old_top + size_in_bytes;
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
- // Incremental marking has lowered the limit to get a
- // chance to do a step.
- Address new_limit = Min(
- allocation_info_.limit() + inline_allocation_limit_step_,
- high);
- allocation_info_.set_limit(new_limit);
+ // Either the limit has been lowered because linear allocation was disabled
+ // or because incremental marking wants to get a chance to do a step. Set
+ // the new limit accordingly.
+ Address new_top = old_top + size_in_bytes;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(
bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+ UpdateInlineAllocationLimit(size_in_bytes);
top_on_previous_step_ = new_top;
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
@@ -1511,6 +1519,7 @@ void SemiSpace::SetUp(Address start,
initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
capacity_ = initial_capacity;
maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+ maximum_committed_ = 0;
committed_ = false;
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
@@ -1543,6 +1552,7 @@ bool SemiSpace::Commit() {
current = new_page;
}
+ SetCapacity(capacity_);
committed_ = true;
Reset();
return true;
@@ -1591,7 +1601,7 @@ bool SemiSpace::GrowTo(int new_capacity) {
start_ + capacity_, delta, executable())) {
return false;
}
- capacity_ = new_capacity;
+ SetCapacity(new_capacity);
NewSpacePage* last_page = anchor()->prev_page();
ASSERT(last_page != anchor());
for (int i = pages_before; i < pages_after; i++) {
@@ -1631,7 +1641,7 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
}
- capacity_ = new_capacity;
+ SetCapacity(new_capacity);
return true;
}
@@ -1694,6 +1704,14 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
}
+void SemiSpace::SetCapacity(int new_capacity) {
+ capacity_ = new_capacity;
+ if (capacity_ > maximum_committed_) {
+ maximum_committed_ = capacity_;
+ }
+}
+
+
void SemiSpace::set_age_mark(Address mark) {
ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
age_mark_ = mark;
@@ -2057,20 +2075,21 @@ void FreeListNode::set_next(FreeListNode* next) {
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
- if (category->top_ != NULL) {
- ASSERT(category->end_ != NULL);
+ if (category->top() != NULL) {
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
LockGuard<Mutex> target_lock_guard(mutex());
LockGuard<Mutex> source_lock_guard(category->mutex());
+ ASSERT(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
} else {
- category->end()->set_next(top_);
+ category->end()->set_next(top());
}
- top_ = category->top();
+ set_top(category->top());
+ NoBarrier_Store(&top_, category->top_);
available_ += category->available();
category->Reset();
}
@@ -2079,15 +2098,16 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
void FreeListCategory::Reset() {
- top_ = NULL;
- end_ = NULL;
- available_ = 0;
+ set_top(NULL);
+ set_end(NULL);
+ set_available(0);
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
- FreeListNode** n = &top_;
+ FreeListNode* t = top();
+ FreeListNode** n = &t;
while (*n != NULL) {
if (Page::FromAddress((*n)->address()) == p) {
FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
@@ -2097,16 +2117,27 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
n = (*n)->next_address();
}
}
- if (top_ == NULL) {
- end_ = NULL;
+ set_top(t);
+ if (top() == NULL) {
+ set_end(NULL);
}
available_ -= sum;
return sum;
}
+bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
+ FreeListNode* node = top();
+ while (node != NULL) {
+ if (Page::FromAddress(node->address()) == p) return true;
+ node = node->next();
+ }
+ return false;
+}
+
+
FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
- FreeListNode* node = top_;
+ FreeListNode* node = top();
if (node == NULL) return NULL;
@@ -2145,8 +2176,8 @@ FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top_);
- top_ = node;
+ node->set_next(top());
+ set_top(node);
if (end_ == NULL) {
end_ = node;
}
@@ -2155,7 +2186,7 @@ void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top_;
+ FreeListNode* n = top();
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
@@ -2264,7 +2295,8 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
int huge_list_available = huge_list_.available();
- for (FreeListNode** cur = huge_list_.GetTopAddress();
+ FreeListNode* top_node = huge_list_.top();
+ for (FreeListNode** cur = &top_node;
*cur != NULL;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
@@ -2298,6 +2330,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
}
+ huge_list_.set_top(top_node);
if (huge_list_.top() == NULL) {
huge_list_.set_end(NULL);
}
@@ -2359,7 +2392,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
int new_node_size = 0;
FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
- owner_->SetTop(NULL, NULL);
+ owner_->SetTopAndLimit(NULL, NULL);
return NULL;
}
@@ -2384,26 +2417,31 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// a little of this again immediately - see below.
owner_->Allocate(new_node_size);
- if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
+ if (owner_->heap()->inline_allocation_disabled()) {
+ // Keep the linear allocation area empty if requested to do so; just
+ // return the area to the free list instead.
+ owner_->Free(new_node->address() + size_in_bytes, bytes_left);
+ ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
+ } else if (bytes_left > kThreshold &&
+ owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+ FLAG_incremental_marking_steps) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
owner_->Free(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes + linear_size);
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + size_in_bytes + linear_size);
} else if (bytes_left > 0) {
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + new_node_size);
} else {
// TODO(gc) Try not freeing linear allocation region when bytes_left
// are zero.
- owner_->SetTop(NULL, NULL);
+ owner_->SetTopAndLimit(NULL, NULL);
}
return new_node;
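The branches above all carve the node pulled from the free list into three pieces: the requested object, an optional bounded linear allocation area, and a tail that goes straight back to the free list. A standalone sketch of that split, assuming size <= node_size; the names and the struct are illustrative, not V8's API:

#include <cstddef>

struct Split {
  char* object;       // start of the allocated object
  char* area_start;   // new linear allocation area, may be empty
  char* area_end;
  char* freed_start;  // tail handed back to the free list, may be empty
  char* freed_end;
};

Split SplitNode(char* node, std::size_t node_size, std::size_t size,
                std::size_t max_linear_area, bool inline_allocation_disabled) {
  Split s{node, nullptr, nullptr, nullptr, nullptr};
  char* rest = node + size;
  char* end = node + node_size;
  if (inline_allocation_disabled) {
    s.freed_start = rest;                 // keep the linear area empty
    s.freed_end = end;
  } else if (static_cast<std::size_t>(end - rest) > max_linear_area) {
    s.area_start = rest;                  // cap the linear area so the slow path
    s.area_end = rest + max_linear_area;  // runs again reasonably soon
    s.freed_start = s.area_end;
    s.freed_end = end;
  } else {
    s.area_start = rest;                  // give the whole remainder to the allocator
    s.area_end = end;
  }
  return s;
}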
@@ -2427,6 +2465,14 @@ intptr_t FreeList::EvictFreeListItems(Page* p) {
}
+bool FreeList::ContainsPageFreeListItems(Page* p) {
+ return huge_list_.ContainsPageFreeListItemsInList(p) ||
+ small_list_.ContainsPageFreeListItemsInList(p) ||
+ medium_list_.ContainsPageFreeListItemsInList(p) ||
+ large_list_.ContainsPageFreeListItemsInList(p);
+}
+
+
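A small sketch of the page-membership query added above: walk each size class's intrusive list and report whether any node lies inside the page's address range. PageRange stands in for the Page::FromAddress-based check; everything here is illustrative.

#include <cstdint>

struct ListNode { ListNode* next = nullptr; };

struct PageRange {
  uintptr_t begin, end;
  bool Contains(const ListNode* n) const {
    uintptr_t a = reinterpret_cast<uintptr_t>(n);
    return a >= begin && a < end;
  }
};

bool ListContainsPageItems(const ListNode* top, const PageRange& page) {
  for (const ListNode* n = top; n != nullptr; n = n->next) {
    if (page.Contains(n)) return true;
  }
  return false;
}

bool FreeListContainsPageItems(const ListNode* small_list, const ListNode* medium_list,
                               const ListNode* large_list, const ListNode* huge_list,
                               const PageRange& page) {
  return ListContainsPageItems(huge_list, page) ||
         ListContainsPageItems(small_list, page) ||
         ListContainsPageItems(medium_list, page) ||
         ListContainsPageItems(large_list, page);
}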
void FreeList::RepairLists(Heap* heap) {
small_list_.RepairFreeList(heap);
medium_list_.RepairFreeList(heap);
@@ -2438,7 +2484,7 @@ void FreeList::RepairLists(Heap* heap) {
#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
@@ -2454,7 +2500,7 @@ static const int kVeryLongFreeList = 500;
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
length++;
cur = cur->next();
@@ -2489,37 +2535,10 @@ intptr_t FreeList::SumFreeLists() {
// -----------------------------------------------------------------------------
// OldSpace implementation
-bool NewSpace::ReserveSpace(int bytes) {
- // We can't reliably unpack a partial snapshot that needs more new space
- // space than the minimum NewSpace size. The limit can be set lower than
- // the end of new space either because there is more space on the next page
- // or because we have lowered the limit in order to get periodic incremental
- // marking. The most reliable way to ensure that there is linear space is
- // to do the allocation, then rewind the limit.
- ASSERT(bytes <= InitialCapacity());
- MaybeObject* maybe = AllocateRaw(bytes);
- Object* object = NULL;
- if (!maybe->ToObject(&object)) return false;
- HeapObject* allocation = HeapObject::cast(object);
- Address top = allocation_info_.top();
- if ((top - bytes) == allocation->address()) {
- allocation_info_.set_top(allocation->address());
- return true;
- }
- // There may be a borderline case here where the allocation succeeded, but
- // the limit and top have moved on to a new page. In that case we try again.
- return ReserveSpace(bytes);
-}
-
-
void PagedSpace::PrepareForMarkCompact() {
// We don't have a linear allocation area while sweeping. It will be restored
// on the first allocation after the sweep.
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap.
- int old_linear_size = static_cast<int>(limit() - top());
- Free(top(), old_linear_size);
- SetTop(NULL, NULL);
+ EmptyAllocationInfo();
// Stop lazy sweeping and clear marking bits for unswept pages.
if (first_unswept_page_ != NULL) {
@@ -2546,28 +2565,6 @@ void PagedSpace::PrepareForMarkCompact() {
}
-bool PagedSpace::ReserveSpace(int size_in_bytes) {
- ASSERT(size_in_bytes <= AreaSize());
- ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
- Address current_top = allocation_info_.top();
- Address new_top = current_top + size_in_bytes;
- if (new_top <= allocation_info_.limit()) return true;
-
- HeapObject* new_area = free_list_.Allocate(size_in_bytes);
- if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
- if (new_area == NULL) return false;
-
- int old_linear_size = static_cast<int>(limit() - top());
- // Mark the old linear allocation area with a free space so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- Free(top(), old_linear_size);
-
- SetTop(new_area->address(), new_area->address() + size_in_bytes);
- return true;
-}
-
-
intptr_t PagedSpace::SizeOfObjects() {
ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
@@ -2583,15 +2580,6 @@ void PagedSpace::RepairFreeListsAfterBoot() {
}
-// You have to call this last, since the implementation from PagedSpace
-// doesn't know that memory was 'promised' to large object space.
-bool LargeObjectSpace::ReserveSpace(int bytes) {
- return heap()->OldGenerationCapacityAvailable() >= bytes &&
- (!heap()->incremental_marking()->IsStopped() ||
- heap()->OldGenerationSpaceAvailable() >= bytes);
-}
-
-
bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
if (IsLazySweepingComplete()) return true;
@@ -2645,7 +2633,7 @@ bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->AreSweeperThreadsActivated()) {
if (collector->IsConcurrentSweepingInProgress()) {
- if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
+ if (collector->RefillFreeLists(this) < size_in_bytes) {
if (!collector->sequential_sweeping()) {
collector->WaitUntilSweepingCompleted();
return true;
@@ -2846,23 +2834,6 @@ void PagedSpace::ReportStatistics() {
}
#endif
-// -----------------------------------------------------------------------------
-// FixedSpace implementation
-
-void FixedSpace::PrepareForMarkCompact() {
- // Call prepare of the super class.
- PagedSpace::PrepareForMarkCompact();
-
- // During a non-compacting collection, everything below the linear
- // allocation pointer except wasted top-of-page blocks is considered
- // allocated and we will rediscover available bytes during the
- // collection.
- accounting_stats_.AllocateBytes(free_list_.available());
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
-}
-
// -----------------------------------------------------------------------------
// MapSpace implementation
@@ -2938,6 +2909,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap,
bool LargeObjectSpace::SetUp() {
first_page_ = NULL;
size_ = 0;
+ maximum_committed_ = 0;
page_count_ = 0;
objects_size_ = 0;
chunk_map_.Clear();
@@ -2984,6 +2956,10 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
page->set_next_page(first_page_);
first_page_ = page;
+ if (size_ > maximum_committed_) {
+ maximum_committed_ = size_;
+ }
+
// Register all MemoryChunk::kAlignment-aligned chunks covered by
// this large page in the chunk map.
uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 2cd92c59d8..770b88a9fb 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -103,7 +103,7 @@ class Isolate;
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
#define ASSERT_OBJECT_SIZE(size) \
- ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
+ ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
@@ -313,11 +313,21 @@ class MemoryChunk {
bool is_valid() { return address() != NULL; }
- MemoryChunk* next_chunk() const { return next_chunk_; }
- MemoryChunk* prev_chunk() const { return prev_chunk_; }
+ MemoryChunk* next_chunk() const {
+ return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_));
+ }
- void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
+ MemoryChunk* prev_chunk() const {
+ return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_));
+ }
+
+ void set_next_chunk(MemoryChunk* next) {
+ Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next));
+ }
+
+ void set_prev_chunk(MemoryChunk* prev) {
+ Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev));
+ }
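The accessors above replace plain pointer fields with acquire loads and release stores. A sketch of the same idea in standard C++ (std::atomic instead of V8's Acquire_Load/Release_Store wrappers): a reader that loads next() with acquire semantics observes every write made before the matching release store of that pointer.

#include <atomic>

class Chunk {
 public:
  Chunk* next() const { return next_.load(std::memory_order_acquire); }
  void set_next(Chunk* next) { next_.store(next, std::memory_order_release); }

 private:
  std::atomic<Chunk*> next_{nullptr};
};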
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
@@ -457,16 +467,32 @@ class MemoryChunk {
// Return all current flags.
intptr_t GetFlags() { return flags_; }
- intptr_t parallel_sweeping() const {
- return parallel_sweeping_;
+
+ // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
+ // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently being swept by a
+ // sweeper thread or was already swept by one.
+ // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
+ // sweeping must not be performed on that page.
+ enum ParallelSweepingState {
+ PARALLEL_SWEEPING_DONE,
+ PARALLEL_SWEEPING_IN_PROGRESS,
+ PARALLEL_SWEEPING_PENDING
+ };
+
+ ParallelSweepingState parallel_sweeping() {
+ return static_cast<ParallelSweepingState>(
+ NoBarrier_Load(&parallel_sweeping_));
}
- void set_parallel_sweeping(intptr_t state) {
- parallel_sweeping_ = state;
+ void set_parallel_sweeping(ParallelSweepingState state) {
+ NoBarrier_Store(&parallel_sweeping_, state);
}
bool TryParallelSweeping() {
- return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+ return NoBarrier_CompareAndSwap(&parallel_sweeping_,
+ PARALLEL_SWEEPING_PENDING,
+ PARALLEL_SWEEPING_IN_PROGRESS) ==
+ PARALLEL_SWEEPING_PENDING;
}
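TryParallelSweeping() above is a compare-and-swap claiming protocol: a sweeper thread owns a page only if its CAS moves the state from PENDING to IN_PROGRESS, so each page is swept by exactly one thread. A standalone sketch with std::atomic, using illustrative names:

#include <atomic>

enum SweepingState { DONE, IN_PROGRESS, PENDING };

struct PageState {
  std::atomic<int> state{PENDING};

  // Returns true only for the single thread whose CAS succeeds.
  bool TryClaimForSweeping() {
    int expected = PENDING;
    return state.compare_exchange_strong(expected, IN_PROGRESS);
  }

  void MarkSweepingDone() { state.store(DONE, std::memory_order_release); }
};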
// Manage live byte count (count of bytes known to be live,
@@ -536,7 +562,7 @@ class MemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
- static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
+ static const intptr_t kSizeOffset = 0;
static const intptr_t kLiveBytesOffset =
kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
@@ -550,7 +576,8 @@ class MemoryChunk {
static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
kIntSize + kIntSize + kPointerSize +
- 5 * kPointerSize;
+ 5 * kPointerSize +
+ kPointerSize + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -622,7 +649,7 @@ class MemoryChunk {
inline Heap* heap() { return heap_; }
- static const int kFlagsOffset = kPointerSize * 3;
+ static const int kFlagsOffset = kPointerSize;
bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
@@ -671,8 +698,6 @@ class MemoryChunk {
static inline void UpdateHighWaterMark(Address mark);
protected:
- MemoryChunk* next_chunk_;
- MemoryChunk* prev_chunk_;
size_t size_;
intptr_t flags_;
@@ -702,7 +727,7 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
- intptr_t parallel_sweeping_;
+ AtomicWord parallel_sweeping_;
// PagedSpace free-list statistics.
intptr_t available_in_small_free_list_;
@@ -719,6 +744,12 @@ class MemoryChunk {
Executability executable,
Space* owner);
+ private:
+ // next_chunk_ holds a MemoryChunk* encoded as an AtomicWord.
+ AtomicWord next_chunk_;
+ // prev_chunk_ holds a MemoryChunk* encoded as an AtomicWord.
+ AtomicWord prev_chunk_;
+
friend class MemoryAllocator;
};
@@ -779,15 +810,11 @@ class Page : public MemoryChunk {
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- // Object area size in bytes.
- static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
-
// Maximum object size that fits in a page. Objects larger than that size
// are allocated in large object space and are never moved in memory. This
// also applies to new space allocation, since objects are never migrated
// from new space to large object space. Takes double alignment into account.
- static const int kMaxNonCodeHeapObjectSize =
- kNonCodeObjectAreaSize - kPointerSize;
+ static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
@@ -1080,7 +1107,7 @@ class MemoryAllocator {
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
+ return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
}
// Returns an indication of whether a pointer is in a space that has
@@ -1388,6 +1415,7 @@ class AllocationStats BASE_EMBEDDED {
// Zero out all the allocation statistics (i.e., no capacity).
void Clear() {
capacity_ = 0;
+ max_capacity_ = 0;
size_ = 0;
waste_ = 0;
}
@@ -1406,6 +1434,7 @@ class AllocationStats BASE_EMBEDDED {
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
+ intptr_t MaxCapacity() { return max_capacity_; }
intptr_t Size() { return size_; }
intptr_t Waste() { return waste_; }
@@ -1415,6 +1444,9 @@ class AllocationStats BASE_EMBEDDED {
void ExpandSpace(int size_in_bytes) {
capacity_ += size_in_bytes;
size_ += size_in_bytes;
+ if (capacity_ > max_capacity_) {
+ max_capacity_ = capacity_;
+ }
ASSERT(size_ >= 0);
}
@@ -1448,6 +1480,7 @@ class AllocationStats BASE_EMBEDDED {
private:
intptr_t capacity_;
+ intptr_t max_capacity_;
intptr_t size_;
intptr_t waste_;
};
@@ -1501,7 +1534,7 @@ class FreeListNode: public HeapObject {
class FreeListCategory {
public:
FreeListCategory() :
- top_(NULL),
+ top_(0),
end_(NULL),
available_(0) {}
@@ -1515,12 +1548,17 @@ class FreeListCategory {
FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
intptr_t EvictFreeListItemsInList(Page* p);
+ bool ContainsPageFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
- FreeListNode** GetTopAddress() { return &top_; }
- FreeListNode* top() const { return top_; }
- void set_top(FreeListNode* top) { top_ = top; }
+ FreeListNode* top() const {
+ return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_));
+ }
+
+ void set_top(FreeListNode* top) {
+ NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top));
+ }
FreeListNode** GetEndAddress() { return &end_; }
FreeListNode* end() const { return end_; }
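Note the contrast with the chunk links earlier: the top_ accessors above use NoBarrier_* operations, which only guarantee that the word itself is read and written atomically, with no ordering of surrounding memory. A sketch of the equivalent relaxed-atomic encoding in standard C++, with illustrative type names:

#include <atomic>
#include <cstdint>

struct ListHeadNode;

class CategoryHead {
 public:
  ListHeadNode* top() const {
    return reinterpret_cast<ListHeadNode*>(top_.load(std::memory_order_relaxed));
  }
  void set_top(ListHeadNode* top) {
    top_.store(reinterpret_cast<intptr_t>(top), std::memory_order_relaxed);
  }

 private:
  std::atomic<intptr_t> top_{0};  // the head pointer, stored as a machine word
};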
@@ -1532,13 +1570,18 @@ class FreeListCategory {
Mutex* mutex() { return &mutex_; }
+ bool IsEmpty() {
+ return top() == 0;
+ }
+
#ifdef DEBUG
intptr_t SumFreeList();
int FreeListLength();
#endif
private:
- FreeListNode* top_;
+ // top_ points to the top FreeListNode* in the free list category.
+ AtomicWord top_;
FreeListNode* end_;
Mutex mutex_;
@@ -1570,7 +1613,7 @@ class FreeListCategory {
// These spaces are called large.
// At least 16384 words. This list is for objects of 2048 words or larger.
// Empty pages are added to this list. These spaces are called huge.
-class FreeList BASE_EMBEDDED {
+class FreeList {
public:
explicit FreeList(PagedSpace* owner);
@@ -1599,6 +1642,11 @@ class FreeList BASE_EMBEDDED {
// 'wasted_bytes'. The size should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+ bool IsEmpty() {
+ return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
+ large_list_.IsEmpty() && huge_list_.IsEmpty();
+ }
+
#ifdef DEBUG
void Zap();
intptr_t SumFreeLists();
@@ -1609,6 +1657,7 @@ class FreeList BASE_EMBEDDED {
void RepairLists(Heap* heap);
intptr_t EvictFreeListItems(Page* p);
+ bool ContainsPageFreeListItems(Page* p);
FreeListCategory* small_list() { return &small_list_; }
FreeListCategory* medium_list() { return &medium_list_; }
@@ -1618,7 +1667,7 @@ class FreeList BASE_EMBEDDED {
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
+ static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
@@ -1677,10 +1726,10 @@ class PagedSpace : public Space {
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
- virtual void RepairFreeListsAfterBoot();
+ void RepairFreeListsAfterBoot();
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
+ void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()).
intptr_t Capacity() { return accounting_stats_.Capacity(); }
@@ -1689,6 +1738,9 @@ class PagedSpace : public Space {
// spaces this equals the capacity.
intptr_t CommittedMemory() { return Capacity(); }
+ // The maximum amount of memory ever committed for this space.
+ intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
+
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
@@ -1755,18 +1807,9 @@ class PagedSpace : public Space {
return allocation_info_.limit_address();
}
- enum AllocationType {
- NEW_OBJECT,
- MOVE_OBJECT
- };
-
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(
- int size_in_bytes,
- AllocationType event = NEW_OBJECT);
-
- virtual bool ReserveSpace(int bytes);
+ MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
@@ -1783,7 +1826,7 @@ class PagedSpace : public Space {
}
// Set space allocation info.
- void SetTop(Address top, Address limit) {
+ void SetTopAndLimit(Address top, Address limit) {
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1791,13 +1834,20 @@ class PagedSpace : public Space {
allocation_info_.set_limit(limit);
}
+ // Empty space allocation info, returning unused area to free list.
+ void EmptyAllocationInfo() {
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap.
+ int old_linear_size = static_cast<int>(limit() - top());
+ Free(top(), old_linear_size);
+ SetTopAndLimit(NULL, NULL);
+ }
+
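EmptyAllocationInfo() frees the unused linear area rather than simply dropping it because the gap must stay iterable: a free-space record is written over it so a heap scan can step across. A minimal sketch of that idea under the assumption that the gap is at least one header wide; the header type is a stand-in, not V8's free-space map layout.

#include <cassert>
#include <cstddef>
#include <cstring>

struct FreeSpaceHeader { std::size_t size; };  // stand-in for a free-space filler

void MakeGapIterable(char* start, std::size_t size) {
  if (size == 0) return;
  assert(size >= sizeof(FreeSpaceHeader));  // real heaps use one/two-word fillers for tiny gaps
  FreeSpaceHeader header{size};
  std::memcpy(start, &header, sizeof(header));
  // A heap walker reaching `start` reads the header and skips `size` bytes forward.
}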
void Allocate(int bytes) {
accounting_stats_.AllocateBytes(bytes);
}
- void IncreaseCapacity(int size) {
- accounting_stats_.ExpandSpace(size);
- }
+ void IncreaseCapacity(int size);
// Releases an unused page and shrinks the space.
void ReleasePage(Page* page, bool unlink);
@@ -1914,12 +1964,6 @@ class PagedSpace : public Space {
// Normal allocation information.
AllocationInfo allocation_info_;
- // Bytes of each page that cannot be allocated. Possibly non-zero
- // for pages in spaces with only fixed-size objects. Always zero
- // for pages in spaces with variable sized objects (those pages are
- // padded with free-list nodes).
- int page_extra_;
-
bool was_swept_conservatively_;
// The first page to be swept when the lazy sweeper advances. Is set
@@ -1944,7 +1988,7 @@ class PagedSpace : public Space {
MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
- friend class SweeperThread;
+ friend class MarkCompactCollector;
};
@@ -2001,7 +2045,7 @@ class NewSpacePage : public MemoryChunk {
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
- static const int kAreaSize = Page::kNonCodeObjectAreaSize;
+ static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
inline NewSpacePage* next_page() const {
return static_cast<NewSpacePage*>(next_chunk());
@@ -2053,6 +2097,12 @@ class NewSpacePage : public MemoryChunk {
return NewSpacePage::FromAddress(address_limit - 1);
}
+ // Checks if address1 and address2 are on the same new space page.
+ static inline bool OnSamePage(Address address1, Address address2) {
+ return NewSpacePage::FromAddress(address1) ==
+ NewSpacePage::FromAddress(address2);
+ }
+
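OnSamePage() above works because pages are aligned power-of-two regions: two addresses share a page exactly when they agree on all bits above the page offset, which is what the FromAddress comparison boils down to. A sketch with an assumed page-size constant:

#include <cstdint>

constexpr uintptr_t kPageSizeBits = 20;  // illustrative 1 MB pages
constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << kPageSizeBits) - 1;

inline bool OnSamePage(uintptr_t a, uintptr_t b) {
  return (a & ~kPageAlignmentMask) == (b & ~kPageAlignmentMask);
}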
private:
// Create a NewSpacePage object that is only used as anchor
// for the doubly-linked list of real pages.
@@ -2167,11 +2217,6 @@ class SemiSpace : public Space {
return 0;
}
- virtual bool ReserveSpace(int bytes) {
- UNREACHABLE();
- return false;
- }
-
bool is_committed() { return committed_; }
bool Commit();
bool Uncommit();
@@ -2207,6 +2252,9 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
+ // Returns the maximum amount of memory ever committed by the semi space.
+ size_t MaximumCommittedMemory() { return maximum_committed_; }
+
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
@@ -2215,6 +2263,9 @@ class SemiSpace : public Space {
// Copies the flags into the masked positions on all pages in the space.
void FlipPages(intptr_t flags, intptr_t flag_mask);
+ // Updates Capacity and MaximumCommitted based on new capacity.
+ void SetCapacity(int new_capacity);
+
NewSpacePage* anchor() { return &anchor_; }
// The current and maximum capacity of the space.
@@ -2222,6 +2273,8 @@ class SemiSpace : public Space {
int maximum_capacity_;
int initial_capacity_;
+ intptr_t maximum_committed_;
+
// The start address of the space.
Address start_;
// Used to govern object promotion during mark-compact collection.
@@ -2407,6 +2460,12 @@ class NewSpace : public Space {
return Capacity();
}
+ // Return the maximum amount of memory ever committed for new space.
+ intptr_t MaximumCommittedMemory() {
+ return to_space_.MaximumCommittedMemory() +
+ from_space_.MaximumCommittedMemory();
+ }
+
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
@@ -2438,6 +2497,12 @@ class NewSpace : public Space {
allocation_info_.set_top(top);
}
+ // Return the address of the allocation pointer limit in the active semispace.
+ Address limit() {
+ ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
+ return allocation_info_.limit();
+ }
+
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_.space_start(); }
@@ -2477,16 +2542,10 @@ class NewSpace : public Space {
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
+ void UpdateInlineAllocationLimit(int size_in_bytes);
void LowerInlineAllocationLimit(intptr_t step) {
inline_allocation_limit_step_ = step;
- if (step == 0) {
- allocation_info_.set_limit(to_space_.page_high());
- } else {
- Address new_limit = Min(
- allocation_info_.top() + inline_allocation_limit_step_,
- allocation_info_.limit());
- allocation_info_.set_limit(new_limit);
- }
+ UpdateInlineAllocationLimit(0);
top_on_previous_step_ = allocation_info_.top();
}
@@ -2521,8 +2580,6 @@ class NewSpace : public Space {
// if successful.
bool AddFreshPage();
- virtual bool ReserveSpace(int bytes);
-
#ifdef VERIFY_HEAP
// Verify the active semispace.
virtual void Verify();
@@ -2618,12 +2675,6 @@ class OldSpace : public PagedSpace {
AllocationSpace id,
Executability executable)
: PagedSpace(heap, max_capacity, id, executable) {
- page_extra_ = 0;
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->area_end();
}
public:
@@ -2640,43 +2691,13 @@ class OldSpace : public PagedSpace {
// -----------------------------------------------------------------------------
-// Old space for objects of a fixed size
-
-class FixedSpace : public PagedSpace {
- public:
- FixedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- int object_size_in_bytes)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
- object_size_in_bytes_(object_size_in_bytes) {
- page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->area_end() - page_extra_;
- }
-
- int object_size_in_bytes() { return object_size_in_bytes_; }
-
- // Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
-
- private:
- // The size of objects in this space.
- int object_size_in_bytes_;
-};
-
-
-// -----------------------------------------------------------------------------
// Old space for all map objects
-class MapSpace : public FixedSpace {
+class MapSpace : public PagedSpace {
public:
// Creates a map space object with a maximum capacity.
MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, Map::kSize),
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
max_map_space_pages_(kMaxMapPageIndex - 1) {
}
@@ -2696,7 +2717,7 @@ class MapSpace : public FixedSpace {
virtual void VerifyObject(HeapObject* obj);
private:
- static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
+ static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
// Do map space compaction if there is a page gap.
int CompactionThreshold() {
@@ -2713,12 +2734,12 @@ class MapSpace : public FixedSpace {
// -----------------------------------------------------------------------------
// Old space for simple property cell objects
-class CellSpace : public FixedSpace {
+class CellSpace : public PagedSpace {
public:
// Creates a property cell space object with a maximum capacity.
CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, Cell::kSize)
- {}
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
+ }
virtual int RoundSizeDownToObjectAlignment(int size) {
if (IsPowerOf2(Cell::kSize)) {
@@ -2739,13 +2760,13 @@ class CellSpace : public FixedSpace {
// -----------------------------------------------------------------------------
// Old space for all global object property cell objects
-class PropertyCellSpace : public FixedSpace {
+class PropertyCellSpace : public PagedSpace {
public:
// Creates a property cell space object with a maximum capacity.
PropertyCellSpace(Heap* heap, intptr_t max_capacity,
AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, PropertyCell::kSize)
- {}
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
+ }
virtual int RoundSizeDownToObjectAlignment(int size) {
if (IsPowerOf2(PropertyCell::kSize)) {
@@ -2802,6 +2823,10 @@ class LargeObjectSpace : public Space {
return objects_size_;
}
+ intptr_t MaximumCommittedMemory() {
+ return maximum_committed_;
+ }
+
intptr_t CommittedMemory() {
return Size();
}
@@ -2831,11 +2856,6 @@ class LargeObjectSpace : public Space {
// Checks whether the space is empty.
bool IsEmpty() { return first_page_ == NULL; }
- // See the comments for ReserveSpace in the Space class. This has to be
- // called after ReserveSpace has been called on the paged spaces, since they
- // may use some memory, leaving less for large objects.
- virtual bool ReserveSpace(int bytes);
-
LargePage* first_page() { return first_page_; }
#ifdef VERIFY_HEAP
@@ -2853,6 +2873,7 @@ class LargeObjectSpace : public Space {
private:
intptr_t max_capacity_;
+ intptr_t maximum_committed_;
// The head of the linked list of large object chunks.
LargePage* first_page_;
intptr_t size_; // allocated bytes
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 45b675fa8b..e2d15f5405 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -30,8 +30,6 @@
#include "factory.h"
#include "string-stream.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
@@ -299,8 +297,7 @@ Handle<String> StringStream::ToString(Isolate* isolate) {
void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
isolate->set_string_stream_current_security_token(NULL);
if (isolate->string_stream_debug_object_cache() == NULL) {
- isolate->set_string_stream_debug_object_cache(
- new List<HeapObject*, PreallocatedStorageAllocationPolicy>(0));
+ isolate->set_string_stream_debug_object_cache(new DebugObjectCache(0));
}
isolate->string_stream_debug_object_cache()->Clear();
}
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 14b44ca41f..74230c9864 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -61,10 +61,8 @@ function StringValueOf() {
// ECMA-262, section 15.5.4.4
function StringCharAt(pos) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.charAt"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.charAt");
+
var result = %_StringCharAt(this, pos);
if (%_IsSmi(result)) {
result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -75,10 +73,8 @@ function StringCharAt(pos) {
// ECMA-262 section 15.5.4.5
function StringCharCodeAt(pos) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.charCodeAt"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.charCodeAt");
+
var result = %_StringCharCodeAt(this, pos);
if (!%_IsSmi(result)) {
result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -89,10 +85,8 @@ function StringCharCodeAt(pos) {
// ECMA-262, section 15.5.4.6
function StringConcat() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.concat"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.concat");
+
var len = %_ArgumentsLength();
var this_as_string = TO_STRING_INLINE(this);
if (len === 1) {
@@ -113,10 +107,8 @@ function StringConcat() {
// ECMA-262 section 15.5.4.7
function StringIndexOf(pattern /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.indexOf"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.indexOf");
+
var subject = TO_STRING_INLINE(this);
pattern = TO_STRING_INLINE(pattern);
var index = 0;
@@ -132,10 +124,8 @@ function StringIndexOf(pattern /* position */) { // length == 1
// ECMA-262 section 15.5.4.8
function StringLastIndexOf(pat /* position */) { // length == 1
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.lastIndexOf"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.lastIndexOf");
+
var sub = TO_STRING_INLINE(this);
var subLength = sub.length;
var pat = TO_STRING_INLINE(pat);
@@ -165,10 +155,8 @@ function StringLastIndexOf(pat /* position */) { // length == 1
// This function is implementation specific. For now, we do not
// do anything locale specific.
function StringLocaleCompare(other) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.localeCompare"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.localeCompare");
+
return %StringLocaleCompare(TO_STRING_INLINE(this),
TO_STRING_INLINE(other));
}
@@ -176,10 +164,8 @@ function StringLocaleCompare(other) {
// ECMA-262 section 15.5.4.10
function StringMatch(regexp) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.match"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
+
var subject = TO_STRING_INLINE(this);
if (IS_REGEXP(regexp)) {
// Emulate RegExp.prototype.exec's side effect in step 5, even though
@@ -200,6 +186,28 @@ function StringMatch(regexp) {
}
+var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
+
+
+// ECMA-262 v6, section 21.1.3.12
+//
+// For now we do nothing, as proper normalization requires big tables.
+// If Intl is enabled, then i18n.js will override it and provide the
+// proper functionality.
+function StringNormalize(form) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
+
+ var form = form ? TO_STRING_INLINE(form) : 'NFC';
+ var normalizationForm = NORMALIZATION_FORMS.indexOf(form);
+ if (normalizationForm === -1) {
+ throw new $RangeError('The normalization form should be one of '
+ + NORMALIZATION_FORMS.join(', ') + '.');
+ }
+
+ return %_ValueOf(this);
+}
+
+
// This has the same size as the lastMatchInfo array, and can be used for
// functions that expect that structure to be returned. It is used when the
// needle is a string rather than a regexp. In this case we can't update
@@ -210,10 +218,8 @@ var reusableMatchInfo = [2, "", "", -1, -1];
// ECMA-262, section 15.5.4.11
function StringReplace(search, replace) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.replace"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.replace");
+
var subject = TO_STRING_INLINE(this);
// Decision tree for dispatch
@@ -543,10 +549,8 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
// ECMA-262 section 15.5.4.12
function StringSearch(re) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.search"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
+
var regexp;
if (IS_STRING(re)) {
regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
@@ -565,10 +569,8 @@ function StringSearch(re) {
// ECMA-262 section 15.5.4.13
function StringSlice(start, end) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.slice"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.slice");
+
var s = TO_STRING_INLINE(this);
var s_len = s.length;
var start_i = TO_INTEGER(start);
@@ -609,31 +611,27 @@ function StringSlice(start, end) {
// ECMA-262 section 15.5.4.14
function StringSplit(separator, limit) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.split"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.split");
+
var subject = TO_STRING_INLINE(this);
limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
- // ECMA-262 says that if separator is undefined, the result should
- // be an array of size 1 containing the entire string.
- if (IS_UNDEFINED(separator)) {
- return [subject];
- }
-
var length = subject.length;
if (!IS_REGEXP(separator)) {
- separator = TO_STRING_INLINE(separator);
+ var separator_string = TO_STRING_INLINE(separator);
if (limit === 0) return [];
- var separator_length = separator.length;
+ // ECMA-262 says that if separator is undefined, the result should
+ // be an array of size 1 containing the entire string.
+ if (IS_UNDEFINED(separator)) return [subject];
+
+ var separator_length = separator_string.length;
// If the separator string is empty then return the elements in the subject.
if (separator_length === 0) return %StringToArray(subject, limit);
- var result = %StringSplit(subject, separator, limit);
+ var result = %StringSplit(subject, separator_string, limit);
return result;
}
@@ -711,10 +709,8 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
// ECMA-262 section 15.5.4.15
function StringSubstring(start, end) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.subString"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.subString");
+
var s = TO_STRING_INLINE(this);
var s_len = s.length;
@@ -746,10 +742,8 @@ function StringSubstring(start, end) {
// This is not a part of ECMA-262.
function StringSubstr(start, n) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.substr"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.substr");
+
var s = TO_STRING_INLINE(this);
var len;
@@ -788,65 +782,51 @@ function StringSubstr(start, n) {
// ECMA-262, 15.5.4.16
function StringToLowerCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toLowerCase"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
+
return %StringToLowerCase(TO_STRING_INLINE(this));
}
// ECMA-262, 15.5.4.17
function StringToLocaleLowerCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toLocaleLowerCase"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
+
return %StringToLowerCase(TO_STRING_INLINE(this));
}
// ECMA-262, 15.5.4.18
function StringToUpperCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toUpperCase"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
+
return %StringToUpperCase(TO_STRING_INLINE(this));
}
// ECMA-262, 15.5.4.19
function StringToLocaleUpperCase() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.toLocaleUpperCase"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
+
return %StringToUpperCase(TO_STRING_INLINE(this));
}
// ES5, 15.5.4.20
function StringTrim() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.trim"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.trim");
+
return %StringTrim(TO_STRING_INLINE(this), true, true);
}
function StringTrimLeft() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.trimLeft"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimLeft");
+
return %StringTrim(TO_STRING_INLINE(this), true, false);
}
function StringTrimRight() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["String.prototype.trimRight"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimRight");
+
return %StringTrim(TO_STRING_INLINE(this), false, true);
}
@@ -984,6 +964,7 @@ function SetUpString() {
"lastIndexOf", StringLastIndexOf,
"localeCompare", StringLocaleCompare,
"match", StringMatch,
+ "normalize", StringNormalize,
"replace", StringReplace,
"search", StringSearch,
"slice", StringSlice,
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 1ec00d49bb..132ed711aa 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -35,6 +35,7 @@
#include "gdb-jit.h"
#include "ic-inl.h"
#include "stub-cache.h"
+#include "type-info.h"
#include "vm-state-inl.h"
namespace v8 {
@@ -100,124 +101,107 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) {
Handle<Code> StubCache::FindIC(Handle<Name> name,
- Handle<Map> stub_holder_map,
+ Handle<Map> stub_holder,
Code::Kind kind,
- Code::ExtraICState extra_state) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_state);
- Handle<Object> probe(stub_holder_map->FindInCodeCache(*name, flags),
- isolate_);
+ ExtraICState extra_state,
+ InlineCacheHolderFlag cache_holder) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind, extra_state, cache_holder);
+ Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
return Handle<Code>::null();
}
-Handle<Code> StubCache::FindIC(Handle<Name> name,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
- return FindIC(name, Handle<Map>(stub_holder->map()), kind, extra_ic_state);
-}
-
-
Handle<Code> StubCache::FindHandler(Handle<Name> name,
- Handle<JSObject> receiver,
+ Handle<Map> stub_holder,
Code::Kind kind,
- StrictModeFlag strict_mode) {
- Code::ExtraICState extra_ic_state = Code::kNoExtraICState;
- if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) {
- extra_ic_state = Code::ComputeExtraICState(
- STANDARD_STORE, strict_mode);
- }
+ InlineCacheHolderFlag cache_holder) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::HANDLER, extra_ic_state, Code::NORMAL, kind);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Code::HANDLER, kNoExtraICState, cache_holder, Code::NORMAL);
+
+ Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
return Handle<Code>::null();
}
-Handle<Code> StubCache::ComputeMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- Code::Kind kind = handler->handler_kind();
- Handle<Map> map(receiver->map());
- Handle<Code> ic = FindIC(name, map, kind, strict_mode);
- if (!ic.is_null()) return ic;
+Handle<Code> StubCache::ComputeMonomorphicIC(
+ Code::Kind kind,
+ Handle<Name> name,
+ Handle<HeapType> type,
+ Handle<Code> handler,
+ ExtraICState extra_ic_state) {
+ InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
+
+ Handle<Map> stub_holder;
+ Handle<Code> ic;
+ // There are multiple string maps that all use the same prototype. That
+ // prototype cannot hold multiple handlers, one for each of the string maps,
+ // for a single name. Hence, turn off caching of the IC.
+ bool can_be_cached = !type->Is(HeapType::String());
+ if (can_be_cached) {
+ stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate());
+ ic = FindIC(name, stub_holder, kind, extra_ic_state, flag);
+ if (!ic.is_null()) return ic;
+ }
if (kind == Code::LOAD_IC) {
- LoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ LoadStubCompiler ic_compiler(isolate(), extra_ic_state, flag);
+ ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
} else if (kind == Code::KEYED_LOAD_IC) {
- KeyedLoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ KeyedLoadStubCompiler ic_compiler(isolate(), extra_ic_state, flag);
+ ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
} else if (kind == Code::STORE_IC) {
- StoreStubCompiler ic_compiler(isolate(), strict_mode);
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ StoreStubCompiler ic_compiler(isolate(), extra_ic_state);
+ ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
} else {
ASSERT(kind == Code::KEYED_STORE_IC);
- KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE);
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ ASSERT(STANDARD_STORE ==
+ KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
+ KeyedStoreStubCompiler ic_compiler(isolate(), extra_ic_state);
+ ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
}
- HeapObject::UpdateMapCodeCache(receiver, name, ic);
+ if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
return ic;
}
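ComputeMonomorphicIC() above follows a compute-or-compile pattern: probe a per-holder cache first, compile on a miss, and write the result back only when caching is safe (the shared string-map prototype case opts out). A standalone sketch of that pattern with standard containers standing in for V8's map code caches; all names are illustrative.

#include <functional>
#include <map>
#include <string>
#include <utility>

struct Stub { std::string code; };

class StubCacheSketch {
 public:
  Stub ComputeMonomorphic(const std::string& holder, const std::string& name,
                          bool can_be_cached,
                          const std::function<Stub()>& compile) {
    if (can_be_cached) {
      auto it = cache_.find({holder, name});
      if (it != cache_.end()) return it->second;  // cache hit
    }
    Stub stub = compile();                        // cache miss: compile a new stub
    if (can_be_cached) cache_[{holder, name}] = stub;
    return stub;
  }

 private:
  std::map<std::pair<std::string, std::string>, Stub> cache_;
};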
Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
- Handle<JSObject> receiver) {
- // If no global objects are present in the prototype chain, the load
- // nonexistent IC stub can be shared for all names for a given map
- // and we use the empty string for the map cache in that case. If
- // there are global objects involved, we need to check global
- // property cells in the stub and therefore the stub will be
- // specific to the name.
- Handle<Name> cache_name = factory()->empty_string();
- Handle<JSObject> current;
- Handle<Object> next = receiver;
- Handle<JSGlobalObject> global;
- do {
- current = Handle<JSObject>::cast(next);
- next = Handle<Object>(current->GetPrototype(), isolate_);
- if (current->IsJSGlobalObject()) {
- global = Handle<JSGlobalObject>::cast(current);
- cache_name = name;
- } else if (!current->HasFastProperties()) {
- cache_name = name;
- }
- } while (!next->IsNull());
+ Handle<HeapType> type) {
+ InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
+ Handle<Map> stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate());
+ // If no dictionary mode objects are present in the prototype chain, the load
+ // nonexistent IC stub can be shared for all names for a given map and we use
+ // the empty string for the map cache in that case. If there are dictionary
+ // mode objects involved, we need to do negative lookups in the stub and
+ // therefore the stub will be specific to the name.
+ Handle<Map> current_map = stub_holder;
+ Handle<Name> cache_name = current_map->is_dictionary_map()
+ ? name : Handle<Name>::cast(isolate()->factory()->empty_string());
+ Handle<Object> next(current_map->prototype(), isolate());
+ Handle<JSObject> last = Handle<JSObject>::null();
+ while (!next->IsNull()) {
+ last = Handle<JSObject>::cast(next);
+ next = handle(current_map->prototype(), isolate());
+ current_map = handle(Handle<HeapObject>::cast(next)->map());
+ if (current_map->is_dictionary_map()) cache_name = name;
+ }
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
- Handle<Code> handler = FindHandler(cache_name, receiver, Code::LOAD_IC);
+ Handle<Code> handler = FindHandler(
+ cache_name, stub_holder, Code::LOAD_IC, flag);
if (!handler.is_null()) return handler;
- LoadStubCompiler compiler(isolate_);
- handler =
- compiler.CompileLoadNonexistent(receiver, current, cache_name, global);
- HeapObject::UpdateMapCodeCache(receiver, cache_name, handler);
+ LoadStubCompiler compiler(isolate_, kNoExtraICState, flag);
+ handler = compiler.CompileLoadNonexistent(type, last, cache_name);
+ Map::UpdateCodeCache(stub_holder, cache_name, handler);
return handler;
}
-Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- bool is_dont_delete) {
- Handle<Code> stub = FindIC(name, receiver, Code::LOAD_IC);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> ic =
- compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
- HeapObject::UpdateMapCodeCache(receiver, name, ic);
- return ic;
-}
-
-
Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
Handle<Name> name =
@@ -238,8 +222,8 @@ Handle<Code> StubCache::ComputeKeyedStoreElement(
Handle<Map> receiver_map,
StrictModeFlag strict_mode,
KeyedAccessStoreMode store_mode) {
- Code::ExtraICState extra_state =
- Code::ComputeExtraICState(store_mode, strict_mode);
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::KEYED_STORE_IC, extra_state);
@@ -253,208 +237,17 @@ Handle<Code> StubCache::ComputeKeyedStoreElement(
Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- KeyedStoreStubCompiler compiler(isolate(), strict_mode, store_mode);
+ KeyedStoreStubCompiler compiler(isolate(), extra_state);
Handle<Code> code = compiler.CompileStoreElement(receiver_map);
Map::UpdateCodeCache(receiver_map, name, code);
- ASSERT(Code::GetKeyedAccessStoreMode(code->extra_ic_state()) == store_mode);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
- Handle<GlobalObject> receiver,
- Handle<PropertyCell> cell,
- Handle<Object> value,
- StrictModeFlag strict_mode) {
- Handle<Type> union_type = PropertyCell::UpdatedType(cell, value);
- bool is_constant = union_type->IsConstant();
- StoreGlobalStub stub(strict_mode, is_constant);
-
- Handle<Code> code = FindIC(
- name, Handle<JSObject>::cast(receiver),
- Code::STORE_IC, stub.GetExtraICState());
- if (!code.is_null()) return code;
-
- // Replace the placeholder cell and global object map with the actual global
- // cell and receiver map.
- Handle<Map> meta_map(isolate_->heap()->meta_map());
- Handle<Object> receiver_map(receiver->map(), isolate_);
- code = stub.GetCodeCopyFromTemplate(isolate_);
- code->ReplaceNthObject(1, *meta_map, *receiver_map);
- Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
- code->ReplaceNthObject(1, *cell_map, *cell);
-
- HeapObject::UpdateMapCodeCache(receiver, name, code);
-
+ ASSERT(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state())
+ == store_mode);
return code;
}
-#define CALL_LOGGER_TAG(kind, type) \
- (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
-
-Handle<Code> StubCache::ComputeCallConstant(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *object, cache_holder));
-
- // Compute check type based on receiver/holder.
- CheckType check = RECEIVER_MAP_CHECK;
- if (object->IsString()) {
- check = STRING_CHECK;
- } else if (object->IsSymbol()) {
- check = SYMBOL_CHECK;
- } else if (object->IsNumber()) {
- check = NUMBER_CHECK;
- } else if (object->IsBoolean()) {
- check = BOOLEAN_CHECK;
- }
-
- if (check != RECEIVER_MAP_CHECK &&
- !function->IsBuiltin() &&
- function->shared()->is_classic_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- return Handle<Code>::null();
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::CONSTANT, argc, cache_holder);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallConstant(object, holder, name, check, function);
- code->set_check_type(check);
- ASSERT(flags == code->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
-
- if (CallStubCompiler::CanBeCached(function)) {
- HeapObject::UpdateMapCodeCache(stub_holder, name, code);
- }
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallField(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- PropertyIndex index) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *object, cache_holder));
-
- // TODO(1233596): We cannot do receiver map check for non-JS objects
- // because they may be represented as immediates without a
- // map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsSymbol() ||
- object->IsBoolean() || object->IsString()) {
- object = holder;
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::FIELD, argc, cache_holder);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallField(Handle<JSObject>::cast(object),
- holder, index, name);
- ASSERT(flags == code->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- HeapObject::UpdateMapCodeCache(stub_holder, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallInterceptor(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder) {
- // Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *object, cache_holder));
-
- // TODO(1233596): We cannot do receiver map check for non-JS objects
- // because they may be represented as immediates without a
- // map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsSymbol() ||
- object->IsBoolean() || object->IsString()) {
- object = holder;
- }
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::INTERCEPTOR, argc, cache_holder);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
- Handle<Code> code =
- compiler.CompileCallInterceptor(Handle<JSObject>::cast(object),
- holder, name);
- ASSERT(flags == code->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- HeapObject::UpdateMapCodeCache(stub_holder, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallGlobal(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::NORMAL, argc);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CallStubCompiler compiler(isolate(), argc, kind, extra_state);
- Handle<Code> code =
- compiler.CompileCallGlobal(receiver, holder, cell, function, name);
- ASSERT(flags == code->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- if (CallStubCompiler::CanBeCached(function)) {
- HeapObject::UpdateMapCodeCache(receiver, name, code);
- }
- return code;
-}
-
+#define CALL_LOGGER_TAG(kind, type) (Logger::KEYED_##type)
static void FillCache(Isolate* isolate, Handle<Code> code) {
Handle<UnseededNumberDictionary> dictionary =
@@ -465,14 +258,8 @@ static void FillCache(Isolate* isolate, Handle<Code> code) {
}
-Code* StubCache::FindCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind) {
- Code::ExtraICState extra_state =
- CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
- CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
- Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
+Code* StubCache::FindPreMonomorphicIC(Code::Kind kind, ExtraICState state) {
+ Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
UnseededNumberDictionary* dictionary =
isolate()->heap()->non_monomorphic_cache();
int entry = dictionary->FindEntry(isolate(), flags);
@@ -484,122 +271,52 @@ Code* StubCache::FindCallInitialize(int argc,
}
-Handle<Code> StubCache::ComputeCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind) {
- Code::ExtraICState extra_state =
- CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
- CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
- Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallInitialize(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallInitialize(int argc, RelocInfo::Mode mode) {
- return ComputeCallInitialize(argc, mode, Code::CALL_IC);
-}
-
-
-Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc) {
- return ComputeCallInitialize(argc, RelocInfo::CODE_TARGET,
- Code::KEYED_CALL_IC);
-}
-
-
-Handle<Code> StubCache::ComputeCallPreMonomorphic(
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallPreMonomorphic(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallNormal(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, MONOMORPHIC, extra_state, Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallNormal(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallArguments(int argc) {
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_CALL_IC, MEGAMORPHIC,
- Code::kNoExtraICState, Code::NORMAL, argc);
+Handle<Code> StubCache::ComputeLoad(InlineCacheState ic_state,
+ ExtraICState extra_state) {
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallArguments(flags);
+ Handle<Code> code;
+ if (ic_state == UNINITIALIZED) {
+ code = compiler.CompileLoadInitialize(flags);
+ } else if (ic_state == PREMONOMORPHIC) {
+ code = compiler.CompileLoadPreMonomorphic(flags);
+ } else if (ic_state == MEGAMORPHIC) {
+ code = compiler.CompileLoadMegamorphic(flags);
+ } else {
+ UNREACHABLE();
+ }
FillCache(isolate_, code);
return code;
}
-Handle<Code> StubCache::ComputeCallMegamorphic(
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
- Code::NORMAL, argc);
+Handle<Code> StubCache::ComputeStore(InlineCacheState ic_state,
+ ExtraICState extra_state) {
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallMegamorphic(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallMiss(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
- // and monomorphic stubs are not mixed up together in the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
- Code::NORMAL, argc, OWN_MAP);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+ Handle<Code> code;
+ if (ic_state == UNINITIALIZED) {
+ code = compiler.CompileStoreInitialize(flags);
+ } else if (ic_state == PREMONOMORPHIC) {
+ code = compiler.CompileStorePreMonomorphic(flags);
+ } else if (ic_state == GENERIC) {
+ code = compiler.CompileStoreGeneric(flags);
+ } else if (ic_state == MEGAMORPHIC) {
+ code = compiler.CompileStoreMegamorphic(flags);
+ } else {
+ UNREACHABLE();
+ }
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallMiss(flags);
FillCache(isolate_, code);
return code;
}
@@ -625,6 +342,7 @@ Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
}
+// TODO(verwaest): Change this method so it takes in a TypeHandleList.
Handle<Code> StubCache::ComputeLoadElementPolymorphic(
MapHandleList* receiver_maps) {
Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
@@ -633,12 +351,15 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
Handle<Object> probe = cache->Lookup(receiver_maps, flags);
if (probe->IsCode()) return Handle<Code>::cast(probe);
+ TypeHandleList types(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); i++) {
+ types.Add(HeapType::Class(receiver_maps->at(i), isolate()));
+ }
CodeHandleList handlers(receiver_maps->length());
KeyedLoadStubCompiler compiler(isolate_);
compiler.CompileElementHandlers(receiver_maps, &handlers);
Handle<Code> code = compiler.CompilePolymorphicIC(
- receiver_maps, &handlers, factory()->empty_string(),
- Code::NORMAL, ELEMENT);
+ &types, &handlers, factory()->empty_string(), Code::NORMAL, ELEMENT);
isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
@@ -647,24 +368,25 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
}
-Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
+Handle<Code> StubCache::ComputePolymorphicIC(
+ Code::Kind kind,
+ TypeHandleList* types,
+ CodeHandleList* handlers,
+ int number_of_valid_types,
+ Handle<Name> name,
+ ExtraICState extra_ic_state) {
Handle<Code> handler = handlers->at(0);
- Code::Kind kind = handler->handler_kind();
- Code::StubType type = number_of_valid_maps == 1 ? handler->type()
- : Code::NORMAL;
+ Code::StubType type = number_of_valid_types == 1 ? handler->type()
+ : Code::NORMAL;
if (kind == Code::LOAD_IC) {
- LoadStubCompiler ic_compiler(isolate_);
+ LoadStubCompiler ic_compiler(isolate_, extra_ic_state);
return ic_compiler.CompilePolymorphicIC(
- receiver_maps, handlers, name, type, PROPERTY);
+ types, handlers, name, type, PROPERTY);
} else {
ASSERT(kind == Code::STORE_IC);
- StoreStubCompiler ic_compiler(isolate_, strict_mode);
+ StoreStubCompiler ic_compiler(isolate_, extra_ic_state);
return ic_compiler.CompilePolymorphicIC(
- receiver_maps, handlers, name, type, PROPERTY);
+ types, handlers, name, type, PROPERTY);
}
}
@@ -679,60 +401,20 @@ Handle<Code> StubCache::ComputeStoreElementPolymorphic(
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
Handle<PolymorphicCodeCache> cache =
isolate_->factory()->polymorphic_code_cache();
- Code::ExtraICState extra_state = Code::ComputeExtraICState(store_mode,
- strict_mode);
+ ExtraICState extra_state = KeyedStoreIC::ComputeExtraICState(
+ strict_mode, store_mode);
Code::Flags flags =
Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
Handle<Object> probe = cache->Lookup(receiver_maps, flags);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- KeyedStoreStubCompiler compiler(isolate_, strict_mode, store_mode);
+ KeyedStoreStubCompiler compiler(isolate_, extra_state);
Handle<Code> code = compiler.CompileStoreElementPolymorphic(receiver_maps);
PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
return code;
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
- Code::Kind kind) {
- // Extra IC state is irrelevant for debug break ICs. They jump to
- // the actual call ic to carry out the work.
- Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_BREAK,
- Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallDebugBreak(flags);
- FillCache(isolate_, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
- Code::Kind kind) {
- // Extra IC state is irrelevant for debug break ICs. They jump to
- // the actual call ic to carry out the work.
- Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_PREPARE_STEP_IN,
- Code::NORMAL, argc);
- Handle<UnseededNumberDictionary> cache =
- isolate_->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate_, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallDebugPrepareStepIn(flags);
- FillCache(isolate_, code);
- return code;
-}
-#endif
-
-
void StubCache::Clear() {
Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
for (int i = 0; i < kPrimaryTableSize; i++) {
@@ -794,24 +476,25 @@ void StubCache::CollectMatchingMaps(SmallMapList* types,
RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
- JSObject* recv = JSObject::cast(args[0]);
- ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[1]);
+ JSObject* receiver = JSObject::cast(args[0]);
+ JSObject* holder = JSObject::cast(args[1]);
+ ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[2]);
Address setter_address = v8::ToCData<Address>(callback->setter());
v8::AccessorSetterCallback fun =
FUNCTION_CAST<v8::AccessorSetterCallback>(setter_address);
ASSERT(fun != NULL);
- ASSERT(callback->IsCompatibleReceiver(recv));
- Handle<Name> name = args.at<Name>(2);
- Handle<Object> value = args.at<Object>(3);
+ ASSERT(callback->IsCompatibleReceiver(receiver));
+ Handle<Name> name = args.at<Name>(3);
+ Handle<Object> value = args.at<Object>(4);
HandleScope scope(isolate);
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return *value;
Handle<String> str = Handle<String>::cast(name);
- LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
+ LOG(isolate, ApiNamedPropertyAccess("store", receiver, *name));
PropertyCallbackArguments
- custom_args(isolate, callback->data(), recv, recv);
+ custom_args(isolate, callback->data(), receiver, holder);
custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
@@ -867,12 +550,12 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
// If the load is non-contextual, just return the undefined result.
- // Note that both keyed and non-keyed loads may end up here, so we
- // can't use either LoadIC or KeyedLoadIC constructors.
+ // Note that both keyed and non-keyed loads may end up here.
HandleScope scope(isolate);
- IC ic(IC::NO_EXTRA_FRAME, isolate);
- ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsUndeclaredGlobal()) return isolate->heap()->undefined_value();
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ if (ic.contextual_mode() != CONTEXTUAL) {
+ return isolate->heap()->undefined_value();
+ }
// Throw a reference error.
Handle<Name> name_handle(name);
@@ -963,16 +646,15 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- Handle<JSObject> recv(JSObject::cast(args[0]));
- Handle<Name> name(Name::cast(args[1]));
- Handle<Object> value(args[2], isolate);
- ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
- ASSERT(recv->HasNamedInterceptor());
+ ASSERT(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ Handle<Name> name = args.at<Name>(1);
+ Handle<Object> value = args.at<Object>(2);
+ ASSERT(receiver->HasNamedInterceptor());
PropertyAttributes attr = NONE;
Handle<Object> result = JSObject::SetPropertyWithInterceptor(
- recv, name, value, attr, strict_mode);
+ receiver, name, value, attr, ic.strict_mode());
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -986,151 +668,78 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
}
-Handle<Code> StubCompiler::CompileCallInitialize(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateInitialize(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallInitialize");
- isolate()->counters()->call_initialize_stubs()->Increment();
+Handle<Code> StubCompiler::CompileLoadInitialize(Code::Flags flags) {
+ LoadIC::GenerateInitialize(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, *code));
+ CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- // The code of the PreMonomorphic stub is the same as the code
- // of the Initialized stub. They just differ on the code object flags.
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateInitialize(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
- isolate()->counters()->call_premonomorphic_stubs()->Increment();
+Handle<Code> StubCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
+ LoadIC::GeneratePreMonomorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, *code));
+ CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- // Call normal is always with a explict receiver.
- ASSERT(!CallIC::Contextual::decode(
- Code::ExtractExtraICStateFromFlags(flags)));
- CallIC::GenerateNormal(masm(), argc);
- } else {
- KeyedCallIC::GenerateNormal(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallNormal");
- isolate()->counters()->call_normal_stubs()->Increment();
+Handle<Code> StubCompiler::CompileLoadMegamorphic(Code::Flags flags) {
+ LoadIC::GenerateMegamorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, *code));
+ CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMegamorphic(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateMegamorphic(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMegamorphic");
- isolate()->counters()->call_megamorphic_stubs()->Increment();
+Handle<Code> StubCompiler::CompileStoreInitialize(Code::Flags flags) {
+ StoreIC::GenerateInitialize(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+ CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallArguments(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallArguments");
+Handle<Code> StubCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
+ StoreIC::GeneratePreMonomorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
- CALL_MEGAMORPHIC_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+ CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallMiss(Code::Flags flags) {
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- if (kind == Code::CALL_IC) {
- CallIC::GenerateMiss(masm(), argc, extra_state);
- } else {
- KeyedCallIC::GenerateMiss(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMiss");
- isolate()->counters()->call_megamorphic_stubs()->Increment();
+Handle<Code> StubCompiler::CompileStoreGeneric(Code::Flags flags) {
+ ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+ StrictModeFlag strict_mode = StoreIC::GetStrictMode(extra_state);
+ StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode);
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
- *code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MISS, *code));
+ CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
return code;
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Code> StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
- Debug::GenerateCallICDebugBreak(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugBreak");
+Handle<Code> StubCompiler::CompileStoreMegamorphic(Code::Flags flags) {
+ StoreIC::GenerateMegamorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
- CALL_DEBUG_BREAK_TAG),
- *code, code->arguments_count()));
+ CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
return code;
}
-Handle<Code> StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
- // Use the same code for the the step in preparations as we do for the
- // miss case.
- int argc = Code::ExtractArgumentsCountFromFlags(flags);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- if (kind == Code::CALL_IC) {
- // For the debugger extra ic state is irrelevant.
- CallIC::GenerateMiss(masm(), argc, Code::kNoExtraICState);
- } else {
- KeyedCallIC::GenerateMiss(masm(), argc);
- }
- Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
- PROFILE(isolate(),
- CodeCreateEvent(
- CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
- *code,
- code->arguments_count()));
- return code;
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
#undef CALL_LOGGER_TAG
@@ -1140,6 +749,9 @@ Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
CodeDesc desc;
masm_.GetCode(&desc);
Handle<Code> code = factory()->NewCode(desc, flags, masm_.CodeObject());
+ if (code->has_major_key()) {
+ code->set_major_key(CodeStub::NoCache);
+ }
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) code->Disassemble(name);
#endif
@@ -1150,7 +762,7 @@ Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
Handle<Name> name) {
return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
- ? GetCodeWithFlags(flags, *Handle<String>::cast(name)->ToCString())
+ ? GetCodeWithFlags(flags, Handle<String>::cast(name)->ToCString().get())
: GetCodeWithFlags(flags, NULL);
}
@@ -1169,167 +781,195 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
Register LoadStubCompiler::HandlerFrontendHeader(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss) {
- return CheckPrototypes(object, object_reg, holder,
- scratch1(), scratch2(), scratch3(),
- name, miss, SKIP_RECEIVER);
+ PrototypeCheckType check_type = CHECK_ALL_MAPS;
+ int function_index = -1;
+ if (type->Is(HeapType::String())) {
+ function_index = Context::STRING_FUNCTION_INDEX;
+ } else if (type->Is(HeapType::Symbol())) {
+ function_index = Context::SYMBOL_FUNCTION_INDEX;
+ } else if (type->Is(HeapType::Number())) {
+ function_index = Context::NUMBER_FUNCTION_INDEX;
+ } else if (type->Is(HeapType::Boolean())) {
+ // Booleans use the generic oddball map, so an additional check is needed to
+ // ensure the receiver is really a boolean.
+ GenerateBooleanCheck(object_reg, miss);
+ function_index = Context::BOOLEAN_FUNCTION_INDEX;
+ } else {
+ check_type = SKIP_RECEIVER;
+ }
+
+ if (check_type == CHECK_ALL_MAPS) {
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), function_index, scratch1(), miss);
+ Object* function = isolate()->native_context()->get(function_index);
+ Object* prototype = JSFunction::cast(function)->instance_prototype();
+ type = IC::CurrentTypeOf(handle(prototype, isolate()), isolate());
+ object_reg = scratch1();
+ }
+
+ // Check that the maps starting from the prototype haven't changed.
+ return CheckPrototypes(
+ type, object_reg, holder, scratch1(), scratch2(), scratch3(),
+ name, miss, check_type);
}
// HandlerFrontend for store uses the name register. It has to be restored
// before a miss.
Register StoreStubCompiler::HandlerFrontendHeader(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss) {
- return CheckPrototypes(object, object_reg, holder,
- this->name(), scratch1(), scratch2(),
- name, miss, SKIP_RECEIVER);
+ return CheckPrototypes(type, object_reg, holder, this->name(),
+ scratch1(), scratch2(), name, miss, SKIP_RECEIVER);
+}
+
+
+bool BaseLoadStoreStubCompiler::IncludesNumberType(TypeHandleList* types) {
+ for (int i = 0; i < types->length(); ++i) {
+ if (types->at(i)->Is(HeapType::Number())) return true;
+ }
+ return false;
}
-Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<JSObject> object,
+Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
- Handle<Name> name,
- Label* success) {
+ Handle<Name> name) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+
+ HandlerFrontendFooter(name, &miss);
- HandlerFrontendFooter(name, success, &miss);
return reg;
}
-void LoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Label* success,
- Handle<JSGlobalObject> global) {
+void LoadStubCompiler::NonexistentHandlerFrontend(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
Label miss;
- Register holder =
- HandlerFrontendHeader(object, receiver(), last, name, &miss);
+ Register holder;
+ Handle<Map> last_map;
+ if (last.is_null()) {
+ holder = receiver();
+ last_map = IC::TypeToMap(*type, isolate());
+ // If |type| has null as its prototype, |last| is Handle<JSObject>::null().
+ ASSERT(last_map->prototype() == isolate()->heap()->null_value());
+ } else {
+ holder = HandlerFrontendHeader(type, receiver(), last, name, &miss);
+ last_map = handle(last->map());
+ }
- if (!last->HasFastProperties() &&
- !last->IsJSGlobalObject() &&
- !last->IsJSGlobalProxy()) {
+ if (last_map->is_dictionary_map() &&
+ !last_map->IsJSGlobalObjectMap() &&
+ !last_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(last->property_dictionary()->FindEntry(*name) ==
- NameDictionary::kNotFound);
+ ASSERT(last.is_null() ||
+ last->property_dictionary()->FindEntry(*name) ==
+ NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), &miss, holder, name,
scratch2(), scratch3());
}
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
- if (!global.is_null()) {
+ if (last_map->IsJSGlobalObjectMap()) {
+ Handle<JSGlobalObject> global = last.is_null()
+ ? Handle<JSGlobalObject>::cast(type->AsConstant())
+ : Handle<JSGlobalObject>::cast(last);
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
}
Handle<Code> LoadStubCompiler::CompileLoadField(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
PropertyIndex field,
Representation representation) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, receiver(), holder, name, &miss);
-
+ Register reg = HandlerFrontend(type, receiver(), holder, name);
GenerateLoadField(reg, holder, field, representation);
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
- return GetCode(kind(), Code::FIELD, name);
+ return GetCode(kind(), Code::FAST, name);
}
Handle<Code> LoadStubCompiler::CompileLoadConstant(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> value) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(type, receiver(), holder, name);
GenerateLoadConstant(value);
// Return the generated code.
- return GetCode(kind(), Code::CONSTANT, name);
+ return GetCode(kind(), Code::FAST, name);
}
Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
-
Register reg = CallbackHandlerFrontend(
- object, receiver(), holder, name, &success, callback);
- __ bind(&success);
+ type, receiver(), holder, name, callback);
GenerateLoadCallback(reg, callback);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
ASSERT(call_optimization.is_simple_api_call());
- Label success;
-
Handle<JSFunction> callback = call_optimization.constant_function();
- CallbackHandlerFrontend(
- object, receiver(), holder, name, &success, callback);
- __ bind(&success);
- GenerateLoadCallback(call_optimization);
-
+ CallbackHandlerFrontend(type, receiver(), holder, name, callback);
+  Handle<Map> receiver_map = IC::TypeToMap(*type, isolate());
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver_map,
+ receiver(), scratch1(), false, 0, NULL);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
Handle<Code> LoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name) {
- Label success;
-
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- Register reg = HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ Register reg = HandlerFrontend(type, receiver(), holder, name);
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
- GenerateLoadInterceptor(reg, object, holder, &lookup, name);
+ GenerateLoadInterceptor(reg, type, holder, &lookup, name);
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -1338,7 +978,6 @@ void LoadStubCompiler::GenerateLoadPostInterceptor(
Handle<JSObject> interceptor_holder,
Handle<Name> name,
LookupResult* lookup) {
- Label success;
Handle<JSObject> holder(lookup->holder());
if (lookup->IsField()) {
PropertyIndex field = lookup->GetFieldIndex();
@@ -1349,8 +988,8 @@ void LoadStubCompiler::GenerateLoadPostInterceptor(
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
Register reg = HandlerFrontend(
- interceptor_holder, interceptor_reg, holder, name, &success);
- __ bind(&success);
+ IC::CurrentTypeOf(interceptor_holder, isolate()),
+ interceptor_reg, holder, name);
GenerateLoadField(
reg, holder, field, lookup->representation());
}
@@ -1363,39 +1002,36 @@ void LoadStubCompiler::GenerateLoadPostInterceptor(
ASSERT(callback->getter() != NULL);
Register reg = CallbackHandlerFrontend(
- interceptor_holder, interceptor_reg, holder, name, &success, callback);
- __ bind(&success);
+ IC::CurrentTypeOf(interceptor_holder, isolate()),
+ interceptor_reg, holder, name, callback);
GenerateLoadCallback(reg, callback);
}
}
Handle<Code> BaseLoadStoreStubCompiler::CompileMonomorphicIC(
- Handle<Map> receiver_map,
+ Handle<HeapType> type,
Handle<Code> handler,
Handle<Name> name) {
- MapHandleList receiver_maps(1);
- receiver_maps.Add(receiver_map);
+ TypeHandleList types(1);
CodeHandleList handlers(1);
+ types.Add(type);
handlers.Add(handler);
- Code::StubType type = handler->type();
- return CompilePolymorphicIC(&receiver_maps, &handlers, name, type, PROPERTY);
+ Code::StubType stub_type = handler->type();
+ return CompilePolymorphicIC(&types, &handlers, name, stub_type, PROPERTY);
}
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<JSFunction> getter) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
-
- __ bind(&success);
- GenerateLoadViaGetter(masm(), receiver(), getter);
+ HandlerFrontend(type, receiver(), holder, name);
+ GenerateLoadViaGetter(masm(), type, receiver(), getter);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -1423,8 +1059,8 @@ Handle<Code> StoreStubCompiler::CompileStoreTransition(
} while (holder->GetPrototype()->IsJSObject());
}
- Register holder_reg =
- HandlerFrontendHeader(object, receiver(), holder, name, &miss);
+ Register holder_reg = HandlerFrontendHeader(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name, &miss);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
@@ -1452,7 +1088,7 @@ Handle<Code> StoreStubCompiler::CompileStoreTransition(
TailCallBuiltin(masm(), SlowBuiltin(kind()));
// Return the generated code.
- return GetCode(kind(), Code::TRANSITION, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -1461,7 +1097,8 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
Handle<Name> name) {
Label miss;
- HandlerFrontendHeader(object, receiver(), object, name, &miss);
+ HandlerFrontendHeader(IC::CurrentTypeOf(object, isolate()),
+ receiver(), object, name, &miss);
// Generate store field code.
GenerateStoreField(masm(),
@@ -1475,7 +1112,7 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(kind(), Code::FIELD, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -1484,13 +1121,27 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSObject> holder,
Handle<Name> name,
Handle<JSFunction> setter) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
+ Handle<HeapType> type = IC::CurrentTypeOf(object, isolate());
+ HandlerFrontend(type, receiver(), holder, name);
+ GenerateStoreViaSetter(masm(), type, setter);
+
+ return GetCode(kind(), Code::FAST, name);
+}
- __ bind(&success);
- GenerateStoreViaSetter(masm(), setter);
- return GetCode(kind(), Code::CALLBACKS, name);
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, handle(object->map()),
+ receiver(), scratch1(), true, 1, values);
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
}
@@ -1498,14 +1149,16 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
Handle<Map> receiver_map) {
ElementsKind elements_kind = receiver_map->elements_kind();
if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
Handle<Code> stub = KeyedLoadFastElementStub(
receiver_map->instance_type() == JS_ARRAY_TYPE,
elements_kind).GetCode(isolate());
__ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
} else {
- Handle<Code> stub =
- KeyedLoadDictionaryElementStub().GetCode(isolate());
+ Handle<Code> stub = FLAG_compiled_keyed_dictionary_loads
+ ? KeyedLoadDictionaryElementStub().GetCode(isolate())
+ : KeyedLoadDictionaryElementPlatformStub().GetCode(isolate());
__ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
}
@@ -1522,15 +1175,16 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
stub = KeyedStoreFastElementStub(
is_jsarray,
elements_kind,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
} else {
stub = KeyedStoreElementStub(is_jsarray,
elements_kind,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
}
__ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
@@ -1596,9 +1250,8 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name) {
- ASSERT(type != Code::NORMAL);
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state(), type, kind);
+ ASSERT_EQ(kNoExtraICState, extra_state());
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder_);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
@@ -1614,12 +1267,15 @@ void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps,
if ((receiver_map->instance_type() & kNotStringTag) == 0) {
cached_stub = isolate()->builtins()->KeyedLoadIC_String();
+ } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
} else {
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
ElementsKind elements_kind = receiver_map->elements_kind();
if (IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind)) {
+ IsExternalArrayElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind)) {
cached_stub =
KeyedLoadFastElementStub(is_js_array,
elements_kind).GetCode(isolate());
@@ -1657,19 +1313,22 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
elements_kind,
transitioned_map->elements_kind(),
is_js_array,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
+ } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
} else {
if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
cached_stub = KeyedStoreFastElementStub(
is_js_array,
elements_kind,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
} else {
cached_stub = KeyedStoreElementStub(
is_js_array,
elements_kind,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
}
}
ASSERT(!cached_stub.is_null());
@@ -1691,99 +1350,6 @@ void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
}
-CallStubCompiler::CallStubCompiler(Isolate* isolate,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- InlineCacheHolderFlag cache_holder)
- : StubCompiler(isolate),
- arguments_(argc),
- kind_(kind),
- extra_state_(extra_state),
- cache_holder_(cache_holder) {
-}
-
-
-bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
- if (function->shared()->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function->shared()->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
-
- CallOptimization optimization(function);
- return optimization.is_simple_api_call();
-}
-
-
-bool CallStubCompiler::CanBeCached(Handle<JSFunction> function) {
- if (function->shared()->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function->shared()->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) if (id == k##name) return false;
- SITE_SPECIFIC_CALL_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
-
- return true;
-}
-
-
-Handle<Code> CallStubCompiler::CompileCustomCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> fname,
- Code::StubType type) {
- ASSERT(HasCustomCallGenerator(function));
-
- if (function->shared()->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function->shared()->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) \
- if (id == k##name) { \
- return CallStubCompiler::Compile##name##Call(object, \
- holder, \
- cell, \
- function, \
- fname, \
- type); \
- }
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
- }
- CallOptimization optimization(function);
- ASSERT(optimization.is_simple_api_call());
- return CompileFastApiCall(optimization,
- object,
- holder,
- cell,
- function,
- fname);
-}
-
-
-Handle<Code> CallStubCompiler::GetCode(Code::StubType type,
- Handle<Name> name) {
- int argc = arguments_.immediate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
- extra_state_,
- type,
- argc,
- cache_holder_);
- return GetCodeWithFlags(flags, name);
-}
-
-
-Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
- Handle<String> function_name;
- if (function->shared()->name()->IsString()) {
- function_name = Handle<String>(String::cast(function->shared()->name()));
- }
- return GetCode(Code::CONSTANT, function_name);
-}
-
-
CallOptimization::CallOptimization(LookupResult* lookup) {
if (lookup->IsFound() &&
lookup->IsCacheable() &&
@@ -1801,20 +1367,63 @@ CallOptimization::CallOptimization(Handle<JSFunction> function) {
}
-int CallOptimization::GetPrototypeDepthOfExpectedType(
- Handle<JSObject> object,
- Handle<JSObject> holder) const {
+Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
+ Handle<Map> object_map,
+ HolderLookup* holder_lookup) const {
+ ASSERT(is_simple_api_call());
+ if (!object_map->IsJSObjectMap()) {
+ *holder_lookup = kHolderNotFound;
+ return Handle<JSObject>::null();
+ }
+ if (expected_receiver_type_.is_null() ||
+ expected_receiver_type_->IsTemplateFor(*object_map)) {
+ *holder_lookup = kHolderIsReceiver;
+ return Handle<JSObject>::null();
+ }
+ while (true) {
+ if (!object_map->prototype()->IsJSObject()) break;
+ Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
+ if (!prototype->map()->is_hidden_prototype()) break;
+ object_map = handle(prototype->map());
+ if (expected_receiver_type_->IsTemplateFor(*object_map)) {
+ *holder_lookup = kHolderFound;
+ return prototype;
+ }
+ }
+ *holder_lookup = kHolderNotFound;
+ return Handle<JSObject>::null();
+}
+
+
+bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
+ Handle<JSObject> holder) const {
ASSERT(is_simple_api_call());
- if (expected_receiver_type_.is_null()) return 0;
- int depth = 0;
- while (!object.is_identical_to(holder)) {
- if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
- object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
- if (!object->map()->is_hidden_prototype()) return kInvalidProtoDepth;
- ++depth;
+ if (!receiver->IsJSObject()) return false;
+ Handle<Map> map(JSObject::cast(*receiver)->map());
+ HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ LookupHolderOfExpectedType(map, &holder_lookup);
+ switch (holder_lookup) {
+ case kHolderNotFound:
+ return false;
+ case kHolderIsReceiver:
+ return true;
+ case kHolderFound:
+ if (api_holder.is_identical_to(holder)) return true;
+ // Check if holder is in prototype chain of api_holder.
+ {
+ JSObject* object = *api_holder;
+ while (true) {
+ Object* prototype = object->map()->prototype();
+ if (!prototype->IsJSObject()) return false;
+ if (prototype == *holder) return true;
+ object = JSObject::cast(prototype);
+ }
+ }
+ break;
}
- if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
- return kInvalidProtoDepth;
+ UNREACHABLE();
+ return false;
}
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 42685b2059..7eca6bb1d8 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -83,68 +83,21 @@ class StubCache {
Handle<Code> FindIC(Handle<Name> name,
Handle<Map> stub_holder_map,
Code::Kind kind,
- Code::ExtraICState extra_state = Code::kNoExtraICState);
-
- Handle<Code> FindIC(Handle<Name> name,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::ExtraICState extra_state = Code::kNoExtraICState);
+ ExtraICState extra_state = kNoExtraICState,
+ InlineCacheHolderFlag cache_holder = OWN_MAP);
Handle<Code> FindHandler(Handle<Name> name,
- Handle<JSObject> receiver,
+ Handle<Map> map,
Code::Kind kind,
- StrictModeFlag strict_mode = kNonStrictMode);
+ InlineCacheHolderFlag cache_holder = OWN_MAP);
- Handle<Code> ComputeMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
+ Handle<Code> ComputeMonomorphicIC(Code::Kind kind,
Handle<Name> name,
- StrictModeFlag strict_mode);
-
- // Computes the right stub matching. Inserts the result in the
- // cache before returning. This might compile a stub if needed.
- Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
- Handle<JSObject> object);
-
- Handle<Code> ComputeLoadGlobal(Handle<Name> name,
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- bool is_dont_delete);
-
- // ---
+ Handle<HeapType> type,
+ Handle<Code> handler,
+ ExtraICState extra_ic_state);
- Handle<Code> ComputeKeyedLoadField(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex field_index,
- Representation representation);
-
- Handle<Code> ComputeKeyedLoadCallback(
- Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> ComputeKeyedLoadCallback(
- Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization);
-
- Handle<Code> ComputeKeyedLoadConstant(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Object> value);
-
- Handle<Code> ComputeKeyedLoadInterceptor(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder);
-
- Handle<Code> ComputeStoreGlobal(Handle<Name> name,
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Object> value,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputeLoadNonexistent(Handle<Name> name, Handle<HeapType> type);
Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
@@ -152,61 +105,11 @@ class StubCache {
StrictModeFlag strict_mode,
KeyedAccessStoreMode store_mode);
- Handle<Code> ComputeCallField(int argc,
- Code::Kind,
- Code::ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- PropertyIndex index);
-
- Handle<Code> ComputeCallConstant(int argc,
- Code::Kind,
- Code::ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function);
-
- Handle<Code> ComputeCallInterceptor(int argc,
- Code::Kind,
- Code::ExtraICState extra_state,
- Handle<Name> name,
- Handle<Object> object,
- Handle<JSObject> holder);
-
- Handle<Code> ComputeCallGlobal(int argc,
- Code::Kind,
- Code::ExtraICState extra_state,
- Handle<Name> name,
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function);
-
// ---
- Handle<Code> ComputeCallInitialize(int argc, RelocInfo::Mode mode);
-
- Handle<Code> ComputeKeyedCallInitialize(int argc);
-
- Handle<Code> ComputeCallPreMonomorphic(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state);
-
- Handle<Code> ComputeCallNormal(int argc,
- Code::Kind kind,
- Code::ExtraICState state);
-
- Handle<Code> ComputeCallArguments(int argc);
-
- Handle<Code> ComputeCallMegamorphic(int argc,
- Code::Kind kind,
- Code::ExtraICState state);
-
- Handle<Code> ComputeCallMiss(int argc,
- Code::Kind kind,
- Code::ExtraICState state);
+ Handle<Code> ComputeLoad(InlineCacheState ic_state, ExtraICState extra_state);
+ Handle<Code> ComputeStore(InlineCacheState ic_state,
+ ExtraICState extra_state);
// ---
@@ -220,20 +123,15 @@ class StubCache {
KeyedAccessStoreMode store_mode,
StrictModeFlag strict_mode);
- Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
+ Handle<Code> ComputePolymorphicIC(Code::Kind kind,
+ TypeHandleList* types,
CodeHandleList* handlers,
int number_of_valid_maps,
Handle<Name> name,
- StrictModeFlag strict_mode);
+ ExtraICState extra_ic_state);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
- Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind);
-
- Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
-#endif
+ Code* FindPreMonomorphicIC(Code::Kind kind, ExtraICState extra_ic_state);
// Update cache for entry hash(name, map).
Code* Set(Name* name, Map* map, Code* code);
@@ -310,10 +208,6 @@ class StubCache {
private:
explicit StubCache(Isolate* isolate);
- Handle<Code> ComputeCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind);
-
// The stub cache has a primary and secondary level. The two levels have
// different hashing algorithms in order to avoid simultaneous collisions
// in both caches. Unlike a probing strategy (quadratic or otherwise) the
@@ -401,7 +295,6 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
@@ -412,22 +305,19 @@ enum IcCheckType { ELEMENT, PROPERTY };
// The stub compilers compile stubs for the stub cache.
class StubCompiler BASE_EMBEDDED {
public:
- explicit StubCompiler(Isolate* isolate)
- : isolate_(isolate), masm_(isolate, NULL, 256), failure_(NULL) { }
-
- // Functions to compile either CallIC or KeyedCallIC. The specific kind
- // is extracted from the code flags.
- Handle<Code> CompileCallInitialize(Code::Flags flags);
- Handle<Code> CompileCallPreMonomorphic(Code::Flags flags);
- Handle<Code> CompileCallNormal(Code::Flags flags);
- Handle<Code> CompileCallMegamorphic(Code::Flags flags);
- Handle<Code> CompileCallArguments(Code::Flags flags);
- Handle<Code> CompileCallMiss(Code::Flags flags);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Handle<Code> CompileCallDebugBreak(Code::Flags flags);
- Handle<Code> CompileCallDebugPrepareStepIn(Code::Flags flags);
-#endif
+ explicit StubCompiler(Isolate* isolate,
+ ExtraICState extra_ic_state = kNoExtraICState)
+ : isolate_(isolate), extra_ic_state_(extra_ic_state),
+ masm_(isolate, NULL, 256), failure_(NULL) { }
+
+ Handle<Code> CompileLoadInitialize(Code::Flags flags);
+ Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileLoadMegamorphic(Code::Flags flags);
+
+ Handle<Code> CompileStoreInitialize(Code::Flags flags);
+ Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileStoreGeneric(Code::Flags flags);
+ Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
// Static functions for generating parts of stubs.
static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
@@ -490,15 +380,6 @@ class StubCompiler BASE_EMBEDDED {
Register scratch,
Label* miss);
- // Calls GenerateCheckPropertyCell for each global object in the prototype
- // chain from object to (but not including) holder.
- static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss);
-
static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
// Generates code that verifies that the property holder has not changed
@@ -513,10 +394,7 @@ class StubCompiler BASE_EMBEDDED {
  // register is only clobbered if it is the same as the holder register. The
// function returns a register containing the holder - either object_reg or
// holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [esp + kPointerSize].
- Register CheckPrototypes(Handle<JSObject> object,
+ Register CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
@@ -524,27 +402,25 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
Handle<Name> name,
Label* miss,
- PrototypeCheckType check = CHECK_ALL_MAPS) {
- return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
- scratch2, name, kInvalidProtoDepth, miss, check);
- }
-
- Register CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<Name> name,
- int save_at_depth,
- Label* miss,
PrototypeCheckType check = CHECK_ALL_MAPS);
+ void GenerateBooleanCheck(Register object, Label* miss);
+
+ static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch,
+ bool is_store,
+ int argc,
+ Register* values);
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
+ ExtraICState extra_state() { return extra_ic_state_; }
+
MacroAssembler* masm() { return &masm_; }
void set_failure(Failure* failure) { failure_ = failure; }
@@ -560,6 +436,7 @@ class StubCompiler BASE_EMBEDDED {
private:
Isolate* isolate_;
+ const ExtraICState extra_ic_state_;
MacroAssembler masm_;
Failure* failure_;
};
@@ -570,26 +447,27 @@ enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS };
class BaseLoadStoreStubCompiler: public StubCompiler {
public:
- BaseLoadStoreStubCompiler(Isolate* isolate, Code::Kind kind)
- : StubCompiler(isolate), kind_(kind) {
+ BaseLoadStoreStubCompiler(Isolate* isolate,
+ Code::Kind kind,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ InlineCacheHolderFlag cache_holder = OWN_MAP)
+ : StubCompiler(isolate, extra_ic_state),
+ kind_(kind),
+ cache_holder_(cache_holder) {
InitializeRegisters();
}
virtual ~BaseLoadStoreStubCompiler() { }
- Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map,
+ Handle<Code> CompileMonomorphicIC(Handle<HeapType> type,
Handle<Code> handler,
Handle<Name> name);
- Handle<Code> CompilePolymorphicIC(MapHandleList* receiver_maps,
+ Handle<Code> CompilePolymorphicIC(TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
IcCheckType check);
- virtual void GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) { }
-
static Builtins::Name MissBuiltin(Code::Kind kind) {
switch (kind) {
case Code::LOAD_IC: return Builtins::kLoadIC_Miss;
@@ -602,21 +480,18 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
}
protected:
- virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ virtual Register HandlerFrontendHeader(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss) = 0;
- virtual void HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) = 0;
+ virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss) = 0;
- Register HandlerFrontend(Handle<JSObject> object,
+ Register HandlerFrontend(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
- Handle<Name> name,
- Label* success);
+ Handle<Name> name);
Handle<Code> GetCode(Code::Kind kind,
Code::StubType type,
@@ -646,7 +521,6 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
}
void JitEvent(Handle<Name> name, Handle<Code> code);
- virtual Code::ExtraICState extra_state() { return Code::kNoExtraICState; }
virtual Register receiver() = 0;
virtual Register name() = 0;
virtual Register scratch1() = 0;
@@ -655,57 +529,64 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
void InitializeRegisters();
+ bool IncludesNumberType(TypeHandleList* types);
+
Code::Kind kind_;
+ InlineCacheHolderFlag cache_holder_;
Register* registers_;
};
class LoadStubCompiler: public BaseLoadStoreStubCompiler {
public:
- LoadStubCompiler(Isolate* isolate, Code::Kind kind = Code::LOAD_IC)
- : BaseLoadStoreStubCompiler(isolate, kind) { }
+ LoadStubCompiler(Isolate* isolate,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ InlineCacheHolderFlag cache_holder = OWN_MAP,
+ Code::Kind kind = Code::LOAD_IC)
+ : BaseLoadStoreStubCompiler(isolate, kind, extra_ic_state,
+ cache_holder) { }
virtual ~LoadStubCompiler() { }
- Handle<Code> CompileLoadField(Handle<JSObject> object,
+ Handle<Code> CompileLoadField(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
PropertyIndex index,
Representation representation);
- Handle<Code> CompileLoadCallback(Handle<JSObject> object,
+ Handle<Code> CompileLoadCallback(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback);
- Handle<Code> CompileLoadCallback(Handle<JSObject> object,
+ Handle<Code> CompileLoadCallback(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization);
- Handle<Code> CompileLoadConstant(Handle<JSObject> object,
+ Handle<Code> CompileLoadConstant(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> value);
- Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
+ Handle<Code> CompileLoadInterceptor(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name);
- Handle<Code> CompileLoadViaGetter(Handle<JSObject> object,
+ Handle<Code> CompileLoadViaGetter(Handle<HeapType> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<JSFunction> getter);
static void GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter);
- Handle<Code> CompileLoadNonexistent(Handle<JSObject> object,
+ Handle<Code> CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global);
+ Handle<Name> name);
- Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
+ Handle<Code> CompileLoadGlobal(Handle<HeapType> type,
Handle<GlobalObject> holder,
Handle<PropertyCell> cell,
Handle<Name> name,
@@ -714,27 +595,26 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
static Register* registers();
protected:
- virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ ContextualMode contextual_mode() {
+ return LoadIC::GetContextualMode(extra_state());
+ }
+
+ virtual Register HandlerFrontendHeader(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss);
- virtual void HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss);
+ virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss);
- Register CallbackHandlerFrontend(Handle<JSObject> object,
+ Register CallbackHandlerFrontend(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback);
- void NonexistentHandlerFrontend(Handle<JSObject> object,
+ void NonexistentHandlerFrontend(Handle<HeapType> type,
Handle<JSObject> last,
- Handle<Name> name,
- Label* success,
- Handle<JSGlobalObject> global);
+ Handle<Name> name);
void GenerateLoadField(Register reg,
Handle<JSObject> holder,
@@ -743,9 +623,10 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
void GenerateLoadConstant(Handle<Object> value);
void GenerateLoadCallback(Register reg,
Handle<ExecutableAccessorInfo> callback);
- void GenerateLoadCallback(const CallOptimization& call_optimization);
+ void GenerateLoadCallback(const CallOptimization& call_optimization,
+ Handle<Map> receiver_map);
void GenerateLoadInterceptor(Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> holder,
LookupResult* lookup,
Handle<Name> name);
@@ -765,8 +646,11 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
class KeyedLoadStubCompiler: public LoadStubCompiler {
public:
- explicit KeyedLoadStubCompiler(Isolate* isolate)
- : LoadStubCompiler(isolate, Code::KEYED_LOAD_IC) { }
+ KeyedLoadStubCompiler(Isolate* isolate,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ InlineCacheHolderFlag cache_holder = OWN_MAP)
+ : LoadStubCompiler(isolate, extra_ic_state, cache_holder,
+ Code::KEYED_LOAD_IC) { }
Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
@@ -775,13 +659,8 @@ class KeyedLoadStubCompiler: public LoadStubCompiler {
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
- protected:
- static Register* registers();
-
private:
- virtual void GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss);
+ static Register* registers();
friend class BaseLoadStoreStubCompiler;
};
@@ -789,10 +668,9 @@ class KeyedLoadStubCompiler: public LoadStubCompiler {
class StoreStubCompiler: public BaseLoadStoreStubCompiler {
public:
StoreStubCompiler(Isolate* isolate,
- StrictModeFlag strict_mode,
+ ExtraICState extra_ic_state,
Code::Kind kind = Code::STORE_IC)
- : BaseLoadStoreStubCompiler(isolate, kind),
- strict_mode_(strict_mode) { }
+ : BaseLoadStoreStubCompiler(isolate, kind, extra_ic_state) {}
virtual ~StoreStubCompiler() { }
@@ -846,6 +724,7 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
const CallOptimization& call_optimization);
static void GenerateStoreViaSetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Handle<JSFunction> setter);
Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
@@ -866,15 +745,13 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
}
protected:
- virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ virtual Register HandlerFrontendHeader(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss);
- virtual void HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss);
+ virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss);
void GenerateRestoreName(MacroAssembler* masm,
Label* label,
Handle<Name> name);
@@ -885,14 +762,11 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
virtual Register scratch1() { return registers_[3]; }
virtual Register scratch2() { return registers_[4]; }
virtual Register scratch3() { return registers_[5]; }
- StrictModeFlag strict_mode() { return strict_mode_; }
- virtual Code::ExtraICState extra_state() { return strict_mode_; }
protected:
static Register* registers();
private:
- StrictModeFlag strict_mode_;
friend class BaseLoadStoreStubCompiler;
};
@@ -900,10 +774,8 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
class KeyedStoreStubCompiler: public StoreStubCompiler {
public:
KeyedStoreStubCompiler(Isolate* isolate,
- StrictModeFlag strict_mode,
- KeyedAccessStoreMode store_mode)
- : StoreStubCompiler(isolate, strict_mode, Code::KEYED_STORE_IC),
- store_mode_(store_mode) { }
+ ExtraICState extra_ic_state)
+ : StoreStubCompiler(isolate, extra_ic_state, Code::KEYED_STORE_IC) {}
Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
@@ -915,138 +787,21 @@ class KeyedStoreStubCompiler: public StoreStubCompiler {
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
- protected:
- virtual Code::ExtraICState extra_state() {
- return Code::ComputeExtraICState(store_mode_, strict_mode());
- }
+ private:
static Register* registers();
- private:
+ KeyedAccessStoreMode store_mode() {
+ return KeyedStoreIC::GetKeyedAccessStoreMode(extra_state());
+ }
+
Register transition_map() {
return registers()[3];
}
- virtual void GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss);
- KeyedAccessStoreMode store_mode_;
friend class BaseLoadStoreStubCompiler;
};
-// Subset of FUNCTIONS_WITH_ID_LIST with custom constant/global call
-// IC stubs.
-#define CUSTOM_CALL_IC_GENERATORS(V) \
- V(ArrayPush) \
- V(ArrayPop) \
- V(StringCharCodeAt) \
- V(StringCharAt) \
- V(StringFromCharCode) \
- V(MathFloor) \
- V(MathAbs) \
- V(ArrayCode)
-
-
-#define SITE_SPECIFIC_CALL_GENERATORS(V) \
- V(ArrayCode)
-
-
-class CallStubCompiler: public StubCompiler {
- public:
- CallStubCompiler(Isolate* isolate,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state,
- InlineCacheHolderFlag cache_holder = OWN_MAP);
-
- Handle<Code> CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name);
-
- void CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success);
-
- void CompileHandlerBackend(Handle<JSFunction> function);
-
- Handle<Code> CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function);
-
- Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name);
-
- Handle<Code> CompileCallGlobal(Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name);
-
- static bool HasCustomCallGenerator(Handle<JSFunction> function);
- static bool CanBeCached(Handle<JSFunction> function);
-
- private:
- // Compiles a custom call constant/global IC. For constant calls cell is
- // NULL. Returns an empty handle if there is no custom call code for the
- // given function.
- Handle<Code> CompileCustomCall(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type);
-
-#define DECLARE_CALL_GENERATOR(name) \
- Handle<Code> Compile##name##Call(Handle<Object> object, \
- Handle<JSObject> holder, \
- Handle<Cell> cell, \
- Handle<JSFunction> function, \
- Handle<String> fname, \
- Code::StubType type);
- CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
-#undef DECLARE_CALL_GENERATOR
-
- Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name);
-
- Handle<Code> GetCode(Code::StubType type, Handle<Name> name);
- Handle<Code> GetCode(Handle<JSFunction> function);
-
- const ParameterCount& arguments() { return arguments_; }
-
- void GenerateNameCheck(Handle<Name> name, Label* miss);
-
- void GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss);
-
- // Generates code to load the function from the cell checking that
- // it still contains the same function.
- void GenerateLoadFunctionFromCell(Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss);
-
- // Generates a jump to CallIC miss stub.
- void GenerateMissBranch();
-
- const ParameterCount arguments_;
- const Code::Kind kind_;
- const Code::ExtraICState extra_state_;
- const InlineCacheHolderFlag cache_holder_;
-};
-
-
// Holds information about possible function call optimizations.
class CallOptimization BASE_EMBEDDED {
public:
@@ -1077,16 +832,18 @@ class CallOptimization BASE_EMBEDDED {
return api_call_info_;
}
- // Returns the depth of the object having the expected type in the
- // prototype chain between the two arguments.
- int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
- Handle<JSObject> holder) const;
+ enum HolderLookup {
+ kHolderNotFound,
+ kHolderIsReceiver,
+ kHolderFound
+ };
+ Handle<JSObject> LookupHolderOfExpectedType(
+ Handle<Map> receiver_map,
+ HolderLookup* holder_lookup) const;
- bool IsCompatibleReceiver(Object* receiver) {
- ASSERT(is_simple_api_call());
- if (expected_receiver_type_.is_null()) return true;
- return receiver->IsInstanceOf(*expected_receiver_type_);
- }
+ // Check if the api holder is between the receiver and the holder.
+ bool IsCompatibleReceiver(Handle<Object> receiver,
+ Handle<JSObject> holder) const;
private:
void Initialize(Handle<JSFunction> function);
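
The new CallOptimization::LookupHolderOfExpectedType interface replaces the old prototype-depth query with a three-way answer. As a rough standalone illustration of that shape only (not V8 source; the Obj type and LookupHolder helper below are hypothetical stand-ins), a holder lookup over a prototype chain can report the receiver itself, a deeper object, or nothing:

// Hypothetical sketch of a three-way holder lookup result; not V8 code.
enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };

struct Obj {
  bool is_expected_type;  // does this object satisfy the expected receiver type?
  Obj* prototype;         // next object on the prototype chain, nullptr at the end
};

HolderLookup LookupHolder(Obj* receiver, Obj** holder_out) {
  for (Obj* o = receiver; o != nullptr; o = o->prototype) {
    if (o->is_expected_type) {
      *holder_out = o;
      return (o == receiver) ? kHolderIsReceiver : kHolderFound;
    }
  }
  *holder_out = nullptr;
  return kHolderNotFound;
}

int main() {
  Obj holder = { true, nullptr };
  Obj receiver = { false, &holder };
  Obj* found = nullptr;
  return LookupHolder(&receiver, &found);  // kHolderFound, i.e. 2
}
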
diff --git a/deps/v8/src/sweeper-thread.cc b/deps/v8/src/sweeper-thread.cc
index 58c684a54f..7e8305abe8 100644
--- a/deps/v8/src/sweeper-thread.cc
+++ b/deps/v8/src/sweeper-thread.cc
@@ -44,12 +44,8 @@ SweeperThread::SweeperThread(Isolate* isolate)
collector_(heap_->mark_compact_collector()),
start_sweeping_semaphore_(0),
end_sweeping_semaphore_(0),
- stop_semaphore_(0),
- free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
- free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
- private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
- private_free_list_old_pointer_space_(
- heap_->paged_space(OLD_POINTER_SPACE)) {
+ stop_semaphore_(0) {
+ ASSERT(!FLAG_job_based_sweeping);
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
}
@@ -68,27 +64,13 @@ void SweeperThread::Run() {
return;
}
- collector_->SweepInParallel(heap_->old_data_space(),
- &private_free_list_old_data_space_,
- &free_list_old_data_space_);
- collector_->SweepInParallel(heap_->old_pointer_space(),
- &private_free_list_old_pointer_space_,
- &free_list_old_pointer_space_);
+ collector_->SweepInParallel(heap_->old_data_space());
+ collector_->SweepInParallel(heap_->old_pointer_space());
end_sweeping_semaphore_.Signal();
}
}
-intptr_t SweeperThread::StealMemory(PagedSpace* space) {
- if (space->identity() == OLD_POINTER_SPACE) {
- return space->free_list()->Concatenate(&free_list_old_pointer_space_);
- } else if (space->identity() == OLD_DATA_SPACE) {
- return space->free_list()->Concatenate(&free_list_old_data_space_);
- }
- return 0;
-}
-
-
void SweeperThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
start_sweeping_semaphore_.Signal();
@@ -105,4 +87,14 @@ void SweeperThread::StartSweeping() {
void SweeperThread::WaitForSweeperThread() {
end_sweeping_semaphore_.Wait();
}
+
+
+int SweeperThread::NumberOfThreads(int max_available) {
+ if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0;
+ if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads;
+ if (FLAG_concurrent_sweeping) return max_available - 1;
+ ASSERT(FLAG_parallel_sweeping);
+ return max_available;
+}
+
} } // namespace v8::internal
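
The new SweeperThread::NumberOfThreads() heuristic above sizes the sweeper pool from three flags. A minimal standalone sketch of the same decision (not V8 source; the FLAG_* variables and the caller are stand-ins for the real V8 flags) shows how it plays out on a four-core machine:

#include <cstdio>

// Stand-ins for the V8 flags consulted by NumberOfThreads() above.
static bool FLAG_concurrent_sweeping = true;
static bool FLAG_parallel_sweeping = false;
static int FLAG_sweeper_threads = 0;

static int NumberOfSweeperThreads(int max_available) {
  if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0;
  if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads;  // explicit override wins
  if (FLAG_concurrent_sweeping) return max_available - 1;     // keep one core for the main thread
  return max_available;                                       // parallel sweeping uses every core
}

int main() {
  std::printf("sweeper threads on 4 cores: %d\n", NumberOfSweeperThreads(4));  // prints 3
  return 0;
}
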
diff --git a/deps/v8/src/sweeper-thread.h b/deps/v8/src/sweeper-thread.h
index c36cfc39a2..3f7917b03f 100644
--- a/deps/v8/src/sweeper-thread.h
+++ b/deps/v8/src/sweeper-thread.h
@@ -49,7 +49,8 @@ class SweeperThread : public Thread {
void Stop();
void StartSweeping();
void WaitForSweeperThread();
- intptr_t StealMemory(PagedSpace* space);
+
+ static int NumberOfThreads(int max_available);
private:
Isolate* isolate_;
@@ -58,10 +59,6 @@ class SweeperThread : public Thread {
Semaphore start_sweeping_semaphore_;
Semaphore end_sweeping_semaphore_;
Semaphore stop_semaphore_;
- FreeList free_list_old_data_space_;
- FreeList free_list_old_pointer_space_;
- FreeList private_free_list_old_data_space_;
- FreeList private_free_list_old_pointer_space_;
volatile AtomicWord stop_thread_;
};
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/symbol.js
index 050e7d918a..be308d947c 100644
--- a/deps/v8/src/symbol.js
+++ b/deps/v8/src/symbol.js
@@ -68,6 +68,20 @@ function SymbolValueOf() {
return %_ValueOf(this);
}
+
+// ES6 19.1.2.8
+function ObjectGetOwnPropertySymbols(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("called_on_non_object",
+ ["Object.getOwnPropertySymbols"]);
+ }
+
+ // TODO(arv): Proxies use a shared trap for String and Symbol keys.
+
+ return ObjectGetOwnPropertyKeys(obj, true);
+}
+
+
//-------------------------------------------------------------------
function SetUpSymbol() {
@@ -85,3 +99,14 @@ function SetUpSymbol() {
}
SetUpSymbol();
+
+
+function ExtendObject() {
+ %CheckIsBootstrapping();
+
+ InstallFunctions($Object, DONT_ENUM, $Array(
+ "getOwnPropertySymbols", ObjectGetOwnPropertySymbols
+ ));
+}
+
+ExtendObject();
diff --git a/deps/v8/src/third_party/valgrind/valgrind.h b/deps/v8/src/third_party/valgrind/valgrind.h
index 7a3ee2f1fb..fa3f53675e 100644
--- a/deps/v8/src/third_party/valgrind/valgrind.h
+++ b/deps/v8/src/third_party/valgrind/valgrind.h
@@ -21,16 +21,16 @@
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
@@ -52,13 +52,13 @@
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
- ----------------------------------------------------------------
+ ----------------------------------------------------------------
*/
/* This file is for inclusion into client (your!) code.
- You can use these macros to manipulate and query Valgrind's
+ You can use these macros to manipulate and query Valgrind's
execution inside your own programs.
The resulting executables will still run without Valgrind, just a
@@ -194,8 +194,8 @@
this is executed not under Valgrind. Args are passed in a memory
block, and so there's no intrinsic limit to the number that could
be passed, but it's currently five.
-
- The macro args are:
+
+ The macro args are:
_zzq_rlval result lvalue
_zzq_default default value (result returned when running on real CPU)
_zzq_request request code
@@ -222,7 +222,7 @@
|| (defined(PLAT_x86_win32) && defined(__GNUC__))
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
@@ -277,7 +277,7 @@ typedef
#if defined(PLAT_x86_win32) && !defined(__GNUC__)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
@@ -343,7 +343,7 @@ valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
typedef
- struct {
+ struct {
uint64_t nraddr; /* where's the code? */
}
OrigFn;
@@ -398,7 +398,7 @@ typedef
#if defined(PLAT_ppc32_linux)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
@@ -459,7 +459,7 @@ typedef
#if defined(PLAT_ppc64_linux)
typedef
- struct {
+ struct {
uint64_t nraddr; /* where's the code? */
uint64_t r2; /* what tocptr do we need? */
}
@@ -526,7 +526,7 @@ typedef
#if defined(PLAT_arm_linux)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
@@ -1709,7 +1709,7 @@ typedef
"r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
-/* These CALL_FN_ macros assume that on ppc32-linux,
+/* These CALL_FN_ macros assume that on ppc32-linux,
sizeof(unsigned long) == 4. */
#define CALL_FN_W_v(lval, orig) \
@@ -3581,7 +3581,7 @@ typedef
#define VG_IS_TOOL_USERREQ(a, b, v) \
(VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
-/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
@@ -3710,7 +3710,7 @@ VALGRIND_PRINTF(const char *format, ...)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_VALIST_BY_REF,
(unsigned long)format,
- (unsigned long)&vargs,
+ (unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
@@ -3748,7 +3748,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
(unsigned long)format,
- (unsigned long)&vargs,
+ (unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
@@ -3759,7 +3759,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
/* These requests allow control to move from the simulated CPU to the
real CPU, calling an arbitrary function.
-
+
Note that the current ThreadId is inserted as the first argument.
So this call:
@@ -3845,7 +3845,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
- It marks the block as being addressable and undefined (if 'is_zeroed' is
not set), or addressable and defined (if 'is_zeroed' is set). This
controls how accesses to the block by the program are handled.
-
+
'addr' is the start of the usable block (ie. after any
redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
can apply redzones -- these are blocks of padding at the start and end of
@@ -3853,7 +3853,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
zeroed (or filled with another predictable value), as is the case for
calloc().
-
+
VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
heap block -- that will be used by the client program -- is allocated.
It's best to put it at the outermost level of the allocator if possible;
diff --git a/deps/v8/src/third_party/vtune/jitprofiling.cc b/deps/v8/src/third_party/vtune/jitprofiling.cc
index b3952b3216..40282903fc 100644
--- a/deps/v8/src/third_party/vtune/jitprofiling.cc
+++ b/deps/v8/src/third_party/vtune/jitprofiling.cc
@@ -103,12 +103,12 @@ static iJIT_IsProfilingActiveFlags executionMode = iJIT_NOTHING_RUNNING;
/* end collector dll part. */
-/* loadiJIT_Funcs() : this function is called just in the beginning and is responsible
+/* loadiJIT_Funcs() : this function is called just in the beginning and is responsible
** to load the functions from BistroJavaCollector.dll
** result:
** on success: the functions loads, iJIT_DLL_is_missing=0, return value = 1.
** on failure: the functions are NULL, iJIT_DLL_is_missing=1, return value = 0.
-*/
+*/
static int loadiJIT_Funcs(void);
/* global representing whether the BistroJavaCollector can't be loaded */
@@ -129,7 +129,7 @@ static pthread_key_t threadLocalStorageHandle = (pthread_key_t)0;
#define INIT_TOP_Stack 10000
-typedef struct
+typedef struct
{
unsigned int TopStack;
unsigned int CurrentStack;
@@ -139,9 +139,9 @@ typedef struct
/*
** The function for reporting virtual-machine related events to VTune.
-** Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill in the stack_id
+** Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill in the stack_id
** field in the iJIT_Method_NIDS structure, as VTune fills it.
-**
+**
** The return value in iJVM_EVENT_TYPE_ENTER_NIDS && iJVM_EVENT_TYPE_LEAVE_NIDS events
** will be 0 in case of failure.
** in iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event it will be -1 if EventSpecificData == 0
@@ -153,7 +153,7 @@ ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventS
int ReturnValue;
/*******************************************************************************
- ** This section is for debugging outside of VTune.
+ ** This section is for debugging outside of VTune.
** It creates the environment variables that indicates call graph mode.
** If running outside of VTune remove the remark.
**
@@ -170,22 +170,22 @@ ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventS
*******************************************************************************/
/* initialization part - the functions have not been loaded yet. This part
- ** will load the functions, and check if we are in Call Graph mode.
+ ** will load the functions, and check if we are in Call Graph mode.
** (for special treatment).
*/
- if (!FUNC_NotifyEvent)
+ if (!FUNC_NotifyEvent)
{
- if (iJIT_DLL_is_missing)
+ if (iJIT_DLL_is_missing)
return 0;
// load the Function from the DLL
- if (!loadiJIT_Funcs())
+ if (!loadiJIT_Funcs())
return 0;
/* Call Graph initialization. */
}
- /* If the event is method entry/exit, check that in the current mode
+ /* If the event is method entry/exit, check that in the current mode
** VTune is allowed to receive it
*/
if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS || event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) &&
@@ -194,7 +194,7 @@ ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventS
return 0;
}
/* This section is performed when method enter event occurs.
- ** It updates the virtual stack, or creates it if this is the first
+ ** It updates the virtual stack, or creates it if this is the first
** method entry in the thread. The stack pointer is decreased.
*/
if (event_type == iJVM_EVENT_TYPE_ENTER_NIDS)
@@ -263,7 +263,7 @@ ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventS
return 0;
}
- ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
+ ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
return ReturnValue;
}
@@ -296,7 +296,7 @@ ITT_EXTERN_C iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive()
/* this function loads the collector dll (BistroJavaCollector) and the relevant functions.
** on success: all functions load, iJIT_DLL_is_missing = 0, return value = 1.
** on failure: all functions are NULL, iJIT_DLL_is_missing = 1, return value = 0.
-*/
+*/
static int loadiJIT_Funcs()
{
static int bDllWasLoaded = 0;
@@ -314,7 +314,7 @@ static int loadiJIT_Funcs()
iJIT_DLL_is_missing = 1;
FUNC_NotifyEvent = NULL;
- if (m_libHandle)
+ if (m_libHandle)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
FreeLibrary(m_libHandle);
@@ -390,7 +390,7 @@ static int loadiJIT_Funcs()
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
FUNC_NotifyEvent = reinterpret_cast<TPNotify>(reinterpret_cast<intptr_t>(dlsym(m_libHandle, "NotifyEvent")));
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
- if (!FUNC_NotifyEvent)
+ if (!FUNC_NotifyEvent)
{
FUNC_Initialize = NULL;
return 0;
@@ -401,7 +401,7 @@ static int loadiJIT_Funcs()
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
FUNC_Initialize = reinterpret_cast<TPInitialize>(reinterpret_cast<intptr_t>(dlsym(m_libHandle, "Initialize")));
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
- if (!FUNC_Initialize)
+ if (!FUNC_Initialize)
{
FUNC_NotifyEvent = NULL;
return 0;
@@ -433,7 +433,7 @@ static int loadiJIT_Funcs()
}
/*
-** This function should be called by the user whenever a thread ends, to free the thread
+** This function should be called by the user whenever a thread ends, to free the thread
** "virtual stack" storage
*/
ITT_EXTERN_C void JITAPI FinalizeThread()
@@ -464,7 +464,7 @@ ITT_EXTERN_C void JITAPI FinalizeThread()
*/
ITT_EXTERN_C void JITAPI FinalizeProcess()
{
- if (m_libHandle)
+ if (m_libHandle)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
FreeLibrary(m_libHandle);
@@ -484,7 +484,7 @@ ITT_EXTERN_C void JITAPI FinalizeProcess()
/*
** This function should be called by the user for any method once.
-** The function will return a unique method ID, the user should maintain the ID for each
+** The function will return a unique method ID, the user should maintain the ID for each
** method
*/
ITT_EXTERN_C unsigned int JITAPI iJIT_GetNewMethodID()
diff --git a/deps/v8/src/third_party/vtune/jitprofiling.h b/deps/v8/src/third_party/vtune/jitprofiling.h
index abd6d8ca78..193f243851 100644
--- a/deps/v8/src/third_party/vtune/jitprofiling.h
+++ b/deps/v8/src/third_party/vtune/jitprofiling.h
@@ -67,54 +67,54 @@ typedef enum iJIT_jvm_event
{
/* shutdown */
-
- /*
+
+ /*
* Program exiting EventSpecificData NA
*/
- iJVM_EVENT_TYPE_SHUTDOWN = 2,
+ iJVM_EVENT_TYPE_SHUTDOWN = 2,
/* JIT profiling */
-
- /*
+
+ /*
* issued after method code jitted into memory but before code is executed
* EventSpecificData is an iJIT_Method_Load
*/
- iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
- /* issued before unload. Method code will no longer be executed, but code
- * and info are still in memory. The VTune profiler may capture method
+ /* issued before unload. Method code will no longer be executed, but code
+ * and info are still in memory. The VTune profiler may capture method
* code only at this point EventSpecificData is iJIT_Method_Id
*/
- iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+ iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
/* Method Profiling */
- /* method name, Id and stack is supplied
- * issued when a method is about to be entered EventSpecificData is
+ /* method name, Id and stack is supplied
+ * issued when a method is about to be entered EventSpecificData is
* iJIT_Method_NIDS
*/
- iJVM_EVENT_TYPE_ENTER_NIDS = 19,
+ iJVM_EVENT_TYPE_ENTER_NIDS = 19,
- /* method name, Id and stack is supplied
- * issued when a method is about to be left EventSpecificData is
+ /* method name, Id and stack is supplied
+ * issued when a method is about to be left EventSpecificData is
* iJIT_Method_NIDS
*/
- iJVM_EVENT_TYPE_LEAVE_NIDS
+ iJVM_EVENT_TYPE_LEAVE_NIDS
} iJIT_JVM_EVENT;
typedef enum _iJIT_ModeFlags
{
/* No need to Notify VTune, since VTune is not running */
- iJIT_NO_NOTIFICATIONS = 0x0000,
+ iJIT_NO_NOTIFICATIONS = 0x0000,
- /* when turned on the jit must call
+ /* when turned on the jit must call
* iJIT_NotifyEvent
* (
* iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
* )
* for all the method already jitted
*/
- iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
+ iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
/* when turned on the jit must call
* iJIT_NotifyEvent
@@ -122,19 +122,19 @@ typedef enum _iJIT_ModeFlags
* iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
* ) for all the method that are unloaded
*/
- iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
+ iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
/* when turned on the jit must instrument all
* the currently jitted code with calls on
* method entries
*/
- iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
+ iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
/* when turned on the jit must instrument all
* the currently jitted code with calls
* on method exit
*/
- iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
+ iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
} iJIT_ModeFlags;
@@ -143,13 +143,13 @@ typedef enum _iJIT_ModeFlags
typedef enum _iJIT_IsProfilingActiveFlags
{
/* No profiler is running. Currently not used */
- iJIT_NOTHING_RUNNING = 0x0000,
+ iJIT_NOTHING_RUNNING = 0x0000,
/* Sampling is running. This is the default value
* returned by iJIT_IsProfilingActive()
*/
- iJIT_SAMPLING_ON = 0x0001,
-
+ iJIT_SAMPLING_ON = 0x0001,
+
/* Call Graph is running */
iJIT_CALLGRAPH_ON = 0x0002
@@ -174,7 +174,7 @@ typedef struct _iJIT_Method_Id
/* Id of the method (same as the one passed in
* the iJIT_Method_Load struct
*/
- unsigned int method_id;
+ unsigned int method_id;
} *piJIT_Method_Id, iJIT_Method_Id;
@@ -188,13 +188,13 @@ typedef struct _iJIT_Method_Id
typedef struct _iJIT_Method_NIDS
{
/* unique method ID */
- unsigned int method_id;
+ unsigned int method_id;
/* NOTE: no need to fill this field, it's filled by VTune */
- unsigned int stack_id;
+ unsigned int stack_id;
/* method name (just the method, without the class) */
- char* method_name;
+ char* method_name;
} *piJIT_Method_NIDS, iJIT_Method_NIDS;
/* structures for the events:
@@ -204,54 +204,54 @@ typedef struct _iJIT_Method_NIDS
typedef struct _LineNumberInfo
{
/* x86 Offset from the beginning of the method*/
- unsigned int Offset;
-
+ unsigned int Offset;
+
/* source line number from the beginning of the source file */
- unsigned int LineNumber;
+ unsigned int LineNumber;
} *pLineNumberInfo, LineNumberInfo;
typedef struct _iJIT_Method_Load
{
/* unique method ID - can be any unique value, (except 0 - 999) */
- unsigned int method_id;
+ unsigned int method_id;
/* method name (can be with or without the class and signature, in any case
* the class name will be added to it)
*/
- char* method_name;
+ char* method_name;
/* virtual address of that method - This determines the method range for the
* iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
*/
- void* method_load_address;
+ void* method_load_address;
/* Size in memory - Must be exact */
- unsigned int method_size;
+ unsigned int method_size;
/* Line Table size in number of entries - Zero if none */
- unsigned int line_number_size;
-
+ unsigned int line_number_size;
+
/* Pointer to the beginning of the line numbers info array */
- pLineNumberInfo line_number_table;
+ pLineNumberInfo line_number_table;
/* unique class ID */
- unsigned int class_id;
-
+ unsigned int class_id;
+
/* class file name */
- char* class_file_name;
+ char* class_file_name;
/* source file name */
- char* source_file_name;
+ char* source_file_name;
/* bits supplied by the user for saving in the JIT file */
- void* user_data;
+ void* user_data;
/* the size of the user data buffer */
- unsigned int user_data_size;
+ unsigned int user_data_size;
/* NOTE: no need to fill this field, it's filled by VTune */
- iJDEnvironmentType env;
+ iJDEnvironmentType env;
} *piJIT_Method_Load, iJIT_Method_Load;
@@ -280,7 +280,7 @@ typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags);
int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
/* The new mode call back routine */
-void JITAPI iJIT_RegisterCallbackEx(void *userdata,
+void JITAPI iJIT_RegisterCallbackEx(void *userdata,
iJIT_ModeChangedEx NewModeCallBackFuncEx);
iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
diff --git a/deps/v8/src/third_party/vtune/v8-vtune.h b/deps/v8/src/third_party/vtune/v8-vtune.h
index 29ea3eacd8..c60b303b3a 100644
--- a/deps/v8/src/third_party/vtune/v8-vtune.h
+++ b/deps/v8/src/third_party/vtune/v8-vtune.h
@@ -1,38 +1,38 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
-
+
GPL LICENSE SUMMARY
-
+
Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
-
+
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
-
+
Contact Information:
http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
-
+
BSD LICENSE
-
+
Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
-
+
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
@@ -42,7 +42,7 @@
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
-
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index 93de7efbb9..ea897e58e4 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -193,7 +193,7 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
jmethod.method_name = temp_method_name;
Handle<Script> script = event->script;
-
+
if (*script != NULL) {
// Get the source file name and set it to jmethod.source_file_name
if ((*script->GetScriptName())->IsString()) {
@@ -228,7 +228,7 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
}
GetEntries()->erase(event->code_start);
}
- }
+ }
iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
reinterpret_cast<void*>(&jmethod));
@@ -261,11 +261,11 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
GetEntries()->insert(std::pair <void*, void*>(event->code_start, event->user_data));
break;
- }
+ }
default:
break;
}
- }
+ }
return;
}
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.h b/deps/v8/src/third_party/vtune/vtune-jit.h
index 42b8c3da1f..15011bf05e 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.h
+++ b/deps/v8/src/third_party/vtune/vtune-jit.h
@@ -1,38 +1,38 @@
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
-
+
GPL LICENSE SUMMARY
-
+
Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
-
+
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
-
+
Contact Information:
http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
-
+
BSD LICENSE
-
+
Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
-
+
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
@@ -42,7 +42,7 @@
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
-
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 992adaa77c..39bcc24074 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -213,6 +213,10 @@ class Token {
return COMMA <= op && op <= MOD;
}
+ static bool IsTruncatingBinaryOp(Value op) {
+ return BIT_OR <= op && op <= ROR;
+ }
+
static bool IsCompareOp(Value op) {
return EQ <= op && op <= IN;
}
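
The added Token::IsTruncatingBinaryOp() relies on the truncating (bitwise and shift) operators occupying one contiguous block of token values, so classification is just two comparisons. A standalone sketch of the idiom (not V8 source; the enum ordering below is illustrative, not the real token list):

#include <cstdio>

enum Value { COMMA, OR, AND, BIT_OR, BIT_XOR, BIT_AND, SHL, SAR, SHR, ROR,
             ADD, SUB, MUL, DIV, MOD };

bool IsBinaryOp(Value op) { return COMMA <= op && op <= MOD; }

// Bitwise and shift operators truncate their operands to 32-bit integers,
// so keeping them adjacent in the enum makes the check a simple range test.
bool IsTruncatingBinaryOp(Value op) { return BIT_OR <= op && op <= ROR; }

int main() {
  std::printf("SHR: %d, ADD: %d\n",
              IsTruncatingBinaryOp(SHR), IsTruncatingBinaryOp(ADD));  // 1, 0
  return 0;
}
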
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index c4825fcf73..7895117137 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -115,9 +115,7 @@ Object** TransitionArray::GetPrototypeTransitionsSlot() {
Object** TransitionArray::GetKeySlot(int transition_number) {
ASSERT(!IsSimpleTransition());
ASSERT(transition_number < number_of_transitions());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToKeyIndex(transition_number)));
+ return RawFieldOfElementAt(ToKeyIndex(transition_number));
}
@@ -162,9 +160,7 @@ void TransitionArray::SetTarget(int transition_number, Map* value) {
PropertyDetails TransitionArray::GetTargetDetails(int transition_number) {
Map* map = GetTarget(transition_number);
- DescriptorArray* descriptors = map->instance_descriptors();
- int descriptor = map->LastAdded();
- return descriptors->GetDetails(descriptor);
+ return map->GetLastDescriptorDetails();
}
diff --git a/deps/v8/src/trig-table.h b/deps/v8/src/trig-table.h
new file mode 100644
index 0000000000..081c0389ae
--- /dev/null
+++ b/deps/v8/src/trig-table.h
@@ -0,0 +1,61 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TRIG_TABLE_H_
+#define V8_TRIG_TABLE_H_
+
+
+namespace v8 {
+namespace internal {
+
+class TrigonometricLookupTable : public AllStatic {
+ public:
+ // Casting away const-ness to use as argument for typed array constructor.
+ static void* sin_table() {
+ return const_cast<double*>(&kSinTable[0]);
+ }
+
+ static void* cos_x_interval_table() {
+ return const_cast<double*>(&kCosXIntervalTable[0]);
+ }
+
+ static double samples_over_pi_half() { return kSamplesOverPiHalf; }
+ static int samples() { return kSamples; }
+ static int table_num_bytes() { return kTableSize * sizeof(*kSinTable); }
+ static int table_size() { return kTableSize; }
+
+ private:
+ static const double kSinTable[];
+ static const double kCosXIntervalTable[];
+ static const int kSamples;
+ static const int kTableSize;
+ static const double kSamplesOverPiHalf;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_TRIG_TABLE_H_
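
The new header only declares the precomputed tables. As a rough illustration of how a sine table sampled over [0, pi/2] is typically built and queried with linear interpolation (the sample count and interpolation scheme here are assumptions for illustration, not taken from the V8 sources):

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  // Build a sine table over [0, pi/2], with one spare sample so the
  // interpolation below may always read table[i + 1].
  const int kSamples = 1800;  // assumed sample count
  const double kPiHalf = 1.57079632679489661923;
  const double kSamplesOverPiHalf = kSamples / kPiHalf;
  std::vector<double> table(kSamples + 2);
  for (int i = 0; i < kSamples + 2; ++i) table[i] = std::sin(i / kSamplesOverPiHalf);

  // Approximate sin(x) for x in [0, pi/2] by interpolating between neighbours.
  const double x = 0.7;
  const double t = x * kSamplesOverPiHalf;
  const int i = static_cast<int>(t);
  const double approx = table[i] + (table[i + 1] - table[i]) * (t - i);
  std::printf("sin(%.2f) ~= %.12f (libm: %.12f)\n", x, approx, std::sin(x));
  return 0;
}
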
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 65d1364058..7372693dfa 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -42,27 +42,17 @@ namespace v8 {
namespace internal {
-TypeInfo TypeInfo::FromValue(Handle<Object> value) {
- if (value->IsSmi()) {
- return TypeInfo::Smi();
- } else if (value->IsHeapNumber()) {
- return TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
- ? TypeInfo::Integer32()
- : TypeInfo::Double();
- } else if (value->IsString()) {
- return TypeInfo::String();
- }
- return TypeInfo::Unknown();
-}
-
-
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
Handle<Context> native_context,
- Isolate* isolate,
Zone* zone)
: native_context_(native_context),
- isolate_(isolate),
zone_(zone) {
+ Object* raw_info = code->type_feedback_info();
+ if (raw_info->IsTypeFeedbackInfo()) {
+ feedback_vector_ = Handle<FixedArray>(TypeFeedbackInfo::cast(raw_info)->
+ feedback_vector());
+ }
+
BuildDictionary(code);
ASSERT(dictionary_->IsDictionary());
}
@@ -79,117 +69,48 @@ Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
Object* value = dictionary_->ValueAt(entry);
if (value->IsCell()) {
Cell* cell = Cell::cast(value);
- return Handle<Object>(cell->value(), isolate_);
+ return Handle<Object>(cell->value(), isolate());
} else {
- return Handle<Object>(value, isolate_);
+ return Handle<Object>(value, isolate());
}
}
- return Handle<Object>::cast(isolate_->factory()->undefined_value());
+ return Handle<Object>::cast(isolate()->factory()->undefined_value());
}
-Handle<Cell> TypeFeedbackOracle::GetInfoCell(
- TypeFeedbackId ast_id) {
- int entry = dictionary_->FindEntry(IdToKey(ast_id));
- if (entry != UnseededNumberDictionary::kNotFound) {
- Cell* cell = Cell::cast(dictionary_->ValueAt(entry));
- return Handle<Cell>(cell, isolate_);
+Handle<Object> TypeFeedbackOracle::GetInfo(int slot) {
+ ASSERT(slot >= 0 && slot < feedback_vector_->length());
+ Object* obj = feedback_vector_->get(slot);
+ if (!obj->IsJSFunction() ||
+ !CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
+ return Handle<Object>(obj, isolate());
}
- return Handle<Cell>::null();
+ return Handle<Object>::cast(isolate()->factory()->undefined_value());
}
-bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsMap()) return false;
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
+bool TypeFeedbackOracle::LoadIsUninitialized(TypeFeedbackId id) {
+ Handle<Object> maybe_code = GetInfo(id);
+ if (maybe_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
return code->is_inline_cache_stub() && code->ic_state() == UNINITIALIZED;
}
return false;
}
-bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsMap()) return true;
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool preliminary_checks = code->is_keyed_load_stub() &&
- code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
- if (!preliminary_checks) return false;
- Map* map = code->FindFirstMap();
- if (map == NULL) return false;
- map = map->CurrentMapForDeprecated();
- return map != NULL && !CanRetainOtherContext(map, *native_context_);
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::LoadIsPreMonomorphic(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- return code->is_inline_cache_stub() && code->ic_state() == PREMONOMORPHIC;
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- return code->is_keyed_load_stub() && code->ic_state() == POLYMORPHIC;
- }
- return false;
-}
-
-
bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsMap()) return false;
- if (!map_or_code->IsCode()) return false;
- Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Handle<Object> maybe_code = GetInfo(ast_id);
+ if (!maybe_code->IsCode()) return false;
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
return code->ic_state() == UNINITIALIZED;
}
-bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsMap()) return true;
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool preliminary_checks =
- code->is_keyed_store_stub() &&
- code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
- if (!preliminary_checks) return false;
- Map* map = code->FindFirstMap();
- if (map == NULL) return false;
- map = map->CurrentMapForDeprecated();
- return map != NULL && !CanRetainOtherContext(map, *native_context_);
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::StoreIsPreMonomorphic(TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- return code->ic_state() == PREMONOMORPHIC;
- }
- return false;
-}
-
-
bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Handle<Object> maybe_code = GetInfo(ast_id);
+ if (maybe_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
return code->is_keyed_store_stub() &&
code->ic_state() == POLYMORPHIC;
}
@@ -197,165 +118,76 @@ bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
- Handle<Object> value = GetInfo(expr->CallFeedbackId());
- return value->IsMap() || value->IsAllocationSite() || value->IsJSFunction() ||
- value->IsSmi();
+bool TypeFeedbackOracle::CallIsMonomorphic(int slot) {
+ Handle<Object> value = GetInfo(slot);
+ return value->IsAllocationSite() || value->IsJSFunction();
}
-bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
- Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+bool TypeFeedbackOracle::CallNewIsMonomorphic(int slot) {
+ Handle<Object> info = GetInfo(slot);
return info->IsAllocationSite() || info->IsJSFunction();
}
-bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
- ObjectLiteral::Property* prop) {
- Handle<Object> map_or_code = GetInfo(prop->key()->LiteralFeedbackId());
- return map_or_code->IsMap();
-}
-
-
-byte TypeFeedbackOracle::ForInType(ForInStatement* stmt) {
- Handle<Object> value = GetInfo(stmt->ForInFeedbackId());
+byte TypeFeedbackOracle::ForInType(int feedback_vector_slot) {
+ Handle<Object> value = GetInfo(feedback_vector_slot);
return value->IsSmi() &&
- Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker
+ Smi::cast(*value)->value() == TypeFeedbackInfo::kForInFastCaseMarker
? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
}
-Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
- ASSERT(LoadIsMonomorphicNormal(expr));
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
- return map == NULL || CanRetainOtherContext(map, *native_context_)
- ? Handle<Map>::null()
- : Handle<Map>(map);
- }
- return Handle<Map>::cast(map_or_code);
-}
-
-
-Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
- TypeFeedbackId ast_id) {
- ASSERT(StoreIsMonomorphicNormal(ast_id));
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
- return map == NULL || CanRetainOtherContext(map, *native_context_)
- ? Handle<Map>::null()
- : Handle<Map>(map);
- }
- return Handle<Map>::cast(map_or_code);
-}
-
-
KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode(
TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Handle<Object> maybe_code = GetInfo(ast_id);
+ if (maybe_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
if (code->kind() == Code::KEYED_STORE_IC) {
- return Code::GetKeyedAccessStoreMode(code->extra_ic_state());
+ return KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state());
}
}
return STANDARD_STORE;
}
-void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
- Handle<String> name,
- SmallMapList* types) {
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
- CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
-}
-
-
-void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
- Handle<String> name,
- SmallMapList* types) {
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, Code::STORE_IC);
- CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types);
-}
-
-
-void TypeFeedbackOracle::CallReceiverTypes(Call* expr,
- Handle<String> name,
- CallKind call_kind,
- SmallMapList* types) {
- int arity = expr->arguments()->length();
-
- // Note: Currently we do not take string extra ic data into account
- // here.
- Code::ExtraICState extra_ic_state =
- CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
-
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
- extra_ic_state,
- Code::NORMAL,
- arity,
- OWN_MAP);
- CollectReceiverTypes(expr->CallFeedbackId(), name, flags, types);
-}
-
-
-CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
- Handle<Object> value = GetInfo(expr->CallFeedbackId());
- if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
- CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
- ASSERT(check != RECEIVER_MAP_CHECK);
- return check;
-}
-
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
- Handle<Object> info = GetInfo(expr->CallFeedbackId());
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(int slot) {
+ Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
- return Handle<JSFunction>(isolate_->global_context()->array_function());
+ return Handle<JSFunction>(isolate()->global_context()->array_function());
} else {
return Handle<JSFunction>::cast(info);
}
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
- Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(int slot) {
+ Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
- return Handle<JSFunction>(isolate_->global_context()->array_function());
+ return Handle<JSFunction>(isolate()->global_context()->array_function());
} else {
return Handle<JSFunction>::cast(info);
}
}
-Handle<Cell> TypeFeedbackOracle::GetCallNewAllocationInfoCell(CallNew* expr) {
- return GetInfoCell(expr->CallNewFeedbackId());
-}
-
-
-Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
- ObjectLiteral::Property* prop) {
- ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
- return Handle<Map>::cast(GetInfo(prop->key()->LiteralFeedbackId()));
+Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (info->IsAllocationSite()) {
+ return Handle<AllocationSite>::cast(info);
+ }
+ return Handle<AllocationSite>::null();
}
-bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
- return *GetInfo(expr->PropertyFeedbackId()) ==
- isolate_->builtins()->builtin(id);
+bool TypeFeedbackOracle::LoadIsBuiltin(
+ TypeFeedbackId id, Builtins::Name builtin) {
+ return *GetInfo(id) == isolate()->builtins()->builtin(builtin);
}
-bool TypeFeedbackOracle::LoadIsStub(Property* expr, ICStub* stub) {
- Handle<Object> object = GetInfo(expr->PropertyFeedbackId());
+bool TypeFeedbackOracle::LoadIsStub(TypeFeedbackId id, ICStub* stub) {
+ Handle<Object> object = GetInfo(id);
if (!object->IsCode()) return false;
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_load_stub()) return false;
@@ -365,13 +197,13 @@ bool TypeFeedbackOracle::LoadIsStub(Property* expr, ICStub* stub) {
void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
- Handle<Type>* left_type,
- Handle<Type>* right_type,
- Handle<Type>* combined_type) {
+ Type** left_type,
+ Type** right_type,
+ Type** combined_type) {
Handle<Object> info = GetInfo(id);
if (!info->IsCode()) {
// For some comparisons we don't have ICs, e.g. LiteralCompareTypeof.
- *left_type = *right_type = *combined_type = handle(Type::None(), isolate_);
+ *left_type = *right_type = *combined_type = Type::None(zone());
return;
}
Handle<Code> code = Handle<Code>::cast(info);
@@ -379,89 +211,117 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
Handle<Map> map;
Map* raw_map = code->FindFirstMap();
if (raw_map != NULL) {
- raw_map = raw_map->CurrentMapForDeprecated();
- if (raw_map != NULL && !CanRetainOtherContext(raw_map, *native_context_)) {
- map = handle(raw_map, isolate_);
+ map = Map::CurrentMapForDeprecated(handle(raw_map));
+ if (!map.is_null() && CanRetainOtherContext(*map, *native_context_)) {
+ map = Handle<Map>::null();
}
}
if (code->is_compare_ic_stub()) {
int stub_minor_key = code->stub_info();
CompareIC::StubInfoToType(
- stub_minor_key, left_type, right_type, combined_type, map, isolate());
+ stub_minor_key, left_type, right_type, combined_type, map, zone());
} else if (code->is_compare_nil_ic_stub()) {
- CompareNilICStub stub(code->extended_extra_ic_state());
- *combined_type = stub.GetType(isolate_, map);
- *left_type = *right_type = stub.GetInputType(isolate_, map);
+ CompareNilICStub stub(code->extra_ic_state());
+ *combined_type = stub.GetType(zone(), map);
+ *left_type = *right_type = stub.GetInputType(zone(), map);
}
}
void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
+ Type** left,
+ Type** right,
+ Type** result,
Maybe<int>* fixed_right_arg,
- Token::Value operation) {
+ Handle<AllocationSite>* allocation_site,
+ Token::Value op) {
Handle<Object> object = GetInfo(id);
if (!object->IsCode()) {
// For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
- // operations covered by the BinaryOpStub we should always have them.
- ASSERT(!(operation >= BinaryOpStub::FIRST_TOKEN &&
- operation <= BinaryOpStub::LAST_TOKEN));
- *left = *right = *result = handle(Type::None(), isolate_);
+ // operations covered by the BinaryOpIC we should always have them.
+ ASSERT(op < BinaryOpIC::State::FIRST_TOKEN ||
+ op > BinaryOpIC::State::LAST_TOKEN);
+ *left = *right = *result = Type::None(zone());
+ *fixed_right_arg = Maybe<int>();
+ *allocation_site = Handle<AllocationSite>::null();
return;
}
Handle<Code> code = Handle<Code>::cast(object);
- ASSERT(code->is_binary_op_stub());
+ ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
+ BinaryOpIC::State state(code->extra_ic_state());
+ ASSERT_EQ(op, state.op());
+
+ *left = state.GetLeftType(zone());
+ *right = state.GetRightType(zone());
+ *result = state.GetResultType(zone());
+ *fixed_right_arg = state.fixed_right_arg();
+
+ AllocationSite* first_allocation_site = code->FindFirstAllocationSite();
+ if (first_allocation_site != NULL) {
+ *allocation_site = handle(first_allocation_site);
+ } else {
+ *allocation_site = Handle<AllocationSite>::null();
+ }
+}
+
- BinaryOpStub stub(code->extended_extra_ic_state());
+Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
+ Handle<Object> object = GetInfo(id);
+ if (!object->IsCode()) return Type::None(zone());
+ Handle<Code> code = Handle<Code>::cast(object);
+ ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
+ BinaryOpIC::State state(code->extra_ic_state());
+ return state.GetLeftType(zone());
+}
- // Sanity check.
- ASSERT(stub.operation() == operation);
- *left = stub.GetLeftType(isolate());
- *right = stub.GetRightType(isolate());
- *result = stub.GetResultType(isolate());
- *fixed_right_arg = stub.fixed_right_arg();
+void TypeFeedbackOracle::PropertyReceiverTypes(
+ TypeFeedbackId id, Handle<String> name,
+ SmallMapList* receiver_types, bool* is_prototype) {
+ receiver_types->Clear();
+ FunctionPrototypeStub proto_stub(Code::LOAD_IC);
+ *is_prototype = LoadIsStub(id, &proto_stub);
+ if (!*is_prototype) {
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ CollectReceiverTypes(id, name, flags, receiver_types);
+ }
}
-Handle<Type> TypeFeedbackOracle::ClauseType(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- Handle<Type> result(Type::None(), isolate_);
- if (info->IsCode() && Handle<Code>::cast(info)->is_compare_ic_stub()) {
- Handle<Code> code = Handle<Code>::cast(info);
- CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
- result = CompareIC::StateToType(isolate_, state);
+void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
+ TypeFeedbackId id, SmallMapList* receiver_types, bool* is_string) {
+ receiver_types->Clear();
+ *is_string = false;
+ if (LoadIsBuiltin(id, Builtins::kKeyedLoadIC_String)) {
+ *is_string = true;
+ } else {
+ CollectReceiverTypes(id, receiver_types);
}
- return result;
}
-Handle<Type> TypeFeedbackOracle::IncrementType(CountOperation* expr) {
- Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
- Handle<Type> unknown(Type::None(), isolate_);
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_binary_op_stub()) return unknown;
+void TypeFeedbackOracle::AssignmentReceiverTypes(
+ TypeFeedbackId id, Handle<String> name, SmallMapList* receiver_types) {
+ receiver_types->Clear();
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ CollectReceiverTypes(id, name, flags, receiver_types);
+}
+
- BinaryOpStub stub(code->extended_extra_ic_state());
- return stub.GetLeftType(isolate());
+void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
+ TypeFeedbackId id, SmallMapList* receiver_types,
+ KeyedAccessStoreMode* store_mode) {
+ receiver_types->Clear();
+ CollectReceiverTypes(id, receiver_types);
+ *store_mode = GetStoreMode(id);
}
-void TypeFeedbackOracle::CollectPolymorphicMaps(Handle<Code> code,
- SmallMapList* types) {
- MapHandleList maps;
- code->FindAllMaps(&maps);
- types->Reserve(maps.length(), zone());
- for (int i = 0; i < maps.length(); i++) {
- Handle<Map> map(maps.at(i));
- if (!CanRetainOtherContext(*map, *native_context_)) {
- types->AddMapIfMissing(map, zone());
- }
- }
+void TypeFeedbackOracle::CountReceiverTypes(TypeFeedbackId id,
+ SmallMapList* receiver_types) {
+ receiver_types->Clear();
+ CollectReceiverTypes(id, receiver_types);
}
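
The new receiver-type helpers above (PropertyReceiverTypes, KeyedPropertyReceiverTypes, AssignmentReceiverTypes, KeyedAssignmentReceiverTypes, CountReceiverTypes) all follow the same clear-then-collect pattern over a SmallMapList. A minimal sketch of how a typing pass might consume one of them; the surrounding function and the list capacity are illustrative and not taken from this patch:

// Illustrative caller only -- not part of the patch. SmallMapList and
// KeyedAccessStoreMode are existing V8-internal types; the oracle methods
// are the ones defined above.
static void RecordKeyedStoreFeedback(TypeFeedbackOracle* oracle,
                                     TypeFeedbackId id, Zone* zone) {
  SmallMapList receiver_types(4, zone);   // capacity chosen arbitrarily
  KeyedAccessStoreMode store_mode;
  oracle->KeyedAssignmentReceiverTypes(id, &receiver_types, &store_mode);
  // receiver_types now holds the de-duplicated receiver maps the IC saw;
  // store_mode distinguishes standard stores from growing stores.
}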
@@ -472,24 +332,16 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<Object> object = GetInfo(ast_id);
if (object->IsUndefined() || object->IsSmi()) return;
- if (object.is_identical_to(isolate_->builtins()->StoreIC_GlobalProxy())) {
- // TODO(fschneider): We could collect the maps and signal that
- // we need a generic store (or load) here.
- ASSERT(Handle<Code>::cast(object)->ic_state() == GENERIC);
- } else if (object->IsMap()) {
- types->AddMapIfMissing(Handle<Map>::cast(object), zone());
- } else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC ||
- Handle<Code>::cast(object)->ic_state() == MONOMORPHIC) {
- CollectPolymorphicMaps(Handle<Code>::cast(object), types);
- } else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
- Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
+ ASSERT(object->IsCode());
+ Handle<Code> code(Handle<Code>::cast(object));
+
+ if (FLAG_collect_megamorphic_maps_from_stub_cache &&
+ code->ic_state() == MEGAMORPHIC) {
types->Reserve(4, zone());
- ASSERT(object->IsCode());
- isolate_->stub_cache()->CollectMatchingMaps(types,
- name,
- flags,
- native_context_,
- zone());
+ isolate()->stub_cache()->CollectMatchingMaps(
+ types, name, flags, native_context_, zone());
+ } else {
+ CollectReceiverTypes(ast_id, types);
}
}
@@ -528,26 +380,26 @@ bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
}
-void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types) {
+void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
+ SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
if (!object->IsCode()) return;
Handle<Code> code = Handle<Code>::cast(object);
- if (code->kind() == Code::KEYED_LOAD_IC ||
- code->kind() == Code::KEYED_STORE_IC) {
- CollectPolymorphicMaps(code, types);
+ MapHandleList maps;
+ if (code->ic_state() == MONOMORPHIC) {
+ Map* map = code->FindFirstMap();
+ if (map != NULL) maps.Add(handle(map));
+ } else if (code->ic_state() == POLYMORPHIC) {
+ code->FindAllMaps(&maps);
+ } else {
+ return;
}
-}
-
-
-void TypeFeedbackOracle::CollectPolymorphicStoreReceiverTypes(
- TypeFeedbackId ast_id,
- SmallMapList* types) {
- Handle<Object> object = GetInfo(ast_id);
- if (!object->IsCode()) return;
- Handle<Code> code = Handle<Code>::cast(object);
- if (code->kind() == Code::STORE_IC && code->ic_state() == POLYMORPHIC) {
- CollectPolymorphicMaps(code, types);
+ types->Reserve(maps.length(), zone());
+ for (int i = 0; i < maps.length(); i++) {
+ Handle<Map> map(maps.at(i));
+ if (!CanRetainOtherContext(*map, *native_context_)) {
+ types->AddMapIfMissing(map, zone());
+ }
}
}
@@ -565,11 +417,10 @@ byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) {
void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
DisallowHeapAllocation no_allocation;
ZoneList<RelocInfo> infos(16, zone());
- HandleScope scope(isolate_);
+ HandleScope scope(isolate());
GetRelocInfos(code, &infos);
CreateDictionary(code, &infos);
ProcessRelocInfos(&infos);
- ProcessTypeFeedbackCells(code);
// Allocate handle in the parent scope.
dictionary_ = scope.CloseAndEscape(dictionary_);
}
@@ -587,13 +438,9 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
ZoneList<RelocInfo>* infos) {
AllowHeapAllocation allocation_allowed;
- int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
- ? TypeFeedbackInfo::cast(code->type_feedback_info())->
- type_feedback_cells()->CellCount()
- : 0;
- int length = infos->length() + cell_count;
byte* old_start = code->instruction_start();
- dictionary_ = isolate()->factory()->NewUnseededNumberDictionary(length);
+ dictionary_ =
+ isolate()->factory()->NewUnseededNumberDictionary(infos->length());
byte* new_start = code->instruction_start();
RelocateRelocInfos(infos, old_start, new_start);
}
@@ -619,27 +466,6 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
switch (target->kind()) {
case Code::LOAD_IC:
case Code::STORE_IC:
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- if (target->ic_state() == MONOMORPHIC) {
- if (target->kind() == Code::CALL_IC &&
- target->check_type() != RECEIVER_MAP_CHECK) {
- SetInfo(ast_id, Smi::FromInt(target->check_type()));
- } else {
- Object* map = target->FindFirstMap();
- if (map == NULL) {
- SetInfo(ast_id, static_cast<Object*>(target));
- } else if (!CanRetainOtherContext(Map::cast(map),
- *native_context_)) {
- Map* feedback = Map::cast(map)->CurrentMapForDeprecated();
- if (feedback != NULL) SetInfo(ast_id, feedback);
- }
- }
- } else {
- SetInfo(ast_id, target);
- }
- break;
-
case Code::KEYED_LOAD_IC:
case Code::KEYED_STORE_IC:
case Code::BINARY_OP_IC:
@@ -656,26 +482,6 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
}
-void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
- Object* raw_info = code->type_feedback_info();
- if (!raw_info->IsTypeFeedbackInfo()) return;
- Handle<TypeFeedbackCells> cache(
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
- for (int i = 0; i < cache->CellCount(); i++) {
- TypeFeedbackId ast_id = cache->AstId(i);
- Cell* cell = cache->GetCell(i);
- Object* value = cell->value();
- if (value->IsSmi() ||
- value->IsAllocationSite() ||
- (value->IsJSFunction() &&
- !CanRetainOtherContext(JSFunction::cast(value),
- *native_context_))) {
- SetInfo(ast_id, cell);
- }
- }
-}
-
-
void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) ==
UnseededNumberDictionary::kNotFound);
@@ -690,14 +496,4 @@ void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
}
-Representation Representation::FromType(TypeInfo info) {
- if (info.IsUninitialized()) return Representation::None();
- if (info.IsSmi()) return Representation::Smi();
- if (info.IsInteger32()) return Representation::Integer32();
- if (info.IsDouble()) return Representation::Double();
- if (info.IsNumber()) return Representation::Double();
- return Representation::Tagged();
-}
-
-
} } // namespace v8::internal
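
With this change the oracle is queried by TypeFeedbackId or, for calls and 'new' expressions, by an integer feedback-vector slot instead of by AST node, and GetCallNewAllocationSite replaces the old cell-based GetCallNewAllocationInfoCell. A hedged sketch of a caller on the new slot-based API; the helper function itself is hypothetical:

// Hypothetical consumer of the slot-based CallNew feedback; the oracle
// methods are the ones defined in this file.
static Handle<JSFunction> LookupNewTarget(TypeFeedbackOracle* oracle,
                                          int feedback_vector_slot) {
  if (!oracle->CallNewIsMonomorphic(feedback_vector_slot)) {
    return Handle<JSFunction>::null();  // no single target recorded
  }
  // A null handle means no AllocationSite was recorded for this slot.
  Handle<AllocationSite> site =
      oracle->GetCallNewAllocationSite(feedback_vector_slot);
  if (!site.is_null()) {
    // A recorded site lets the optimizer track element kinds for the
    // array being allocated here.
  }
  return oracle->GetCallNewTarget(feedback_vector_slot);
}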
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index f295c06dac..6de92cec0e 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -36,262 +36,62 @@
namespace v8 {
namespace internal {
-const int kMaxKeyedPolymorphism = 4;
-
-// Unknown
-// | \____________
-// | |
-// Primitive Non-primitive
-// | \_______ |
-// | | |
-// Number String |
-// / \ | |
-// Double Integer32 | /
-// | | / /
-// | Smi / /
-// | | / __/
-// Uninitialized.
-
-class TypeInfo {
- public:
- TypeInfo() : type_(kUninitialized) { }
-
- static TypeInfo Unknown() { return TypeInfo(kUnknown); }
- // We know it's a primitive type.
- static TypeInfo Primitive() { return TypeInfo(kPrimitive); }
- // We know it's a number of some sort.
- static TypeInfo Number() { return TypeInfo(kNumber); }
- // We know it's a signed 32 bit integer.
- static TypeInfo Integer32() { return TypeInfo(kInteger32); }
- // We know it's a Smi.
- static TypeInfo Smi() { return TypeInfo(kSmi); }
- // We know it's a heap number.
- static TypeInfo Double() { return TypeInfo(kDouble); }
- // We know it's a string.
- static TypeInfo String() { return TypeInfo(kString); }
- // We know it's an internalized string.
- static TypeInfo InternalizedString() { return TypeInfo(kInternalizedString); }
- // We know it's a non-primitive (object) type.
- static TypeInfo NonPrimitive() { return TypeInfo(kNonPrimitive); }
- // We haven't started collecting info yet.
- static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
-
- int ToInt() {
- return type_;
- }
-
- static TypeInfo FromInt(int bit_representation) {
- Type t = static_cast<Type>(bit_representation);
- ASSERT(t == kUnknown ||
- t == kPrimitive ||
- t == kNumber ||
- t == kInteger32 ||
- t == kSmi ||
- t == kDouble ||
- t == kString ||
- t == kNonPrimitive);
- return TypeInfo(t);
- }
-
- // Return the weakest (least precise) common type.
- static TypeInfo Combine(TypeInfo a, TypeInfo b) {
- return TypeInfo(static_cast<Type>(a.type_ & b.type_));
- }
-
-
- // Integer32 is an integer that can be represented as a signed
- // 32-bit integer. It has to be
- // in the range [-2^31, 2^31 - 1]. We also have to check for negative 0
- // as it is not an Integer32.
- static inline bool IsInt32Double(double value) {
- const DoubleRepresentation minus_zero(-0.0);
- DoubleRepresentation rep(value);
- if (rep.bits == minus_zero.bits) return false;
- if (value >= kMinInt && value <= kMaxInt &&
- value == static_cast<int32_t>(value)) {
- return true;
- }
- return false;
- }
-
- static TypeInfo FromValue(Handle<Object> value);
-
- bool Equals(const TypeInfo& other) {
- return type_ == other.type_;
- }
-
- inline bool IsUnknown() {
- ASSERT(type_ != kUninitialized);
- return type_ == kUnknown;
- }
-
- inline bool IsPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kPrimitive) == kPrimitive);
- }
-
- inline bool IsNumber() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNumber) == kNumber);
- }
-
- inline bool IsSmi() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kSmi) == kSmi);
- }
-
- inline bool IsInternalizedString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInternalizedString) == kInternalizedString);
- }
-
- inline bool IsNonInternalizedString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInternalizedString) == kString);
- }
-
- inline bool IsInteger32() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kInteger32) == kInteger32);
- }
-
- inline bool IsDouble() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kDouble) == kDouble);
- }
-
- inline bool IsString() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kString) == kString);
- }
-
- inline bool IsNonPrimitive() {
- ASSERT(type_ != kUninitialized);
- return ((type_ & kNonPrimitive) == kNonPrimitive);
- }
-
- inline bool IsUninitialized() {
- return type_ == kUninitialized;
- }
-
- const char* ToString() {
- switch (type_) {
- case kUnknown: return "Unknown";
- case kPrimitive: return "Primitive";
- case kNumber: return "Number";
- case kInteger32: return "Integer32";
- case kSmi: return "Smi";
- case kInternalizedString: return "InternalizedString";
- case kDouble: return "Double";
- case kString: return "String";
- case kNonPrimitive: return "Object";
- case kUninitialized: return "Uninitialized";
- }
- UNREACHABLE();
- return "Unreachable code";
- }
-
- private:
- enum Type {
- kUnknown = 0, // 0000000
- kPrimitive = 0x10, // 0010000
- kNumber = 0x11, // 0010001
- kInteger32 = 0x13, // 0010011
- kSmi = 0x17, // 0010111
- kDouble = 0x19, // 0011001
- kString = 0x30, // 0110000
- kInternalizedString = 0x32, // 0110010
- kNonPrimitive = 0x40, // 1000000
- kUninitialized = 0x7f // 1111111
- };
-
- explicit inline TypeInfo(Type t) : type_(t) { }
-
- Type type_;
-};
-
-
-enum StringStubFeedback {
- DEFAULT_STRING_STUB = 0,
- STRING_INDEX_OUT_OF_BOUNDS = 1
-};
-
-
// Forward declarations.
-// TODO(rossberg): these should all go away eventually.
-class Assignment;
-class Call;
-class CallNew;
-class CaseClause;
-class CompilationInfo;
-class CountOperation;
-class Expression;
-class ForInStatement;
class ICStub;
-class Property;
class SmallMapList;
-class ObjectLiteral;
-class ObjectLiteralProperty;
class TypeFeedbackOracle: public ZoneObject {
public:
TypeFeedbackOracle(Handle<Code> code,
Handle<Context> native_context,
- Isolate* isolate,
Zone* zone);
- bool LoadIsMonomorphicNormal(Property* expr);
- bool LoadIsUninitialized(Property* expr);
- bool LoadIsPreMonomorphic(Property* expr);
- bool LoadIsPolymorphic(Property* expr);
- bool StoreIsUninitialized(TypeFeedbackId ast_id);
- bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
- bool StoreIsPreMonomorphic(TypeFeedbackId ast_id);
- bool StoreIsKeyedPolymorphic(TypeFeedbackId ast_id);
- bool CallIsMonomorphic(Call* expr);
- bool CallNewIsMonomorphic(CallNew* expr);
- bool ObjectLiteralStoreIsMonomorphic(ObjectLiteralProperty* prop);
+ bool LoadIsUninitialized(TypeFeedbackId id);
+ bool StoreIsUninitialized(TypeFeedbackId id);
+ bool StoreIsKeyedPolymorphic(TypeFeedbackId id);
+ bool CallIsMonomorphic(int slot);
+ bool CallIsMonomorphic(TypeFeedbackId aid);
+ bool KeyedArrayCallIsHoley(TypeFeedbackId id);
+ bool CallNewIsMonomorphic(int slot);
// TODO(1571) We can't use ForInStatement::ForInType as the return value due
// to various cycles in our headers.
- byte ForInType(ForInStatement* expr);
-
- Handle<Map> LoadMonomorphicReceiverType(Property* expr);
- Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId id);
-
- KeyedAccessStoreMode GetStoreMode(TypeFeedbackId ast_id);
-
- void LoadReceiverTypes(Property* expr,
- Handle<String> name,
- SmallMapList* types);
- void StoreReceiverTypes(Assignment* expr,
- Handle<String> name,
- SmallMapList* types);
- void CallReceiverTypes(Call* expr,
- Handle<String> name,
- CallKind call_kind,
- SmallMapList* types);
- void CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types);
- void CollectPolymorphicStoreReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types);
+ // TODO(rossberg): once all oracle access is removed from ast.cc, it should
+ // be possible.
+ byte ForInType(int feedback_vector_slot);
+
+ KeyedAccessStoreMode GetStoreMode(TypeFeedbackId id);
+
+ void PropertyReceiverTypes(TypeFeedbackId id,
+ Handle<String> name,
+ SmallMapList* receiver_types,
+ bool* is_prototype);
+ void KeyedPropertyReceiverTypes(TypeFeedbackId id,
+ SmallMapList* receiver_types,
+ bool* is_string);
+ void AssignmentReceiverTypes(TypeFeedbackId id,
+ Handle<String> name,
+ SmallMapList* receiver_types);
+ void KeyedAssignmentReceiverTypes(TypeFeedbackId id,
+ SmallMapList* receiver_types,
+ KeyedAccessStoreMode* store_mode);
+ void CountReceiverTypes(TypeFeedbackId id,
+ SmallMapList* receiver_types);
+
+ void CollectReceiverTypes(TypeFeedbackId id,
+ SmallMapList* types);
static bool CanRetainOtherContext(Map* map, Context* native_context);
static bool CanRetainOtherContext(JSFunction* function,
Context* native_context);
- void CollectPolymorphicMaps(Handle<Code> code, SmallMapList* types);
-
- CheckType GetCallCheckType(Call* expr);
- Handle<JSFunction> GetCallTarget(Call* expr);
- Handle<JSFunction> GetCallNewTarget(CallNew* expr);
- Handle<Cell> GetCallNewAllocationInfoCell(CallNew* expr);
+ Handle<JSFunction> GetCallTarget(int slot);
+ Handle<JSFunction> GetCallNewTarget(int slot);
+ Handle<AllocationSite> GetCallNewAllocationSite(int slot);
- Handle<Map> GetObjectLiteralStoreMap(ObjectLiteralProperty* prop);
-
- bool LoadIsBuiltin(Property* expr, Builtins::Name id);
- bool LoadIsStub(Property* expr, ICStub* stub);
+ bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id);
+ bool LoadIsStub(TypeFeedbackId id, ICStub* stub);
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
@@ -300,31 +100,30 @@ class TypeFeedbackOracle: public ZoneObject {
// Get type information for arithmetic operations and compares.
void BinaryType(TypeFeedbackId id,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
+ Type** left,
+ Type** right,
+ Type** result,
Maybe<int>* fixed_right_arg,
+ Handle<AllocationSite>* allocation_site,
Token::Value operation);
void CompareType(TypeFeedbackId id,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* combined);
-
- Handle<Type> ClauseType(TypeFeedbackId id);
+ Type** left,
+ Type** right,
+ Type** combined);
- Handle<Type> IncrementType(CountOperation* expr);
+ Type* CountType(TypeFeedbackId id);
Zone* zone() const { return zone_; }
- Isolate* isolate() const { return isolate_; }
+ Isolate* isolate() const { return zone_->isolate(); }
private:
- void CollectReceiverTypes(TypeFeedbackId ast_id,
+ void CollectReceiverTypes(TypeFeedbackId id,
Handle<String> name,
Code::Flags flags,
SmallMapList* types);
- void SetInfo(TypeFeedbackId ast_id, Object* target);
+ void SetInfo(TypeFeedbackId id, Object* target);
void BuildDictionary(Handle<Code> code);
void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
@@ -333,20 +132,20 @@ class TypeFeedbackOracle: public ZoneObject {
byte* old_start,
byte* new_start);
void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
- void ProcessTypeFeedbackCells(Handle<Code> code);
// Returns an element from the backing store. Returns undefined if
// there is no information.
- Handle<Object> GetInfo(TypeFeedbackId ast_id);
+ Handle<Object> GetInfo(TypeFeedbackId id);
- // Return the cell that contains type feedback.
- Handle<Cell> GetInfoCell(TypeFeedbackId ast_id);
+ // Returns an element from the type feedback vector. Returns undefined
+ // if there is no information.
+ Handle<Object> GetInfo(int slot);
private:
Handle<Context> native_context_;
- Isolate* isolate_;
Zone* zone_;
Handle<UnseededNumberDictionary> dictionary_;
+ Handle<FixedArray> feedback_vector_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
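
The header now drops the explicit Isolate* constructor argument (isolate() is derived from the zone) and stores the new feedback_vector_ alongside the relocation-info dictionary. A sketch of constructing the oracle under the new three-argument signature, assuming a closure and zone are in scope as they would be in a compilation pass; this is not a verbatim call site from the patch:

// Hedged example; "closure" is a Handle<JSFunction> and "zone" a Zone*.
TypeFeedbackOracle oracle(handle(closure->shared()->code()),
                          handle(closure->context()->native_context()),
                          zone);
ASSERT(oracle.isolate() == zone->isolate());  // Isolate now comes from the zone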
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 8d7f4deb96..0c0cb71b2a 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -34,53 +34,75 @@ var $ArrayBuffer = global.ArrayBuffer;
// --------------- Typed Arrays ---------------------
+macro TYPED_ARRAYS(FUNCTION)
+// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
+FUNCTION(1, Uint8Array, 1)
+FUNCTION(2, Int8Array, 1)
+FUNCTION(3, Uint16Array, 2)
+FUNCTION(4, Int16Array, 2)
+FUNCTION(5, Uint32Array, 4)
+FUNCTION(6, Int32Array, 4)
+FUNCTION(7, Float32Array, 4)
+FUNCTION(8, Float64Array, 8)
+FUNCTION(9, Uint8ClampedArray, 1)
+endmacro
+
+macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
+ function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ var bufferByteLength = buffer.byteLength;
+ var offset;
+ if (IS_UNDEFINED(byteOffset)) {
+ offset = 0;
+ } else {
+ offset = ToPositiveInteger(byteOffset, "invalid_typed_array_length");
-function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
- function ConstructByArrayBuffer(obj, buffer, byteOffset, length) {
- var offset = ToPositiveInteger(byteOffset, "invalid_typed_array_length")
-
- if (offset % elementSize !== 0) {
- throw MakeRangeError("invalid_typed_array_alignment",
- "start offset", name, elementSize);
- }
- var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- if (offset > bufferByteLength) {
- throw MakeRangeError("invalid_typed_array_offset");
+ if (offset % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ ["start offset", "NAME", ELEMENT_SIZE]);
+ }
+ if (offset > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_offset");
+ }
}
var newByteLength;
var newLength;
if (IS_UNDEFINED(length)) {
- if (bufferByteLength % elementSize !== 0) {
+ if (bufferByteLength % ELEMENT_SIZE !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
- "byte length", name, elementSize);
+ ["byte length", "NAME", ELEMENT_SIZE]);
}
newByteLength = bufferByteLength - offset;
- newLength = newByteLength / elementSize;
+ newLength = newByteLength / ELEMENT_SIZE;
} else {
var newLength = ToPositiveInteger(length, "invalid_typed_array_length");
- newByteLength = newLength * elementSize;
+ newByteLength = newLength * ELEMENT_SIZE;
}
- if (offset + newByteLength > bufferByteLength) {
+ if ((offset + newByteLength > bufferByteLength)
+ || (newLength > %MaxSmi())) {
throw MakeRangeError("invalid_typed_array_length");
}
- %TypedArrayInitialize(obj, arrayId, buffer, offset, newByteLength);
+ %TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
}
- function ConstructByLength(obj, length) {
- var l = ToPositiveInteger(length, "invalid_typed_array_length");
+ function NAMEConstructByLength(obj, length) {
+ var l = IS_UNDEFINED(length) ?
+ 0 : ToPositiveInteger(length, "invalid_typed_array_length");
if (l > %MaxSmi()) {
throw MakeRangeError("invalid_typed_array_length");
}
- var byteLength = l * elementSize;
+ var byteLength = l * ELEMENT_SIZE;
var buffer = new $ArrayBuffer(byteLength);
- %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
+ %TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
}
- function ConstructByArrayLike(obj, arrayLike) {
+ function NAMEConstructByArrayLike(obj, arrayLike) {
var length = arrayLike.length;
var l = ToPositiveInteger(length, "invalid_typed_array_length");
- if(!%TypedArrayInitializeFromArrayLike(obj, arrayId, arrayLike, l)) {
+ if (l > %MaxSmi()) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ if(!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) {
for (var i = 0; i < l; i++) {
// It is crucial that we let any exceptions from arrayLike[i]
// propagate outside the function.
@@ -89,21 +111,24 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
}
}
- return function (arg1, arg2, arg3) {
+ function NAMEConstructor(arg1, arg2, arg3) {
+
if (%_IsConstructCall()) {
if (IS_ARRAYBUFFER(arg1)) {
- ConstructByArrayBuffer(this, arg1, arg2, arg3);
+ NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
} else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
- ConstructByLength(this, arg1);
+ NAMEConstructByLength(this, arg1);
} else {
- ConstructByArrayLike(this, arg1);
+ NAMEConstructByArrayLike(this, arg1);
}
} else {
- throw MakeTypeError("constructor_not_function", [name])
+ throw MakeTypeError("constructor_not_function", ["NAME"])
}
}
-}
+endmacro
+
+TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
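
The TYPED_ARRAY_CONSTRUCTOR macro above stamps out one NAMEConstructByArrayBuffer/ByLength/ByArrayLike triple per element type, replacing the old closure-per-constructor approach. The buffer-backed path boils down to alignment and bounds arithmetic; a standalone sketch of that arithmetic in plain C++ (illustrative only, not V8 runtime code):

#include <cstddef>
#include <stdexcept>

// Mirrors the checks in NAMEConstructByArrayBuffer: the offset must be
// element-aligned and inside the buffer; without an explicit length the
// view covers the remainder of the buffer, which must divide evenly.
struct ViewLayout { size_t byte_offset; size_t length; size_t byte_length; };

ViewLayout ComputeViewLayout(size_t buffer_byte_length, size_t byte_offset,
                             bool has_length, size_t length,
                             size_t element_size) {
  if (byte_offset % element_size != 0)
    throw std::range_error("invalid_typed_array_alignment: start offset");
  if (byte_offset > buffer_byte_length)
    throw std::range_error("invalid_typed_array_offset");
  size_t byte_length;
  if (!has_length) {
    if (buffer_byte_length % element_size != 0)
      throw std::range_error("invalid_typed_array_alignment: byte length");
    byte_length = buffer_byte_length - byte_offset;
    length = byte_length / element_size;
  } else {
    byte_length = length * element_size;
  }
  if (byte_offset + byte_length > buffer_byte_length)
    throw std::range_error("invalid_typed_array_length");
  return ViewLayout{byte_offset, length, byte_length};
}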
function TypedArrayGetBuffer() {
return %TypedArrayGetBuffer(this);
@@ -218,6 +243,10 @@ function TypedArraySet(obj, offset) {
if (intOffset < 0) {
throw MakeTypeError("typed_array_set_negative_offset");
}
+
+ if (intOffset > %MaxSmi()) {
+ throw MakeRangeError("typed_array_set_source_too_large");
+ }
switch (%TypedArraySetFastCases(this, obj, intOffset)) {
// These numbers should be synchronized with runtime.cc.
case 0: // TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE
@@ -250,10 +279,8 @@ function TypedArraySet(obj, offset) {
// -------------------------------------------------------------------
-function SetupTypedArray(arrayId, name, constructor, elementSize) {
+function SetupTypedArray(constructor, fun, elementSize) {
%CheckIsBootstrapping();
- var fun = CreateTypedArrayConstructor(name, elementSize,
- arrayId, constructor);
%SetCode(constructor, fun);
%FunctionSetPrototype(constructor, new $Object());
@@ -275,17 +302,11 @@ function SetupTypedArray(arrayId, name, constructor, elementSize) {
));
}
-// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
-SetupTypedArray(1, "Uint8Array", global.Uint8Array, 1);
-SetupTypedArray(2, "Int8Array", global.Int8Array, 1);
-SetupTypedArray(3, "Uint16Array", global.Uint16Array, 2);
-SetupTypedArray(4, "Int16Array", global.Int16Array, 2);
-SetupTypedArray(5, "Uint32Array", global.Uint32Array, 4);
-SetupTypedArray(6, "Int32Array", global.Int32Array, 4);
-SetupTypedArray(7, "Float32Array", global.Float32Array, 4);
-SetupTypedArray(8, "Float64Array", global.Float64Array, 8);
-SetupTypedArray(9, "Uint8ClampedArray", global.Uint8ClampedArray, 1);
+macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
+ SetupTypedArray (global.NAME, NAMEConstructor, ELEMENT_SIZE);
+endmacro
+TYPED_ARRAYS(SETUP_TYPED_ARRAY)
// --------------------------- DataView -----------------------------
@@ -296,8 +317,9 @@ function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
if (!IS_ARRAYBUFFER(buffer)) {
throw MakeTypeError('data_view_not_array_buffer', []);
}
- var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- var offset = ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
+ var bufferByteLength = buffer.byteLength;
+ var offset = IS_UNDEFINED(byteOffset) ?
+ 0 : ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
if (offset > bufferByteLength) {
throw MakeRangeError('invalid_data_view_offset');
}
@@ -336,225 +358,52 @@ function DataViewGetByteLength() {
return %DataViewGetByteLength(this);
}
+macro DATA_VIEW_TYPES(FUNCTION)
+ FUNCTION(Int8)
+ FUNCTION(Uint8)
+ FUNCTION(Int16)
+ FUNCTION(Uint16)
+ FUNCTION(Int32)
+ FUNCTION(Uint32)
+ FUNCTION(Float32)
+ FUNCTION(Float64)
+endmacro
+
function ToPositiveDataViewOffset(offset) {
return ToPositiveInteger(offset, 'invalid_data_view_accessor_offset');
}
-function DataViewGetInt8(offset, little_endian) {
+
+macro DATA_VIEW_GETTER_SETTER(TYPENAME)
+function DataViewGetTYPENAME(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getInt8', this]);
+ ['DataView.getTYPENAME', this]);
}
if (%_ArgumentsLength() < 1) {
throw MakeTypeError('invalid_argument');
}
- return %DataViewGetInt8(this,
+ return %DataViewGetTYPENAME(this,
ToPositiveDataViewOffset(offset),
!!little_endian);
}
-function DataViewSetInt8(offset, value, little_endian) {
+function DataViewSetTYPENAME(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setInt8', this]);
+ ['DataView.setTYPENAME', this]);
}
if (%_ArgumentsLength() < 2) {
throw MakeTypeError('invalid_argument');
}
- %DataViewSetInt8(this,
+ %DataViewSetTYPENAME(this,
ToPositiveDataViewOffset(offset),
TO_NUMBER_INLINE(value),
!!little_endian);
}
+endmacro
-function DataViewGetUint8(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getUint8', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetUint8(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetUint8(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setUint8', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetUint8(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetInt16(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getInt16', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetInt16(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetInt16(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setInt16', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetInt16(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetUint16(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getUint16', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetUint16(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetUint16(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setUint16', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetUint16(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetInt32(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getInt32', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetInt32(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetInt32(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setInt32', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetInt32(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetUint32(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getUint32', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetUint32(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetUint32(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setUint32', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetUint32(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetFloat32(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getFloat32', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetFloat32(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetFloat32(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setFloat32', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetFloat32(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetFloat64(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getFloat64', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetFloat64(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetFloat64(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setFloat64', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetFloat64(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
+DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
function SetupDataView() {
%CheckIsBootstrapping();
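
The sixteen hand-written DataView accessors are folded into one DATA_VIEW_GETTER_SETTER macro expanded over DATA_VIEW_TYPES; the per-type work happens in the %DataViewGetTYPENAME / %DataViewSetTYPENAME runtime calls. The endianness handling those runtime functions perform amounts to the following standalone C++ sketch (illustrative, not the V8 implementation):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Read a T from a byte buffer honoring the little_endian flag, assuming a
// little-endian host for the sketch (a big-endian host would invert the
// condition).
template <typename T>
T ReadFromBuffer(const uint8_t* buffer, size_t byte_offset,
                 bool little_endian) {
  uint8_t bytes[sizeof(T)];
  std::memcpy(bytes, buffer + byte_offset, sizeof(T));
  if (!little_endian) std::reverse(bytes, bytes + sizeof(T));
  T value;
  std::memcpy(&value, bytes, sizeof(T));
  return value;
}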
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 17a19b29e4..3840e6fd22 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -31,14 +31,15 @@
namespace v8 {
namespace internal {
-int Type::NumClasses() {
- if (is_class()) {
+template<class Config>
+int TypeImpl<Config>::NumClasses() {
+ if (this->IsClass()) {
return 1;
- } else if (is_union()) {
- Handle<Unioned> unioned = as_union();
+ } else if (this->IsUnion()) {
+ UnionedHandle unioned = this->AsUnion();
int result = 0;
- for (int i = 0; i < unioned->length(); ++i) {
- if (union_get(unioned, i)->is_class()) ++result;
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ if (Config::union_get(unioned, i)->IsClass()) ++result;
}
return result;
} else {
@@ -47,14 +48,15 @@ int Type::NumClasses() {
}
-int Type::NumConstants() {
- if (is_constant()) {
+template<class Config>
+int TypeImpl<Config>::NumConstants() {
+ if (this->IsConstant()) {
return 1;
- } else if (is_union()) {
- Handle<Unioned> unioned = as_union();
+ } else if (this->IsUnion()) {
+ UnionedHandle unioned = this->AsUnion();
int result = 0;
- for (int i = 0; i < unioned->length(); ++i) {
- if (union_get(unioned, i)->is_constant()) ++result;
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ if (Config::union_get(unioned, i)->IsConstant()) ++result;
}
return result;
} else {
@@ -63,41 +65,61 @@ int Type::NumConstants() {
}
-template<class T>
-Handle<Type> Type::Iterator<T>::get_type() {
+template<class Config> template<class T>
+typename TypeImpl<Config>::TypeHandle
+TypeImpl<Config>::Iterator<T>::get_type() {
ASSERT(!Done());
- return type_->is_union() ? union_get(type_->as_union(), index_) : type_;
+ return type_->IsUnion() ? Config::union_get(type_->AsUnion(), index_) : type_;
}
-template<>
-Handle<Map> Type::Iterator<Map>::Current() {
- return get_type()->as_class();
-}
-template<>
-Handle<v8::internal::Object> Type::Iterator<v8::internal::Object>::Current() {
- return get_type()->as_constant();
-}
+// C++ cannot specialise nested templates, so we have to go through this
+// contortion with an auxiliary template to simulate it.
+template<class Config, class T>
+struct TypeImplIteratorAux {
+ static bool matches(typename TypeImpl<Config>::TypeHandle type);
+ static i::Handle<T> current(typename TypeImpl<Config>::TypeHandle type);
+};
+template<class Config>
+struct TypeImplIteratorAux<Config, i::Map> {
+ static bool matches(typename TypeImpl<Config>::TypeHandle type) {
+ return type->IsClass();
+ }
+ static i::Handle<i::Map> current(typename TypeImpl<Config>::TypeHandle type) {
+ return type->AsClass();
+ }
+};
+
+template<class Config>
+struct TypeImplIteratorAux<Config, i::Object> {
+ static bool matches(typename TypeImpl<Config>::TypeHandle type) {
+ return type->IsConstant();
+ }
+ static i::Handle<i::Object> current(
+ typename TypeImpl<Config>::TypeHandle type) {
+ return type->AsConstant();
+ }
+};
-template<>
-bool Type::Iterator<Map>::matches(Handle<Type> type) {
- return type->is_class();
+template<class Config> template<class T>
+bool TypeImpl<Config>::Iterator<T>::matches(TypeHandle type) {
+ return TypeImplIteratorAux<Config, T>::matches(type);
}
-template<>
-bool Type::Iterator<v8::internal::Object>::matches(Handle<Type> type) {
- return type->is_constant();
+template<class Config> template<class T>
+i::Handle<T> TypeImpl<Config>::Iterator<T>::Current() {
+ return TypeImplIteratorAux<Config, T>::current(get_type());
}
-template<class T>
-void Type::Iterator<T>::Advance() {
+template<class Config> template<class T>
+void TypeImpl<Config>::Iterator<T>::Advance() {
++index_;
- if (type_->is_union()) {
- Handle<Unioned> unioned = type_->as_union();
- for (; index_ < unioned->length(); ++index_) {
- if (matches(union_get(unioned, index_))) return;
+ if (type_->IsUnion()) {
+ UnionedHandle unioned = type_->AsUnion();
+ for (; index_ < Config::union_length(unioned); ++index_) {
+ if (matches(Config::union_get(unioned, index_))) return;
}
} else if (index_ == 0 && matches(type_)) {
return;
@@ -105,158 +127,181 @@ void Type::Iterator<T>::Advance() {
index_ = -1;
}
-template class Type::Iterator<Map>;
-template class Type::Iterator<v8::internal::Object>;
-
// Get the smallest bitset subsuming this type.
-int Type::LubBitset() {
- if (this->is_bitset()) {
- return this->as_bitset();
- } else if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
+template<class Config>
+int TypeImpl<Config>::LubBitset() {
+ if (this->IsBitset()) {
+ return this->AsBitset();
+ } else if (this->IsUnion()) {
+ UnionedHandle unioned = this->AsUnion();
int bitset = kNone;
- for (int i = 0; i < unioned->length(); ++i) {
- bitset |= union_get(unioned, i)->LubBitset();
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ bitset |= Config::union_get(unioned, i)->LubBitset();
}
return bitset;
+ } else if (this->IsClass()) {
+ return LubBitset(*this->AsClass());
} else {
- Map* map = NULL;
- if (this->is_class()) {
- map = *this->as_class();
- } else {
- Handle<v8::internal::Object> value = this->as_constant();
- if (value->IsSmi()) return kSmi;
- map = HeapObject::cast(*value)->map();
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- int32_t i;
- uint32_t u;
- if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
- if (value->ToUint32(&u)) return kUnsigned32;
- return kDouble;
- }
- if (map->instance_type() == ODDBALL_TYPE) {
- if (value->IsUndefined()) return kUndefined;
- if (value->IsNull()) return kNull;
- if (value->IsTrue() || value->IsFalse()) return kBoolean;
- if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone?
- UNREACHABLE();
- }
- }
- switch (map->instance_type()) {
- case STRING_TYPE:
- case ASCII_STRING_TYPE:
- case CONS_STRING_TYPE:
- case CONS_ASCII_STRING_TYPE:
- case SLICED_STRING_TYPE:
- case SLICED_ASCII_STRING_TYPE:
- case EXTERNAL_STRING_TYPE:
- case EXTERNAL_ASCII_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case INTERNALIZED_STRING_TYPE:
- case ASCII_INTERNALIZED_STRING_TYPE:
- case CONS_INTERNALIZED_STRING_TYPE:
- case CONS_ASCII_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kString;
- case SYMBOL_TYPE:
- return kSymbol;
- case ODDBALL_TYPE:
- return kOddball;
- case HEAP_NUMBER_TYPE:
- return kDouble;
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_ARRAY_BUFFER_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- if (map->is_undetectable()) return kUndetectable;
- return kOtherObject;
- case JS_ARRAY_TYPE:
- return kArray;
- case JS_FUNCTION_TYPE:
- return kFunction;
- case JS_REGEXP_TYPE:
- return kRegExp;
- case JS_PROXY_TYPE:
- case JS_FUNCTION_PROXY_TYPE:
- return kProxy;
- case MAP_TYPE:
- // When compiling stub templates, the meta map is used as a place holder
- // for the actual map with which the template is later instantiated.
- // We treat it as a kind of type variable whose upper bound is Any.
- // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
- // we must exclude Undetectable here. This makes no sense, really,
- // because it means that the template isn't actually parametric.
- // Also, it doesn't apply elsewhere. 8-(
- // We ought to find a cleaner solution for compiling stubs parameterised
- // over type or class variables, esp ones with bounds...
- return kDetectable;
- case DECLARED_ACCESSOR_INFO_TYPE:
- case EXECUTABLE_ACCESSOR_INFO_TYPE:
- case ACCESSOR_PAIR_TYPE:
- case FIXED_ARRAY_TYPE:
- return kInternal;
- default:
- UNREACHABLE();
- return kNone;
- }
+ return LubBitset(*this->AsConstant());
+ }
+}
+
+
+template<class Config>
+int TypeImpl<Config>::LubBitset(i::Object* value) {
+ if (value->IsSmi()) return kSmi;
+ i::Map* map = i::HeapObject::cast(value)->map();
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
+ int32_t i;
+ uint32_t u;
+ if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
+ if (value->ToUint32(&u)) return kUnsigned32;
+ return kDouble;
+ }
+ if (map->instance_type() == ODDBALL_TYPE) {
+ if (value->IsUndefined()) return kUndefined;
+ if (value->IsNull()) return kNull;
+ if (value->IsBoolean()) return kBoolean;
+ if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone?
+ if (value->IsUninitialized()) return kNone;
+ UNREACHABLE();
+ }
+ return LubBitset(map);
+}
+
+
+template<class Config>
+int TypeImpl<Config>::LubBitset(i::Map* map) {
+ switch (map->instance_type()) {
+ case STRING_TYPE:
+ case ASCII_STRING_TYPE:
+ case CONS_STRING_TYPE:
+ case CONS_ASCII_STRING_TYPE:
+ case SLICED_STRING_TYPE:
+ case SLICED_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_TYPE:
+ case EXTERNAL_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_STRING_TYPE:
+ case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case INTERNALIZED_STRING_TYPE:
+ case ASCII_INTERNALIZED_STRING_TYPE:
+ case CONS_INTERNALIZED_STRING_TYPE:
+ case CONS_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return kString;
+ case SYMBOL_TYPE:
+ return kSymbol;
+ case ODDBALL_TYPE:
+ return kOddball;
+ case HEAP_NUMBER_TYPE:
+ return kDouble;
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ if (map->is_undetectable()) return kUndetectable;
+ return kOtherObject;
+ case JS_ARRAY_TYPE:
+ return kArray;
+ case JS_FUNCTION_TYPE:
+ return kFunction;
+ case JS_REGEXP_TYPE:
+ return kRegExp;
+ case JS_PROXY_TYPE:
+ case JS_FUNCTION_PROXY_TYPE:
+ return kProxy;
+ case MAP_TYPE:
+ // When compiling stub templates, the meta map is used as a place holder
+ // for the actual map with which the template is later instantiated.
+ // We treat it as a kind of type variable whose upper bound is Any.
+ // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
+ // we must exclude Undetectable here. This makes no sense, really,
+ // because it means that the template isn't actually parametric.
+ // Also, it doesn't apply elsewhere. 8-(
+ // We ought to find a cleaner solution for compiling stubs parameterised
+ // over type or class variables, esp ones with bounds...
+ return kDetectable;
+ case DECLARED_ACCESSOR_INFO_TYPE:
+ case EXECUTABLE_ACCESSOR_INFO_TYPE:
+ case ACCESSOR_PAIR_TYPE:
+ case FIXED_ARRAY_TYPE:
+ return kInternal;
+ default:
+ UNREACHABLE();
+ return kNone;
}
}
// Get the largest bitset subsumed by this type.
-int Type::GlbBitset() {
- if (this->is_bitset()) {
- return this->as_bitset();
- } else if (this->is_union()) {
+template<class Config>
+int TypeImpl<Config>::GlbBitset() {
+ if (this->IsBitset()) {
+ return this->AsBitset();
+ } else if (this->IsUnion()) {
// All but the first are non-bitsets and thus would yield kNone anyway.
- return union_get(this->as_union(), 0)->GlbBitset();
+ return Config::union_get(this->AsUnion(), 0)->GlbBitset();
} else {
return kNone;
}
}
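
LubBitset and GlbBitset project any structured type onto the bitset lattice (least upper bound and greatest lower bound respectively), which is what makes the fast paths below possible: on bitsets, subsumption is plain bit arithmetic. A standalone illustration with made-up constants, not V8's real kSmi/kString encodings:

#include <cassert>

enum ExampleBits {        // illustrative values only
  kNoneBit   = 0,
  kSmiBit    = 1 << 0,
  kDoubleBit = 1 << 1,
  kNumberBit = kSmiBit | kDoubleBit,
  kStringBit = 1 << 2
};

// "this <= that" on the bitset fast path: OR-ing this type's LUB into
// `that` must add nothing, exactly as SlowIs checks below.
inline bool BitsetIs(int lub_of_this, int that_bitset) {
  return (lub_of_this | that_bitset) == that_bitset;
}

int main() {
  assert(BitsetIs(kSmiBit, kNumberBit));      // Smi <= Number
  assert(!BitsetIs(kStringBit, kNumberBit));  // String is not <= Number
  return 0;
}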
+// Most precise _current_ type of a value (usually its class).
+template<class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::OfCurrently(
+ i::Handle<i::Object> value, Region* region) {
+ if (value->IsSmi()) return Smi(region);
+ i::Map* map = i::HeapObject::cast(*value)->map();
+ if (map->instance_type() == HEAP_NUMBER_TYPE ||
+ map->instance_type() == ODDBALL_TYPE) {
+ return Of(value, region);
+ }
+ return Class(i::handle(map), region);
+}
+
+
// Check this <= that.
-bool Type::SlowIs(Type* that) {
+template<class Config>
+bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
// Fast path for bitsets.
- if (this->is_none()) return true;
- if (that->is_bitset()) {
- return (this->LubBitset() | that->as_bitset()) == that->as_bitset();
+ if (this->IsNone()) return true;
+ if (that->IsBitset()) {
+ return (this->LubBitset() | that->AsBitset()) == that->AsBitset();
}
- if (that->is_class()) {
- return this->is_class() && *this->as_class() == *that->as_class();
+ if (that->IsClass()) {
+ return this->IsClass() && *this->AsClass() == *that->AsClass();
}
- if (that->is_constant()) {
- return this->is_constant() && *this->as_constant() == *that->as_constant();
+ if (that->IsConstant()) {
+ return this->IsConstant() && *this->AsConstant() == *that->AsConstant();
}
// (T1 \/ ... \/ Tn) <= T <=> (T1 <= T) /\ ... /\ (Tn <= T)
- if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> this_i = union_get(unioned, i);
+ if (this->IsUnion()) {
+ UnionedHandle unioned = this->AsUnion();
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ TypeHandle this_i = Config::union_get(unioned, i);
if (!this_i->Is(that)) return false;
}
return true;
@@ -264,13 +309,13 @@ bool Type::SlowIs(Type* that) {
// T <= (T1 \/ ... \/ Tn) <=> (T <= T1) \/ ... \/ (T <= Tn)
// (iff T is not a union)
- ASSERT(!this->is_union());
- if (that->is_union()) {
- Handle<Unioned> unioned = that->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> that_i = union_get(unioned, i);
+ ASSERT(!this->IsUnion());
+ if (that->IsUnion()) {
+ UnionedHandle unioned = that->AsUnion();
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ TypeHandle that_i = Config::union_get(unioned, i);
if (this->Is(that_i)) return true;
- if (this->is_bitset()) break; // Fast fail, no other field is a bitset.
+ if (this->IsBitset()) break; // Fast fail, only first field is a bitset.
}
return false;
}
@@ -279,52 +324,63 @@ bool Type::SlowIs(Type* that) {
}
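
SlowIs above distributes subtyping over unions: a union is below T exactly when every member is, and a non-union is below a union exactly when some member subsumes it, with bitsets handled by the fast path. A standalone sketch of those two laws over a toy representation (members are plain bitsets; this is not V8's data structure):

#include <cstddef>
#include <vector>

struct SimpleType {
  std::vector<int> members;  // one entry => plain bitset, several => a union
  bool IsUnion() const { return members.size() > 1; }
};

static bool BitsetSubsumed(int a, int b) { return (a | b) == b; }

bool Is(const SimpleType& a, const SimpleType& b) {
  if (a.IsUnion()) {  // (T1 \/ ... \/ Tn) <= T  <=>  every Ti <= T
    for (size_t i = 0; i < a.members.size(); ++i) {
      SimpleType ti;
      ti.members.push_back(a.members[i]);
      if (!Is(ti, b)) return false;
    }
    return true;
  }
  if (b.IsUnion()) {  // T <= (T1 \/ ... \/ Tn)  <=>  T <= some Ti
    for (size_t i = 0; i < b.members.size(); ++i) {
      if (BitsetSubsumed(a.members[0], b.members[i])) return true;
    }
    return false;
  }
  return BitsetSubsumed(a.members[0], b.members[0]);  // bitset fast path
}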
+template<class Config>
+bool TypeImpl<Config>::IsCurrently(TypeImpl* that) {
+ return this->Is(that) ||
+ (this->IsConstant() && that->IsClass() &&
+ this->AsConstant()->IsHeapObject() &&
+ i::HeapObject::cast(*this->AsConstant())->map() == *that->AsClass());
+}
+
+
// Check this overlaps that.
-bool Type::Maybe(Type* that) {
+template<class Config>
+bool TypeImpl<Config>::Maybe(TypeImpl* that) {
// Fast path for bitsets.
- if (this->is_bitset()) {
- return (this->as_bitset() & that->LubBitset()) != 0;
+ if (this->IsBitset()) {
+ return (this->AsBitset() & that->LubBitset()) != 0;
}
- if (that->is_bitset()) {
- return (this->LubBitset() & that->as_bitset()) != 0;
+ if (that->IsBitset()) {
+ return (this->LubBitset() & that->AsBitset()) != 0;
}
// (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
- if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> this_i = union_get(unioned, i);
+ if (this->IsUnion()) {
+ UnionedHandle unioned = this->AsUnion();
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ TypeHandle this_i = Config::union_get(unioned, i);
if (this_i->Maybe(that)) return true;
}
return false;
}
// T overlaps (T1 \/ ... \/ Tn) <=> (T overlaps T1) \/ ... \/ (T overlaps Tn)
- if (that->is_union()) {
- Handle<Unioned> unioned = that->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> that_i = union_get(unioned, i);
+ if (that->IsUnion()) {
+ UnionedHandle unioned = that->AsUnion();
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ TypeHandle that_i = Config::union_get(unioned, i);
if (this->Maybe(that_i)) return true;
}
return false;
}
- ASSERT(!that->is_union());
- if (this->is_class()) {
- return that->is_class() && *this->as_class() == *that->as_class();
+ ASSERT(!this->IsUnion() && !that->IsUnion());
+ if (this->IsClass()) {
+ return that->IsClass() && *this->AsClass() == *that->AsClass();
}
- if (this->is_constant()) {
- return that->is_constant() && *this->as_constant() == *that->as_constant();
+ if (this->IsConstant()) {
+ return that->IsConstant() && *this->AsConstant() == *that->AsConstant();
}
return false;
}
-bool Type::InUnion(Handle<Unioned> unioned, int current_size) {
- ASSERT(!this->is_union());
+template<class Config>
+bool TypeImpl<Config>::InUnion(UnionedHandle unioned, int current_size) {
+ ASSERT(!this->IsUnion());
for (int i = 0; i < current_size; ++i) {
- Handle<Type> type = union_get(unioned, i);
+ TypeHandle type = Config::union_get(unioned, i);
if (this->Is(type)) return true;
}
return false;
@@ -333,17 +389,23 @@ bool Type::InUnion(Handle<Unioned> unioned, int current_size) {
// Get non-bitsets from this which are not subsumed by union, store at unioned,
// starting at index. Returns updated index.
-int Type::ExtendUnion(Handle<Unioned> result, int current_size) {
+template<class Config>
+int TypeImpl<Config>::ExtendUnion(
+ UnionedHandle result, TypeHandle type, int current_size) {
int old_size = current_size;
- if (this->is_class() || this->is_constant()) {
- if (!this->InUnion(result, old_size)) result->set(current_size++, this);
- } else if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> type = union_get(unioned, i);
- ASSERT(i == 0 || !(type->is_bitset() || type->Is(union_get(unioned, 0))));
- if (type->is_bitset()) continue;
- if (!type->InUnion(result, old_size)) result->set(current_size++, *type);
+ if (type->IsClass() || type->IsConstant()) {
+ if (!type->InUnion(result, old_size)) {
+ Config::union_set(result, current_size++, type);
+ }
+ } else if (type->IsUnion()) {
+ UnionedHandle unioned = type->AsUnion();
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ TypeHandle type = Config::union_get(unioned, i);
+ ASSERT(i == 0 ||
+ !(type->IsBitset() || type->Is(Config::union_get(unioned, 0))));
+ if (!type->IsBitset() && !type->InUnion(result, old_size)) {
+ Config::union_set(result, current_size++, type);
+ }
}
}
return current_size;
@@ -352,74 +414,74 @@ int Type::ExtendUnion(Handle<Unioned> result, int current_size) {
// Union is O(1) on simple bit unions, but O(n*m) on structured unions.
// TODO(rossberg): Should we use object sets somehow? Is it worth it?
-Type* Type::Union(Handle<Type> type1, Handle<Type> type2) {
+template<class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
+ TypeHandle type1, TypeHandle type2, Region* region) {
// Fast case: bit sets.
- if (type1->is_bitset() && type2->is_bitset()) {
- return from_bitset(type1->as_bitset() | type2->as_bitset());
+ if (type1->IsBitset() && type2->IsBitset()) {
+ return Config::from_bitset(type1->AsBitset() | type2->AsBitset(), region);
}
// Fast case: top or bottom types.
- if (type1->SameValue(Type::Any())) return *type1;
- if (type2->SameValue(Type::Any())) return *type2;
- if (type1->SameValue(Type::None())) return *type2;
- if (type2->SameValue(Type::None())) return *type1;
+ if (type1->IsAny()) return type1;
+ if (type2->IsAny()) return type2;
+ if (type1->IsNone()) return type2;
+ if (type2->IsNone()) return type1;
// Semi-fast case: Unioned objects are neither involved nor produced.
- if (!(type1->is_union() || type2->is_union())) {
- if (type1->Is(type2)) return *type2;
- if (type2->Is(type1)) return *type1;
+ if (!(type1->IsUnion() || type2->IsUnion())) {
+ if (type1->Is(type2)) return type2;
+ if (type2->Is(type1)) return type1;
}
// Slow case: may need to produce a Unioned object.
- Isolate* isolate = NULL;
- int size = type1->is_bitset() || type2->is_bitset() ? 1 : 0;
- if (!type1->is_bitset()) {
- isolate = HeapObject::cast(*type1)->GetIsolate();
- size += (type1->is_union() ? type1->as_union()->length() : 1);
+ int size = type1->IsBitset() || type2->IsBitset() ? 1 : 0;
+ if (!type1->IsBitset()) {
+ size += (type1->IsUnion() ? Config::union_length(type1->AsUnion()) : 1);
}
- if (!type2->is_bitset()) {
- isolate = HeapObject::cast(*type2)->GetIsolate();
- size += (type2->is_union() ? type2->as_union()->length() : 1);
+ if (!type2->IsBitset()) {
+ size += (type2->IsUnion() ? Config::union_length(type2->AsUnion()) : 1);
}
- ASSERT(isolate != NULL);
ASSERT(size >= 2);
- Handle<Unioned> unioned = isolate->factory()->NewFixedArray(size);
+ UnionedHandle unioned = Config::union_create(size, region);
size = 0;
int bitset = type1->GlbBitset() | type2->GlbBitset();
- if (bitset != kNone) unioned->set(size++, from_bitset(bitset));
- size = type1->ExtendUnion(unioned, size);
- size = type2->ExtendUnion(unioned, size);
+ if (bitset != kNone) {
+ Config::union_set(unioned, size++, Config::from_bitset(bitset, region));
+ }
+ size = ExtendUnion(unioned, type1, size);
+ size = ExtendUnion(unioned, type2, size);
if (size == 1) {
- return *union_get(unioned, 0);
- } else if (size == unioned->length()) {
- return from_handle(unioned);
+ return Config::union_get(unioned, 0);
+ } else {
+ Config::union_shrink(unioned, size);
+ return Config::from_union(unioned);
}
-
- // There was an overlap. Copy to smaller union.
- Handle<Unioned> result = isolate->factory()->NewFixedArray(size);
- for (int i = 0; i < size; ++i) result->set(i, unioned->get(i));
- return from_handle(result);
}
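For orientation, a rough usage sketch of the new Union signature (illustrative only, not part of the patch; it assumes a live Zone* named zone and a Handle<Map> named map): the bitset fast path keeps unions of primitive types allocation-free, while mixing in a class forces a structured Unioned list.

    // Sketch: a bitset union stays a bit-or, a structured union allocates a list.
    Type* num_or_str = Type::Union(Type::Number(zone), Type::String(zone), zone);
    // num_or_str is still a bitset (kNumber | kString); nothing was allocated.
    Type* with_class = Type::Union(num_or_str, Type::Class(map, zone), zone);
    // with_class is a Unioned list holding the bitset entry and the class entry.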
-// Get non-bitsets from this which are also in that, store at unioned,
+// Get non-bitsets from type which are also in other, store at unioned,
// starting at index. Returns updated index.
-int Type::ExtendIntersection(
- Handle<Unioned> result, Handle<Type> that, int current_size) {
+template<class Config>
+int TypeImpl<Config>::ExtendIntersection(
+ UnionedHandle result, TypeHandle type, TypeHandle other, int current_size) {
int old_size = current_size;
- if (this->is_class() || this->is_constant()) {
- if (this->Is(that) && !this->InUnion(result, old_size))
- result->set(current_size++, this);
- } else if (this->is_union()) {
- Handle<Unioned> unioned = this->as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> type = union_get(unioned, i);
- ASSERT(i == 0 || !(type->is_bitset() || type->Is(union_get(unioned, 0))));
- if (type->is_bitset()) continue;
- if (type->Is(that) && !type->InUnion(result, old_size))
- result->set(current_size++, *type);
+ if (type->IsClass() || type->IsConstant()) {
+ if (type->Is(other) && !type->InUnion(result, old_size)) {
+ Config::union_set(result, current_size++, type);
+ }
+ } else if (type->IsUnion()) {
+ UnionedHandle unioned = type->AsUnion();
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ TypeHandle type = Config::union_get(unioned, i);
+ ASSERT(i == 0 ||
+ !(type->IsBitset() || type->Is(Config::union_get(unioned, 0))));
+ if (!type->IsBitset() && type->Is(other) &&
+ !type->InUnion(result, old_size)) {
+ Config::union_set(result, current_size++, type);
+ }
}
}
return current_size;
@@ -428,69 +490,83 @@ int Type::ExtendIntersection(
// Intersection is O(1) on simple bit unions, but O(n*m) on structured unions.
// TODO(rossberg): Should we use object sets somehow? Is it worth it?
-Type* Type::Intersect(Handle<Type> type1, Handle<Type> type2) {
+template<class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
+ TypeHandle type1, TypeHandle type2, Region* region) {
// Fast case: bit sets.
- if (type1->is_bitset() && type2->is_bitset()) {
- return from_bitset(type1->as_bitset() & type2->as_bitset());
+ if (type1->IsBitset() && type2->IsBitset()) {
+ return Config::from_bitset(type1->AsBitset() & type2->AsBitset(), region);
}
// Fast case: top or bottom types.
- if (type1->SameValue(Type::None())) return *type1;
- if (type2->SameValue(Type::None())) return *type2;
- if (type1->SameValue(Type::Any())) return *type2;
- if (type2->SameValue(Type::Any())) return *type1;
+ if (type1->IsNone()) return type1;
+ if (type2->IsNone()) return type2;
+ if (type1->IsAny()) return type2;
+ if (type2->IsAny()) return type1;
// Semi-fast case: Unioned objects are neither involved nor produced.
- if (!(type1->is_union() || type2->is_union())) {
- if (type1->Is(type2)) return *type1;
- if (type2->Is(type1)) return *type2;
+ if (!(type1->IsUnion() || type2->IsUnion())) {
+ if (type1->Is(type2)) return type1;
+ if (type2->Is(type1)) return type2;
}
// Slow case: may need to produce a Unioned object.
- Isolate* isolate = NULL;
int size = 0;
- if (!type1->is_bitset()) {
- isolate = HeapObject::cast(*type1)->GetIsolate();
- size = (type1->is_union() ? type1->as_union()->length() : 2);
+ if (!type1->IsBitset()) {
+ size = (type1->IsUnion() ? Config::union_length(type1->AsUnion()) : 2);
}
- if (!type2->is_bitset()) {
- isolate = HeapObject::cast(*type2)->GetIsolate();
- int size2 = (type2->is_union() ? type2->as_union()->length() : 2);
+ if (!type2->IsBitset()) {
+ int size2 = (type2->IsUnion() ? Config::union_length(type2->AsUnion()) : 2);
size = (size == 0 ? size2 : Min(size, size2));
}
- ASSERT(isolate != NULL);
ASSERT(size >= 2);
- Handle<Unioned> unioned = isolate->factory()->NewFixedArray(size);
+ UnionedHandle unioned = Config::union_create(size, region);
size = 0;
int bitset = type1->GlbBitset() & type2->GlbBitset();
- if (bitset != kNone) unioned->set(size++, from_bitset(bitset));
- size = type1->ExtendIntersection(unioned, type2, size);
- size = type2->ExtendIntersection(unioned, type1, size);
+ if (bitset != kNone) {
+ Config::union_set(unioned, size++, Config::from_bitset(bitset, region));
+ }
+ size = ExtendIntersection(unioned, type1, type2, size);
+ size = ExtendIntersection(unioned, type2, type1, size);
if (size == 0) {
- return None();
+ return None(region);
} else if (size == 1) {
- return *union_get(unioned, 0);
- } else if (size == unioned->length()) {
- return from_handle(unioned);
+ return Config::union_get(unioned, 0);
+ } else {
+ Config::union_shrink(unioned, size);
+ return Config::from_union(unioned);
}
-
- // There were dropped cases. Copy to smaller union.
- Handle<Unioned> result = isolate->factory()->NewFixedArray(size);
- for (int i = 0; i < size; ++i) result->set(i, unioned->get(i));
- return from_handle(result);
}
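Correspondingly, a hedged sketch for Intersect (again assuming a Zone* named zone): because the composed kNumber mask contains kSmi, intersecting the two is a plain bit-and and never reaches the slow path.

    // Sketch: bitset intersection is a bit-and.
    Type* t = Type::Intersect(Type::Number(zone), Type::Smi(zone), zone);
    // kNumber & kSmi == kSmi, so t->Is(Type::Smi()) holds and no list is built.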
-Type* Type::Optional(Handle<Type> type) {
- return type->is_bitset()
- ? from_bitset(type->as_bitset() | kUndefined)
- : Union(type, Undefined()->handle_via_isolate_of(*type));
+template<class Config>
+template<class OtherType>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
+ typename OtherType::TypeHandle type, Region* region) {
+ if (type->IsBitset()) {
+ return Config::from_bitset(type->AsBitset(), region);
+ } else if (type->IsClass()) {
+ return Config::from_class(type->AsClass(), region);
+ } else if (type->IsConstant()) {
+ return Config::from_constant(type->AsConstant(), region);
+ } else {
+ ASSERT(type->IsUnion());
+ typename OtherType::UnionedHandle unioned = type->AsUnion();
+ int length = OtherType::UnionLength(unioned);
+ UnionedHandle new_unioned = Config::union_create(length, region);
+ for (int i = 0; i < length; ++i) {
+ Config::union_set(new_unioned, i,
+ Convert<OtherType>(OtherType::UnionGet(unioned, i), region));
+ }
+ return Config::from_union(new_unioned);
+ }
}
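Convert is what lets the two instantiations defined below interoperate, copying a type structurally from one allocation regime into the other. A minimal sketch, assuming a HeapType::TypeHandle named heap_type and a Zone* named zone:

    // Sketch: bring a heap-allocated type into the zone-allocated world.
    Type* zone_type = Type::Convert<HeapType>(heap_type, zone);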
-Representation Representation::FromType(Handle<Type> type) {
+// TODO(rossberg): this does not belong here.
+Representation Representation::FromType(Type* type) {
if (type->Is(Type::None())) return Representation::None();
if (type->Is(Type::Smi())) return Representation::Smi();
if (type->Is(Type::Signed32())) return Representation::Integer32();
@@ -500,52 +576,81 @@ Representation Representation::FromType(Handle<Type> type) {
#ifdef OBJECT_PRINT
-void Type::TypePrint() {
+template<class Config>
+void TypeImpl<Config>::TypePrint() {
TypePrint(stdout);
PrintF(stdout, "\n");
Flush(stdout);
}
-void Type::TypePrint(FILE* out) {
- if (is_bitset()) {
- int val = as_bitset();
- const char* composed_name = GetComposedName(val);
- if (composed_name != NULL) {
- PrintF(out, "%s", composed_name);
- return;
- }
- bool first_entry = true;
- PrintF(out, "{");
- for (unsigned i = 0; i < sizeof(val)*8; ++i) {
- int mask = (1 << i);
- if ((val & mask) != 0) {
- if (!first_entry) PrintF(out, ",");
- first_entry = false;
- PrintF(out, "%s", GetPrimitiveName(mask));
+template<class Config>
+const char* TypeImpl<Config>::bitset_name(int bitset) {
+ switch (bitset) {
+ #define PRINT_COMPOSED_TYPE(type, value) case k##type: return #type;
+ BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE)
+ #undef PRINT_COMPOSED_TYPE
+ default:
+ return NULL;
+ }
+}
+
+
+template<class Config>
+void TypeImpl<Config>::TypePrint(FILE* out) {
+ if (this->IsBitset()) {
+ int bitset = this->AsBitset();
+ const char* name = bitset_name(bitset);
+ if (name != NULL) {
+ PrintF(out, "%s", name);
+ } else {
+ bool is_first = true;
+ PrintF(out, "(");
+ for (int mask = 1; mask != 0; mask = mask << 1) {
+ if ((bitset & mask) != 0) {
+ if (!is_first) PrintF(out, " | ");
+ is_first = false;
+ PrintF(out, "%s", bitset_name(mask));
+ }
}
+ PrintF(out, ")");
}
- PrintF(out, "}");
- } else if (is_constant()) {
- PrintF(out, "Constant(%p : ", static_cast<void*>(*as_constant()));
- from_bitset(LubBitset())->TypePrint(out);
+ } else if (this->IsConstant()) {
+ PrintF(out, "Constant(%p : ", static_cast<void*>(*this->AsConstant()));
+ Config::from_bitset(this->LubBitset())->TypePrint(out);
PrintF(")");
- } else if (is_class()) {
- PrintF(out, "Class(%p < ", static_cast<void*>(*as_class()));
- from_bitset(LubBitset())->TypePrint(out);
+ } else if (this->IsClass()) {
+ PrintF(out, "Class(%p < ", static_cast<void*>(*this->AsClass()));
+ Config::from_bitset(this->LubBitset())->TypePrint(out);
PrintF(")");
- } else if (is_union()) {
- PrintF(out, "{");
- Handle<Unioned> unioned = as_union();
- for (int i = 0; i < unioned->length(); ++i) {
- Handle<Type> type_i = union_get(unioned, i);
- if (i > 0) PrintF(out, ",");
+ } else if (this->IsUnion()) {
+ PrintF(out, "(");
+ UnionedHandle unioned = this->AsUnion();
+ for (int i = 0; i < Config::union_length(unioned); ++i) {
+ TypeHandle type_i = Config::union_get(unioned, i);
+ if (i > 0) PrintF(out, " | ");
type_i->TypePrint(out);
}
- PrintF(out, "}");
+ PrintF(out, ")");
}
}
#endif
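For orientation, the printed forms produced above look roughly as follows (a hypothetical rendering with pointer values elided, not captured output):

    Number                                       a bitset with a name of its own
    (Smi | String)                               a bitset combination without one
    Class(<map address> < <lub bitset>)          a class, bounded by its LubBitset
    (Smi | Constant(<address> : <lub bitset>))   a union of a bitset and a constant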
+template class TypeImpl<ZoneTypeConfig>;
+template class TypeImpl<ZoneTypeConfig>::Iterator<i::Map>;
+template class TypeImpl<ZoneTypeConfig>::Iterator<i::Object>;
+
+template class TypeImpl<HeapTypeConfig>;
+template class TypeImpl<HeapTypeConfig>::Iterator<i::Map>;
+template class TypeImpl<HeapTypeConfig>::Iterator<i::Object>;
+
+template TypeImpl<ZoneTypeConfig>::TypeHandle
+ TypeImpl<ZoneTypeConfig>::Convert<HeapType>(
+ TypeImpl<HeapTypeConfig>::TypeHandle, TypeImpl<ZoneTypeConfig>::Region*);
+template TypeImpl<HeapTypeConfig>::TypeHandle
+ TypeImpl<HeapTypeConfig>::Convert<Type>(
+ TypeImpl<ZoneTypeConfig>::TypeHandle, TypeImpl<HeapTypeConfig>::Region*);
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 5d437e26b2..99a809dc10 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -95,7 +95,7 @@ namespace internal {
// a concurrent compilation context.
-#define PRIMITIVE_TYPE_LIST(V) \
+#define BITSET_TYPE_LIST(V) \
V(None, 0) \
V(Null, 1 << 0) \
V(Undefined, 1 << 1) \
@@ -113,9 +113,8 @@ namespace internal {
V(RegExp, 1 << 13) \
V(OtherObject, 1 << 14) \
V(Proxy, 1 << 15) \
- V(Internal, 1 << 16)
-
-#define COMPOSED_TYPE_LIST(V) \
+ V(Internal, 1 << 16) \
+ \
V(Oddball, kBoolean | kNull | kUndefined) \
V(Signed32, kSmi | kOtherSigned32) \
V(Number, kSigned32 | kUnsigned32 | kDouble) \
@@ -131,40 +130,78 @@ namespace internal {
V(NonNumber, kAny - kNumber) \
V(Detectable, kAllocated - kUndetectable)
-#define TYPE_LIST(V) \
- PRIMITIVE_TYPE_LIST(V) \
- COMPOSED_TYPE_LIST(V)
-
-
-class Type : public Object {
+// struct Config {
+// typedef Base;
+// typedef Unioned;
+// typedef Region;
+// template<class> struct Handle { typedef type; } // No template typedefs...
+// static Handle<Type>::type handle(Type* type); // !is_bitset(type)
+// static bool is_bitset(Type*);
+// static bool is_class(Type*);
+// static bool is_constant(Type*);
+// static bool is_union(Type*);
+// static int as_bitset(Type*);
+// static i::Handle<i::Map> as_class(Type*);
+// static i::Handle<i::Object> as_constant(Type*);
+// static Handle<Unioned>::type as_union(Type*);
+// static Type* from_bitset(int bitset);
+// static Handle<Type>::type from_bitset(int bitset, Region*);
+// static Handle<Type>::type from_class(i::Handle<i::Map>, Region*)
+// static Handle<Type>::type from_constant(i::Handle<i::Object>, Region*);
+// static Handle<Type>::type from_union(Handle<Unioned>::type);
+// static Handle<Unioned>::type union_create(int size, Region*);
+// static void union_shrink(Handle<Unioned>::type, int size);
+// static Handle<Type>::type union_get(Handle<Unioned>::type, int);
+// static void union_set(Handle<Unioned>::type, int, Handle<Type>::type);
+// static int union_length(Handle<Unioned>::type);
+// }
+template<class Config>
+class TypeImpl : public Config::Base {
public:
- #define DEFINE_TYPE_CONSTRUCTOR(type, value) \
- static Type* type() { return from_bitset(k##type); }
- TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+ typedef typename Config::template Handle<TypeImpl>::type TypeHandle;
+ typedef typename Config::Region Region;
+
+ #define DEFINE_TYPE_CONSTRUCTOR(type, value) \
+ static TypeImpl* type() { return Config::from_bitset(k##type); } \
+ static TypeHandle type(Region* region) { \
+ return Config::from_bitset(k##type, region); \
+ }
+ BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
#undef DEFINE_TYPE_CONSTRUCTOR
- static Type* Class(Handle<Map> map) { return from_handle(map); }
- static Type* Constant(Handle<HeapObject> value) {
- return Constant(value, value->GetIsolate());
+ static TypeHandle Class(i::Handle<i::Map> map, Region* region) {
+ return Config::from_class(map, region);
}
- static Type* Constant(Handle<v8::internal::Object> value, Isolate* isolate) {
- return from_handle(isolate->factory()->NewBox(value));
+ static TypeHandle Constant(i::Handle<i::Object> value, Region* region) {
+ return Config::from_constant(value, region);
}
- static Type* Union(Handle<Type> type1, Handle<Type> type2);
- static Type* Intersect(Handle<Type> type1, Handle<Type> type2);
- static Type* Optional(Handle<Type> type); // type \/ Undefined
+ static TypeHandle Union(TypeHandle type1, TypeHandle type2, Region* reg);
+ static TypeHandle Intersect(TypeHandle type1, TypeHandle type2, Region* reg);
- bool Is(Type* that) { return (this == that) ? true : SlowIs(that); }
- bool Is(Handle<Type> that) { return this->Is(*that); }
- bool Maybe(Type* that);
- bool Maybe(Handle<Type> that) { return this->Maybe(*that); }
+ static TypeHandle Of(i::Handle<i::Object> value, Region* region) {
+ return Config::from_bitset(LubBitset(*value), region);
+ }
- bool IsClass() { return is_class(); }
- bool IsConstant() { return is_constant(); }
- Handle<Map> AsClass() { return as_class(); }
- Handle<v8::internal::Object> AsConstant() { return as_constant(); }
+ bool Is(TypeImpl* that) { return this == that || this->SlowIs(that); }
+ template<class TypeHandle>
+ bool Is(TypeHandle that) { return this->Is(*that); }
+ bool Maybe(TypeImpl* that);
+ template<class TypeHandle>
+ bool Maybe(TypeHandle that) { return this->Maybe(*that); }
+
+ // State-dependent versions of Of and Is that consider subtyping between
+ // a constant and its map class.
+ static TypeHandle OfCurrently(i::Handle<i::Object> value, Region* region);
+ bool IsCurrently(TypeImpl* that);
+ template<class TypeHandle>
+ bool IsCurrently(TypeHandle that) { return this->IsCurrently(*that); }
+
+ bool IsClass() { return Config::is_class(this); }
+ bool IsConstant() { return Config::is_constant(this); }
+ i::Handle<i::Map> AsClass() { return Config::as_class(this); }
+ i::Handle<i::Object> AsConstant() { return Config::as_constant(this); }
int NumClasses();
int NumConstants();
@@ -173,179 +210,361 @@ class Type : public Object {
class Iterator {
public:
bool Done() const { return index_ < 0; }
- Handle<T> Current();
+ i::Handle<T> Current();
void Advance();
private:
- friend class Type;
+ template<class> friend class TypeImpl;
Iterator() : index_(-1) {}
- explicit Iterator(Handle<Type> type) : type_(type), index_(-1) {
+ explicit Iterator(TypeHandle type) : type_(type), index_(-1) {
Advance();
}
- inline bool matches(Handle<Type> type);
- inline Handle<Type> get_type();
+ inline bool matches(TypeHandle type);
+ inline TypeHandle get_type();
- Handle<Type> type_;
+ TypeHandle type_;
int index_;
};
- Iterator<Map> Classes() {
- if (this->is_bitset()) return Iterator<Map>();
- return Iterator<Map>(this->handle());
+ Iterator<i::Map> Classes() {
+ if (this->IsBitset()) return Iterator<i::Map>();
+ return Iterator<i::Map>(Config::handle(this));
}
- Iterator<v8::internal::Object> Constants() {
- if (this->is_bitset()) return Iterator<v8::internal::Object>();
- return Iterator<v8::internal::Object>(this->handle());
+ Iterator<i::Object> Constants() {
+ if (this->IsBitset()) return Iterator<i::Object>();
+ return Iterator<i::Object>(Config::handle(this));
}
- static Type* cast(v8::internal::Object* object) {
- Type* t = static_cast<Type*>(object);
- ASSERT(t->is_bitset() || t->is_class() ||
- t->is_constant() || t->is_union());
+ static TypeImpl* cast(typename Config::Base* object) {
+ TypeImpl* t = static_cast<TypeImpl*>(object);
+ ASSERT(t->IsBitset() || t->IsClass() || t->IsConstant() || t->IsUnion());
return t;
}
+ template<class OtherTypeImpl>
+ static TypeHandle Convert(
+ typename OtherTypeImpl::TypeHandle type, Region* region);
+
#ifdef OBJECT_PRINT
void TypePrint();
void TypePrint(FILE* out);
#endif
private:
+ template<class> friend class Iterator;
+ template<class> friend class TypeImpl;
+
// A union is a fixed array containing types. Invariants:
// - its length is at least 2
// - at most one field is a bitset, and it must go into index 0
// - no field is a union
- typedef FixedArray Unioned;
+ typedef typename Config::Unioned Unioned;
+ typedef typename Config::template Handle<Unioned>::type UnionedHandle;
enum {
#define DECLARE_TYPE(type, value) k##type = (value),
- TYPE_LIST(DECLARE_TYPE)
+ BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
kUnusedEOL = 0
};
- bool is_none() { return this == None(); }
- bool is_bitset() { return this->IsSmi(); }
- bool is_class() { return this->IsMap(); }
- bool is_constant() { return this->IsBox(); }
- bool is_union() { return this->IsFixedArray(); }
+ bool IsNone() { return this == None(); }
+ bool IsAny() { return this == Any(); }
+ bool IsBitset() { return Config::is_bitset(this); }
+ bool IsUnion() { return Config::is_union(this); }
+ int AsBitset() { return Config::as_bitset(this); }
+ UnionedHandle AsUnion() { return Config::as_union(this); }
+
+ static int UnionLength(UnionedHandle unioned) {
+ return Config::union_length(unioned);
+ }
+ static TypeHandle UnionGet(UnionedHandle unioned, int i) {
+ return Config::union_get(unioned, i);
+ }
+
+ bool SlowIs(TypeImpl* that);
+
+ int LubBitset(); // least upper bound that's a bitset
+ int GlbBitset(); // greatest lower bound that's a bitset
+
+ static int LubBitset(i::Object* value);
+ static int LubBitset(i::Map* map);
+
+ bool InUnion(UnionedHandle unioned, int current_size);
+ static int ExtendUnion(
+ UnionedHandle unioned, TypeHandle t, int current_size);
+ static int ExtendIntersection(
+ UnionedHandle unioned, TypeHandle t, TypeHandle other, int current_size);
+
+#ifdef OBJECT_PRINT
+ static const char* bitset_name(int bitset);
+#endif
+};
+
- bool SlowIs(Type* that);
+// Zone-allocated types are either (odd) integers to represent bitsets, or
+// (even) pointers to zone lists for everything else. The first slot of every
+// list is an explicit tag value to distinguish representation.
+struct ZoneTypeConfig {
+ private:
+ typedef i::ZoneList<void*> Tagged;
+
+ enum Tag {
+ kClassTag,
+ kConstantTag,
+ kUnionTag
+ };
- int as_bitset() { return Smi::cast(this)->value(); }
- Handle<Map> as_class() { return Handle<Map>::cast(handle()); }
- Handle<v8::internal::Object> as_constant() {
- Handle<Box> box = Handle<Box>::cast(handle());
- return v8::internal::handle(box->value(), box->GetIsolate());
+ static Tagged* tagged_create(Tag tag, int size, Zone* zone) {
+ Tagged* tagged = new(zone) Tagged(size + 1, zone);
+ tagged->Add(reinterpret_cast<void*>(tag), zone);
+ tagged->AddBlock(NULL, size, zone);
+ return tagged;
}
- Handle<Unioned> as_union() { return Handle<Unioned>::cast(handle()); }
+ static void tagged_shrink(Tagged* tagged, int size) {
+ tagged->Rewind(size + 1);
+ }
+ static Tag tagged_tag(Tagged* tagged) {
+ return static_cast<Tag>(reinterpret_cast<intptr_t>(tagged->at(0)));
+ }
+ template<class T>
+ static T tagged_get(Tagged* tagged, int i) {
+ return reinterpret_cast<T>(tagged->at(i + 1));
+ }
+ template<class T>
+ static void tagged_set(Tagged* tagged, int i, T value) {
+ tagged->at(i + 1) = reinterpret_cast<T>(value);
+ }
+ static int tagged_length(Tagged* tagged) {
+ return tagged->length() - 1;
+ }
+
+ public:
+ typedef TypeImpl<ZoneTypeConfig> Type;
+ class Base {};
+ typedef i::ZoneList<Type*> Unioned;
+ typedef i::Zone Region;
+ template<class T> struct Handle { typedef T* type; };
+
+ static Type* handle(Type* type) { return type; }
- Handle<Type> handle() { return handle_via_isolate_of(this); }
- Handle<Type> handle_via_isolate_of(Type* type) {
- ASSERT(type->IsHeapObject());
- return v8::internal::handle(this, HeapObject::cast(type)->GetIsolate());
+ static bool is(Type* type, Tag tag) {
+ return is_tagged(type) && tagged_tag(as_tagged(type)) == tag;
}
- static Type* from_bitset(int bitset) {
- return static_cast<Type*>(Object::cast(Smi::FromInt(bitset)));
+ static bool is_bitset(Type* type) {
+ return reinterpret_cast<intptr_t>(type) & 1;
}
- static Type* from_handle(Handle<HeapObject> handle) {
- return static_cast<Type*>(Object::cast(*handle));
+ static bool is_tagged(Type* type) { return !is_bitset(type); }
+ static bool is_class(Type* type) { return is(type, kClassTag); }
+ static bool is_constant(Type* type) { return is(type, kConstantTag); }
+ static bool is_union(Type* type) { return is(type, kUnionTag); }
+ static bool tagged_is_union(Tagged* tagged) {
+ return is(from_tagged(tagged), kUnionTag);
}
- static Handle<Type> union_get(Handle<Unioned> unioned, int i) {
- Type* type = static_cast<Type*>(unioned->get(i));
- ASSERT(!type->is_union());
- return type->handle_via_isolate_of(from_handle(unioned));
+ static int as_bitset(Type* type) {
+ ASSERT(is_bitset(type));
+ return static_cast<int>(reinterpret_cast<intptr_t>(type) >> 1);
+ }
+ static Tagged* as_tagged(Type* type) {
+ ASSERT(is_tagged(type));
+ return reinterpret_cast<Tagged*>(type);
+ }
+ static i::Handle<i::Map> as_class(Type* type) {
+ ASSERT(is_class(type));
+ return i::Handle<i::Map>(tagged_get<i::Map**>(as_tagged(type), 0));
+ }
+ static i::Handle<i::Object> as_constant(Type* type) {
+ ASSERT(is_constant(type));
+ return i::Handle<i::Object>(tagged_get<i::Object**>(as_tagged(type), 0));
+ }
+ static Unioned* as_union(Type* type) {
+ ASSERT(is_union(type));
+ return tagged_as_union(as_tagged(type));
+ }
+ static Unioned* tagged_as_union(Tagged* tagged) {
+ ASSERT(tagged_is_union(tagged));
+ return reinterpret_cast<Unioned*>(tagged);
}
- int LubBitset(); // least upper bound that's a bitset
- int GlbBitset(); // greatest lower bound that's a bitset
- bool InUnion(Handle<Unioned> unioned, int current_size);
- int ExtendUnion(Handle<Unioned> unioned, int current_size);
- int ExtendIntersection(
- Handle<Unioned> unioned, Handle<Type> type, int current_size);
-
- static const char* GetComposedName(int type) {
- switch (type) {
- #define PRINT_COMPOSED_TYPE(type, value) \
- case k##type: \
- return # type;
- COMPOSED_TYPE_LIST(PRINT_COMPOSED_TYPE)
- #undef PRINT_COMPOSED_TYPE
- }
- return NULL;
- }
-
- static const char* GetPrimitiveName(int type) {
- switch (type) {
- #define PRINT_PRIMITIVE_TYPE(type, value) \
- case k##type: \
- return # type;
- PRIMITIVE_TYPE_LIST(PRINT_PRIMITIVE_TYPE)
- #undef PRINT_PRIMITIVE_TYPE
- default:
- UNREACHABLE();
- return "InvalidType";
- }
+ static Type* from_bitset(int bitset) {
+ return reinterpret_cast<Type*>((bitset << 1) | 1);
+ }
+  static Type* from_bitset(int bitset, Zone* zone) {

+ return from_bitset(bitset);
+ }
+ static Type* from_tagged(Tagged* tagged) {
+ return reinterpret_cast<Type*>(tagged);
+ }
+ static Type* from_class(i::Handle<i::Map> map, Zone* zone) {
+ Tagged* tagged = tagged_create(kClassTag, 1, zone);
+ tagged_set(tagged, 0, map.location());
+ return from_tagged(tagged);
+ }
+ static Type* from_constant(i::Handle<i::Object> value, Zone* zone) {
+ Tagged* tagged = tagged_create(kConstantTag, 1, zone);
+ tagged_set(tagged, 0, value.location());
+ return from_tagged(tagged);
+ }
+ static Type* from_union(Unioned* unioned) {
+ return from_tagged(tagged_from_union(unioned));
+ }
+ static Tagged* tagged_from_union(Unioned* unioned) {
+ return reinterpret_cast<Tagged*>(unioned);
+ }
+
+ static Unioned* union_create(int size, Zone* zone) {
+ return tagged_as_union(tagged_create(kUnionTag, size, zone));
+ }
+ static void union_shrink(Unioned* unioned, int size) {
+ tagged_shrink(tagged_from_union(unioned), size);
+ }
+ static Type* union_get(Unioned* unioned, int i) {
+ Type* type = tagged_get<Type*>(tagged_from_union(unioned), i);
+ ASSERT(!is_union(type));
+ return type;
+ }
+ static void union_set(Unioned* unioned, int i, Type* type) {
+ ASSERT(!is_union(type));
+ tagged_set(tagged_from_union(unioned), i, type);
+ }
+ static int union_length(Unioned* unioned) {
+ return tagged_length(tagged_from_union(unioned));
}
};
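The pointer-tagging scheme above is the key design choice: a bitset is encoded directly in the low-tagged pointer value, so representing it costs no zone memory at all. A small round-trip sketch (illustrative; the bitset value is hypothetical):

    Type* t = ZoneTypeConfig::from_bitset(0x28);  // stored as (0x28 << 1) | 1
    ZoneTypeConfig::is_bitset(t);                 // true: the low bit is set
    ZoneTypeConfig::as_bitset(t);                 // 0x28 again, via the >> 1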
-// A simple struct to represent a pair of lower/upper type bounds.
-struct Bounds {
- Handle<Type> lower;
- Handle<Type> upper;
+// Heap-allocated types are either smis for bitsets, maps for classes, boxes for
+// constants, or fixed arrays for unions.
+struct HeapTypeConfig {
+ typedef TypeImpl<HeapTypeConfig> Type;
+ typedef i::Object Base;
+ typedef i::FixedArray Unioned;
+ typedef i::Isolate Region;
+ template<class T> struct Handle { typedef i::Handle<T> type; };
- Bounds() {}
- Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {
- ASSERT(lower->Is(upper));
+ static i::Handle<Type> handle(Type* type) {
+ return i::handle(type, i::HeapObject::cast(type)->GetIsolate());
}
- Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {
- ASSERT(lower->Is(upper));
+
+ static bool is_bitset(Type* type) { return type->IsSmi(); }
+ static bool is_class(Type* type) { return type->IsMap(); }
+ static bool is_constant(Type* type) { return type->IsBox(); }
+ static bool is_union(Type* type) { return type->IsFixedArray(); }
+
+ static int as_bitset(Type* type) {
+ return Smi::cast(type)->value();
}
- explicit Bounds(Handle<Type> t) : lower(t), upper(t) {
- ASSERT(lower->Is(upper));
+ static i::Handle<i::Map> as_class(Type* type) {
+ return i::handle(i::Map::cast(type));
+ }
+ static i::Handle<i::Object> as_constant(Type* type) {
+ i::Box* box = i::Box::cast(type);
+ return i::handle(box->value(), box->GetIsolate());
}
- Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {
+ static i::Handle<Unioned> as_union(Type* type) {
+ return i::handle(i::FixedArray::cast(type));
+ }
+
+ static Type* from_bitset(int bitset) {
+ return Type::cast(i::Smi::FromInt(bitset));
+ }
+ static i::Handle<Type> from_bitset(int bitset, Isolate* isolate) {
+ return i::handle(from_bitset(bitset), isolate);
+ }
+ static i::Handle<Type> from_class(i::Handle<i::Map> map, Isolate* isolate) {
+ return i::Handle<Type>::cast(i::Handle<Object>::cast(map));
+ }
+ static i::Handle<Type> from_constant(
+ i::Handle<i::Object> value, Isolate* isolate) {
+ i::Handle<Box> box = isolate->factory()->NewBox(value);
+ return i::Handle<Type>::cast(i::Handle<Object>::cast(box));
+ }
+ static i::Handle<Type> from_union(i::Handle<Unioned> unioned) {
+ return i::Handle<Type>::cast(i::Handle<Object>::cast(unioned));
+ }
+
+ static i::Handle<Unioned> union_create(int size, Isolate* isolate) {
+ return isolate->factory()->NewFixedArray(size);
+ }
+ static void union_shrink(i::Handle<Unioned> unioned, int size) {
+ unioned->Shrink(size);
+ }
+ static i::Handle<Type> union_get(i::Handle<Unioned> unioned, int i) {
+ Type* type = static_cast<Type*>(unioned->get(i));
+ ASSERT(!is_union(type));
+ return i::handle(type, unioned->GetIsolate());
+ }
+ static void union_set(
+ i::Handle<Unioned> unioned, int i, i::Handle<Type> type) {
+ ASSERT(!is_union(*type));
+ unioned->set(i, *type);
+ }
+ static int union_length(i::Handle<Unioned> unioned) {
+ return unioned->length();
+ }
+};
+
+typedef TypeImpl<ZoneTypeConfig> Type;
+typedef TypeImpl<HeapTypeConfig> HeapType;
+
+
+// A simple struct to represent a pair of lower/upper type bounds.
+template<class Config>
+struct BoundsImpl {
+ typedef TypeImpl<Config> Type;
+ typedef typename Type::TypeHandle TypeHandle;
+ typedef typename Type::Region Region;
+
+ TypeHandle lower;
+ TypeHandle upper;
+
+ BoundsImpl() {}
+ explicit BoundsImpl(TypeHandle t) : lower(t), upper(t) {}
+ BoundsImpl(TypeHandle l, TypeHandle u) : lower(l), upper(u) {
ASSERT(lower->Is(upper));
}
// Unrestricted bounds.
- static Bounds Unbounded(Isolate* isl) {
- return Bounds(Type::None(), Type::Any(), isl);
+ static BoundsImpl Unbounded(Region* region) {
+ return BoundsImpl(Type::None(region), Type::Any(region));
}
// Meet: both b1 and b2 are known to hold.
- static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) {
- Handle<Type> lower(Type::Union(b1.lower, b2.lower), isl);
- Handle<Type> upper(Type::Intersect(b1.upper, b2.upper), isl);
+ static BoundsImpl Both(BoundsImpl b1, BoundsImpl b2, Region* region) {
+ TypeHandle lower = Type::Union(b1.lower, b2.lower, region);
+ TypeHandle upper = Type::Intersect(b1.upper, b2.upper, region);
// Lower bounds are considered approximate, correct as necessary.
- lower = handle(Type::Intersect(lower, upper), isl);
- return Bounds(lower, upper);
+ lower = Type::Intersect(lower, upper, region);
+ return BoundsImpl(lower, upper);
}
// Join: either b1 or b2 is known to hold.
- static Bounds Either(Bounds b1, Bounds b2, Isolate* isl) {
- return Bounds(
- handle(Type::Intersect(b1.lower, b2.lower), isl),
- handle(Type::Union(b1.upper, b2.upper), isl));
+ static BoundsImpl Either(BoundsImpl b1, BoundsImpl b2, Region* region) {
+ TypeHandle lower = Type::Intersect(b1.lower, b2.lower, region);
+ TypeHandle upper = Type::Union(b1.upper, b2.upper, region);
+ return BoundsImpl(lower, upper);
}
- static Bounds NarrowLower(Bounds b, Handle<Type> t, Isolate* isl) {
+ static BoundsImpl NarrowLower(BoundsImpl b, TypeHandle t, Region* region) {
// Lower bounds are considered approximate, correct as necessary.
- t = handle(Type::Intersect(t, b.upper), isl);
- return Bounds(handle(Type::Union(b.lower, t), isl), b.upper);
+ t = Type::Intersect(t, b.upper, region);
+ TypeHandle lower = Type::Union(b.lower, t, region);
+ return BoundsImpl(lower, b.upper);
}
- static Bounds NarrowUpper(Bounds b, Handle<Type> t, Isolate* isl) {
- return Bounds(
- handle(Type::Intersect(b.lower, t), isl),
- handle(Type::Intersect(b.upper, t), isl));
+ static BoundsImpl NarrowUpper(BoundsImpl b, TypeHandle t, Region* region) {
+ TypeHandle lower = Type::Intersect(b.lower, t, region);
+ TypeHandle upper = Type::Intersect(b.upper, t, region);
+ return BoundsImpl(lower, upper);
}
};
+typedef BoundsImpl<ZoneTypeConfig> Bounds;
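A brief sketch of how the bounds algebra composes under the zone config (illustrative, assuming a Zone* named zone): Both is the meet, Either the join.

    Bounds a(Type::Smi(zone), Type::Number(zone));
    Bounds b = Bounds::Unbounded(zone);            // [None, Any]
    Bounds m = Bounds::Both(a, b, zone);           // [Smi, Number]
    Bounds j = Bounds::Either(a, b, zone);         // [None, Any]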
+
+
} } // namespace v8::internal
#endif // V8_TYPES_H_
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index 03c1ad16ef..b925dc610f 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -27,6 +27,8 @@
#include "typing.h"
+#include "frames.h"
+#include "frames-inl.h"
#include "parser.h" // for CompileTimeValue; TODO(rossberg): should move
#include "scopes.h"
@@ -39,10 +41,9 @@ AstTyper::AstTyper(CompilationInfo* info)
oracle_(
Handle<Code>(info->closure()->shared()->code()),
Handle<Context>(info->closure()->context()->native_context()),
- info->isolate(),
info->zone()),
store_(info->zone()) {
- InitializeAstVisitor(info->isolate());
+ InitializeAstVisitor(info->zone());
}
@@ -68,6 +69,75 @@ void AstTyper::Run(CompilationInfo* info) {
#undef RECURSE
+
+#ifdef OBJECT_PRINT
+ static void PrintObserved(Variable* var, Object* value, Type* type) {
+ PrintF(" observed %s ", var->IsParameter() ? "param" : "local");
+ var->name()->Print();
+ PrintF(" : ");
+ value->ShortPrint();
+ PrintF(" -> ");
+ type->TypePrint();
+ }
+#endif // OBJECT_PRINT
+
+
+Effect AstTyper::ObservedOnStack(Object* value) {
+ Type* lower = Type::OfCurrently(handle(value, isolate()), zone());
+ return Effect(Bounds(lower, Type::Any(zone())));
+}
+
+
+void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
+ if (stmt->OsrEntryId() != info_->osr_ast_id()) return;
+
+ DisallowHeapAllocation no_gc;
+ JavaScriptFrameIterator it(isolate());
+ JavaScriptFrame* frame = it.frame();
+ Scope* scope = info_->scope();
+
+ // Assert that the frame on the stack belongs to the function we want to OSR.
+ ASSERT_EQ(*info_->closure(), frame->function());
+
+ int params = scope->num_parameters();
+ int locals = scope->StackLocalCount();
+
+ // Use sequential composition to achieve desired narrowing.
+ // The receiver is a parameter with index -1.
+ store_.Seq(parameter_index(-1), ObservedOnStack(frame->receiver()));
+ for (int i = 0; i < params; i++) {
+ store_.Seq(parameter_index(i), ObservedOnStack(frame->GetParameter(i)));
+ }
+
+ for (int i = 0; i < locals; i++) {
+ store_.Seq(stack_local_index(i), ObservedOnStack(frame->GetExpression(i)));
+ }
+
+#ifdef OBJECT_PRINT
+ if (FLAG_trace_osr && FLAG_print_scopes) {
+ PrintObserved(scope->receiver(),
+ frame->receiver(),
+ store_.LookupBounds(parameter_index(-1)).lower);
+
+ for (int i = 0; i < params; i++) {
+ PrintObserved(scope->parameter(i),
+ frame->GetParameter(i),
+ store_.LookupBounds(parameter_index(i)).lower);
+ }
+
+ ZoneList<Variable*> local_vars(locals, zone());
+ ZoneList<Variable*> context_vars(scope->ContextLocalCount(), zone());
+ scope->CollectStackAndContextLocals(&local_vars, &context_vars);
+ for (int i = 0; i < locals; i++) {
+ PrintObserved(local_vars.at(i),
+ frame->GetExpression(i),
+ store_.LookupBounds(stack_local_index(i)).lower);
+ }
+ }
+#endif // OBJECT_PRINT
+}
+
+
#define RECURSE(call) \
do { \
ASSERT(!HasStackOverflow()); \
@@ -151,24 +221,25 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
RECURSE(Visit(stmt->tag()));
ZoneList<CaseClause*>* clauses = stmt->cases();
- SwitchStatement::SwitchType switch_type = stmt->switch_type();
Effects local_effects(zone());
bool complex_effects = false; // True for label effects or fall-through.
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
+
Effects clause_effects = EnterEffects();
if (!clause->is_default()) {
Expression* label = clause->label();
- SwitchStatement::SwitchType label_switch_type =
- label->IsSmiLiteral() ? SwitchStatement::SMI_SWITCH :
- label->IsStringLiteral() ? SwitchStatement::STRING_SWITCH :
- SwitchStatement::GENERIC_SWITCH;
- if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
- switch_type = label_switch_type;
- else if (switch_type != label_switch_type)
- switch_type = SwitchStatement::GENERIC_SWITCH;
+ // Collect type feedback.
+ Type* tag_type;
+ Type* label_type;
+ Type* combined_type;
+ oracle()->CompareType(clause->CompareId(),
+ &tag_type, &label_type, &combined_type);
+ NarrowLowerType(stmt->tag(), tag_type);
+ NarrowLowerType(label, label_type);
+ clause->set_compare_type(combined_type);
RECURSE(Visit(label));
if (!clause_effects.IsEmpty()) complex_effects = true;
@@ -189,20 +260,6 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
} else {
store_.Seq(local_effects);
}
-
- if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
- switch_type = SwitchStatement::GENERIC_SWITCH;
- stmt->set_switch_type(switch_type);
-
- // Collect type feedback.
- // TODO(rossberg): can we eliminate this special case and extra loop?
- if (switch_type == SwitchStatement::SMI_SWITCH) {
- for (int i = 0; i < clauses->length(); ++i) {
- CaseClause* clause = clauses->at(i);
- if (!clause->is_default())
- clause->RecordTypeFeedback(oracle());
- }
- }
}
@@ -221,6 +278,7 @@ void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
// computing the set of variables assigned in only some of the origins of the
// control transfer (such as the loop body here).
store_.Forget(); // Control may transfer here via looping or 'continue'.
+ ObserveTypesAtOsrEntry(stmt);
RECURSE(Visit(stmt->body()));
RECURSE(Visit(stmt->cond()));
store_.Forget(); // Control may transfer here via 'break'.
@@ -235,6 +293,7 @@ void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
store_.Forget(); // Control may transfer here via looping or 'continue'.
RECURSE(Visit(stmt->cond()));
+ ObserveTypesAtOsrEntry(stmt);
RECURSE(Visit(stmt->body()));
store_.Forget(); // Control may transfer here via termination or 'break'.
}
@@ -251,6 +310,7 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
RECURSE(Visit(stmt->cond()));
}
+ ObserveTypesAtOsrEntry(stmt);
RECURSE(Visit(stmt->body()));
if (stmt->next() != NULL) {
store_.Forget(); // Control may transfer here via 'continue'.
@@ -262,10 +322,12 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
void AstTyper::VisitForInStatement(ForInStatement* stmt) {
// Collect type feedback.
- stmt->RecordTypeFeedback(oracle());
+ stmt->set_for_in_type(static_cast<ForInStatement::ForInType>(
+ oracle()->ForInType(stmt->ForInFeedbackSlot())));
RECURSE(Visit(stmt->enumerable()));
store_.Forget(); // Control may transfer here via looping or 'continue'.
+ ObserveTypesAtOsrEntry(stmt);
RECURSE(Visit(stmt->body()));
store_.Forget(); // Control may transfer here via 'break'.
}
@@ -307,6 +369,7 @@ void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
+ expr->InitializeSharedInfo(Handle<Code>(info_->closure()->shared()->code()));
}
@@ -330,7 +393,7 @@ void AstTyper::VisitConditional(Conditional* expr) {
NarrowType(expr, Bounds::Either(
expr->then_expression()->bounds(),
- expr->else_expression()->bounds(), isolate_));
+ expr->else_expression()->bounds(), zone()));
}
@@ -343,13 +406,13 @@ void AstTyper::VisitVariableProxy(VariableProxy* expr) {
void AstTyper::VisitLiteral(Literal* expr) {
- Type* type = Type::Constant(expr->value(), isolate_);
- NarrowType(expr, Bounds(type, isolate_));
+ Type* type = Type::Constant(expr->value(), zone());
+ NarrowType(expr, Bounds(type));
}
void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
- NarrowType(expr, Bounds(Type::RegExp(), isolate_));
+ NarrowType(expr, Bounds(Type::RegExp(zone())));
}
@@ -370,7 +433,7 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
RECURSE(Visit(prop->value()));
}
- NarrowType(expr, Bounds(Type::Object(), isolate_));
+ NarrowType(expr, Bounds(Type::Object(zone())));
}
@@ -381,36 +444,37 @@ void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
RECURSE(Visit(value));
}
- NarrowType(expr, Bounds(Type::Array(), isolate_));
+ NarrowType(expr, Bounds(Type::Array(zone())));
}
void AstTyper::VisitAssignment(Assignment* expr) {
- // TODO(rossberg): Can we clean this up?
- if (expr->is_compound()) {
- // Collect type feedback.
- Expression* target = expr->target();
- Property* prop = target->AsProperty();
- if (prop != NULL) {
- prop->RecordTypeFeedback(oracle(), zone());
- expr->RecordTypeFeedback(oracle(), zone());
- }
-
- RECURSE(Visit(expr->binary_operation()));
-
- NarrowType(expr, expr->binary_operation()->bounds());
- } else {
- // Collect type feedback.
- if (expr->target()->IsProperty()) {
- expr->RecordTypeFeedback(oracle(), zone());
+ // Collect type feedback.
+ Property* prop = expr->target()->AsProperty();
+ if (prop != NULL) {
+ TypeFeedbackId id = expr->AssignmentFeedbackId();
+ expr->set_is_uninitialized(oracle()->StoreIsUninitialized(id));
+ if (!expr->IsUninitialized()) {
+ if (prop->key()->IsPropertyName()) {
+ Literal* lit_key = prop->key()->AsLiteral();
+ ASSERT(lit_key != NULL && lit_key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(lit_key->value());
+ oracle()->AssignmentReceiverTypes(id, name, expr->GetReceiverTypes());
+ } else {
+ KeyedAccessStoreMode store_mode;
+ oracle()->KeyedAssignmentReceiverTypes(
+ id, expr->GetReceiverTypes(), &store_mode);
+ expr->set_store_mode(store_mode);
+ }
}
-
- RECURSE(Visit(expr->target()));
- RECURSE(Visit(expr->value()));
-
- NarrowType(expr, expr->value()->bounds());
}
+ Expression* rhs =
+ expr->is_compound() ? expr->binary_operation() : expr->value();
+ RECURSE(Visit(expr->target()));
+ RECURSE(Visit(rhs));
+ NarrowType(expr, rhs->bounds());
+
VariableProxy* proxy = expr->target()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
@@ -430,13 +494,30 @@ void AstTyper::VisitThrow(Throw* expr) {
RECURSE(Visit(expr->exception()));
// TODO(rossberg): is it worth having a non-termination effect?
- NarrowType(expr, Bounds(Type::None(), isolate_));
+ NarrowType(expr, Bounds(Type::None(zone())));
}
void AstTyper::VisitProperty(Property* expr) {
// Collect type feedback.
- expr->RecordTypeFeedback(oracle(), zone());
+ TypeFeedbackId id = expr->PropertyFeedbackId();
+ expr->set_is_uninitialized(oracle()->LoadIsUninitialized(id));
+ if (!expr->IsUninitialized()) {
+ if (expr->key()->IsPropertyName()) {
+ Literal* lit_key = expr->key()->AsLiteral();
+ ASSERT(lit_key != NULL && lit_key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(lit_key->value());
+ bool is_prototype;
+ oracle()->PropertyReceiverTypes(
+ id, name, expr->GetReceiverTypes(), &is_prototype);
+ expr->set_is_function_prototype(is_prototype);
+ } else {
+ bool is_string;
+ oracle()->KeyedPropertyReceiverTypes(
+ id, expr->GetReceiverTypes(), &is_string);
+ expr->set_is_string_access(is_string);
+ }
+ }
RECURSE(Visit(expr->obj()));
RECURSE(Visit(expr->key()));
@@ -447,16 +528,13 @@ void AstTyper::VisitProperty(Property* expr) {
void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
- Expression* callee = expr->expression();
- Property* prop = callee->AsProperty();
- if (prop != NULL) {
- if (prop->key()->IsPropertyName())
- expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
- } else {
- expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
+ RECURSE(Visit(expr->expression()));
+ if (!expr->expression()->IsProperty() &&
+ expr->HasCallFeedbackSlot() &&
+ oracle()->CallIsMonomorphic(expr->CallFeedbackSlot())) {
+ expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackSlot()));
}
- RECURSE(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
@@ -510,13 +588,13 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::NOT:
case Token::DELETE:
- NarrowType(expr, Bounds(Type::Boolean(), isolate_));
+ NarrowType(expr, Bounds(Type::Boolean(zone())));
break;
case Token::VOID:
- NarrowType(expr, Bounds(Type::Undefined(), isolate_));
+ NarrowType(expr, Bounds(Type::Undefined(zone())));
break;
case Token::TYPEOF:
- NarrowType(expr, Bounds(Type::InternalizedString(), isolate_));
+ NarrowType(expr, Bounds(Type::InternalizedString(zone())));
break;
default:
UNREACHABLE();
@@ -526,15 +604,15 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
void AstTyper::VisitCountOperation(CountOperation* expr) {
// Collect type feedback.
- expr->RecordTypeFeedback(oracle(), zone());
- Property* prop = expr->expression()->AsProperty();
- if (prop != NULL) {
- prop->RecordTypeFeedback(oracle(), zone());
- }
+ TypeFeedbackId store_id = expr->CountStoreFeedbackId();
+ expr->set_store_mode(oracle()->GetStoreMode(store_id));
+ oracle()->CountReceiverTypes(store_id, expr->GetReceiverTypes());
+ expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId()));
+ // TODO(rossberg): merge the count type with the generic expression type.
RECURSE(Visit(expr->expression()));
- NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+ NarrowType(expr, Bounds(Type::Smi(zone()), Type::Number(zone())));
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
@@ -545,13 +623,18 @@ void AstTyper::VisitCountOperation(CountOperation* expr) {
void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
// Collect type feedback.
- Handle<Type> type, left_type, right_type;
+ Type* type;
+ Type* left_type;
+ Type* right_type;
Maybe<int> fixed_right_arg;
+ Handle<AllocationSite> allocation_site;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
- &left_type, &right_type, &type, &fixed_right_arg, expr->op());
+ &left_type, &right_type, &type, &fixed_right_arg,
+ &allocation_site, expr->op());
NarrowLowerType(expr, type);
NarrowLowerType(expr->left(), left_type);
NarrowLowerType(expr->right(), right_type);
+ expr->set_allocation_site(allocation_site);
expr->set_fixed_right_arg(fixed_right_arg);
if (expr->op() == Token::OR || expr->op() == Token::AND) {
expr->left()->RecordToBooleanTypeFeedback(oracle());
@@ -575,21 +658,17 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
store_.Seq(left_effects);
NarrowType(expr, Bounds::Either(
- expr->left()->bounds(), expr->right()->bounds(), isolate_));
+ expr->left()->bounds(), expr->right()->bounds(), zone()));
break;
}
case Token::BIT_OR:
case Token::BIT_AND: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- Handle<Type> upper(
- Type::Union(
- expr->left()->bounds().upper, expr->right()->bounds().upper),
- isolate_);
- if (!upper->Is(Type::Signed32()))
- upper = handle(Type::Signed32(), isolate_);
- Handle<Type> lower(Type::Intersect(
- handle(Type::Smi(), isolate_), upper), isolate_);
+ Type* upper = Type::Union(
+ expr->left()->bounds().upper, expr->right()->bounds().upper, zone());
+ if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+ Type* lower = Type::Intersect(Type::Smi(zone()), upper, zone());
NarrowType(expr, Bounds(lower, upper));
break;
}
@@ -598,7 +677,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::SAR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
+ NarrowType(expr, Bounds(Type::Smi(zone()), Type::Signed32(zone())));
break;
case Token::SHR:
RECURSE(Visit(expr->left()));
@@ -606,7 +685,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
// TODO(rossberg): The upper bound would be Unsigned32, but since there
// is no 'positive Smi' type for the lower bound, we use the smallest
// union of Smi and Unsigned32 as upper bound instead.
- NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+ NarrowType(expr, Bounds(Type::Smi(zone()), Type::Number(zone())));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
@@ -615,17 +694,17 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Bounds r = expr->right()->bounds();
Type* lower =
l.lower->Is(Type::None()) || r.lower->Is(Type::None()) ?
- Type::None() :
+ Type::None(zone()) :
l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
- Type::String() :
+ Type::String(zone()) :
l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
- Type::Smi() : Type::None();
+ Type::Smi(zone()) : Type::None(zone());
Type* upper =
l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
- Type::String() :
+ Type::String(zone()) :
l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
- Type::Number() : Type::NumberOrString();
- NarrowType(expr, Bounds(lower, upper, isolate_));
+ Type::Number(zone()) : Type::NumberOrString(zone());
+ NarrowType(expr, Bounds(lower, upper));
break;
}
case Token::SUB:
@@ -634,7 +713,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::MOD:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+ NarrowType(expr, Bounds(Type::Smi(zone()), Type::Number(zone())));
break;
default:
UNREACHABLE();
@@ -644,7 +723,9 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
void AstTyper::VisitCompareOperation(CompareOperation* expr) {
// Collect type feedback.
- Handle<Type> left_type, right_type, combined_type;
+ Type* left_type;
+ Type* right_type;
+ Type* combined_type;
oracle()->CompareType(expr->CompareOperationFeedbackId(),
&left_type, &right_type, &combined_type);
NarrowLowerType(expr->left(), left_type);
@@ -654,7 +735,7 @@ void AstTyper::VisitCompareOperation(CompareOperation* expr) {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Boolean(), isolate_));
+ NarrowType(expr, Bounds(Type::Boolean(zone())));
}
diff --git a/deps/v8/src/typing.h b/deps/v8/src/typing.h
index c942b00632..0517812ec3 100644
--- a/deps/v8/src/typing.h
+++ b/deps/v8/src/typing.h
@@ -58,6 +58,9 @@ class AstTyper: public AstVisitor {
private:
explicit AstTyper(CompilationInfo* info);
+ Effect ObservedOnStack(Object* value);
+ void ObserveTypesAtOsrEntry(IterationStatement* stmt);
+
static const int kNoVar = INT_MIN;
typedef v8::internal::Effects<int, kNoVar> Effects;
typedef v8::internal::NestedEffects<int, kNoVar> Store;
@@ -67,13 +70,12 @@ class AstTyper: public AstVisitor {
Store store_;
TypeFeedbackOracle* oracle() { return &oracle_; }
- Zone* zone() const { return info_->zone(); }
void NarrowType(Expression* e, Bounds b) {
- e->set_bounds(Bounds::Both(e->bounds(), b, isolate_));
+ e->set_bounds(Bounds::Both(e->bounds(), b, zone()));
}
- void NarrowLowerType(Expression* e, Handle<Type> t) {
- e->set_bounds(Bounds::NarrowLower(e->bounds(), t, isolate_));
+ void NarrowLowerType(Expression* e, Type* t) {
+ e->set_bounds(Bounds::NarrowLower(e->bounds(), t, zone()));
}
Effects EnterEffects() {
@@ -82,9 +84,15 @@ class AstTyper: public AstVisitor {
}
void ExitEffects() { store_ = store_.Pop(); }
+ int parameter_index(int index) { return -index - 2; }
+ int stack_local_index(int index) { return index; }
+
int variable_index(Variable* var) {
- return var->IsStackLocal() ? var->index() :
- var->IsParameter() ? -var->index() : kNoVar;
+ // Stack locals have the range [0 .. l]
+ // Parameters have the range [-1 .. p]
+ // We map this to [-p-2 .. -1, 0 .. l]
+ return var->IsStackLocal() ? stack_local_index(var->index()) :
+ var->IsParameter() ? parameter_index(var->index()) : kNoVar;
}
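Concretely, the mapping above lands as follows (illustrative values computed from the helpers, not from the sources):

    // parameter_index(-1) == -1   // the receiver
    // parameter_index(0)  == -2   // first declared parameter
    // stack_local_index(3) ==  3  // stack locals keep their own index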
void VisitDeclarations(ZoneList<Declaration*>* declarations);
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index f861f9f2d4..99eca644b7 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -107,8 +107,14 @@ unsigned Utf8::EncodeOneByte(char* str, uint8_t c) {
return 2;
}
-
-unsigned Utf8::Encode(char* str, uchar c, int previous) {
+// Encode writes the UTF-16 code unit c into the given str buffer. If c and
+// previous form a surrogate pair, they are combined into a single code point.
+// If replace_invalid is true, orphan surrogate code units are replaced with
+// kBadChar.
+unsigned Utf8::Encode(char* str,
+ uchar c,
+ int previous,
+ bool replace_invalid) {
static const int kMask = ~(1 << 6);
if (c <= kMaxOneByteChar) {
str[0] = c;
@@ -118,12 +124,16 @@ unsigned Utf8::Encode(char* str, uchar c, int previous) {
str[1] = 0x80 | (c & kMask);
return 2;
} else if (c <= kMaxThreeByteChar) {
- if (Utf16::IsTrailSurrogate(c) &&
- Utf16::IsLeadSurrogate(previous)) {
+ if (Utf16::IsSurrogatePair(previous, c)) {
const int kUnmatchedSize = kSizeOfUnmatchedSurrogate;
return Encode(str - kUnmatchedSize,
Utf16::CombineSurrogatePair(previous, c),
- Utf16::kNoPreviousCharacter) - kUnmatchedSize;
+ Utf16::kNoPreviousCharacter,
+ replace_invalid) - kUnmatchedSize;
+ } else if (replace_invalid &&
+ (Utf16::IsLeadSurrogate(c) ||
+ Utf16::IsTrailSurrogate(c))) {
+ c = kBadChar;
}
str[0] = 0xE0 | (c >> 12);
str[1] = 0x80 | ((c >> 6) & kMask);
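The surrogate handling works per code unit: a lead surrogate is first written as an unmatched three-byte sequence, and when the matching trail arrives the pair is re-encoded in place as one four-byte code point, which is why the size correction is negative. A hedged calling sketch for U+1F600 (assumes the enclosing unibrow namespace is in scope):

    char buf[8];
    unsigned n = Utf8::Encode(buf, 0xD83D, Utf16::kNoPreviousCharacter);
    // n == 3: the lead surrogate goes out as an unmatched surrogate for now.
    n += Utf8::Encode(buf + n, 0xDE00, 0xD83D);
    // The pair is combined and rewritten over those 3 bytes; this call returns
    // 4 - kSizeOfUnmatchedSurrogate == 1, so n ends up as 4 in total.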
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index bd32467786..2bef7ab20b 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// This file was generated at 2012-03-06 09:55:58.934483
+// This file was generated at 2014-02-07 15:31:16.733174
#include "unicode-inl.h"
#include <stdlib.h>
@@ -710,28 +710,6 @@ bool Letter::Is(uchar c) {
}
-// Space: point.category == 'Zs'
-
-static const uint16_t kSpaceTable0Size = 4;
-static const int32_t kSpaceTable0[4] = {
- 32, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kSpaceTable1Size = 5;
-static const int32_t kSpaceTable1[5] = {
- 1073741824, 10, 47, 95, 4096 }; // NOLINT
-bool Space::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kSpaceTable0,
- kSpaceTable0Size,
- c);
- case 1: return LookupPredicate(kSpaceTable1,
- kSpaceTable1Size,
- c);
- default: return false;
- }
-}
-
-
// Number: point.category == 'Nd'
static const uint16_t kNumberTable0Size = 56;
@@ -767,14 +745,14 @@ bool Number::Is(uchar c) {
}
-// WhiteSpace: 'Ws' in point.properties
+// WhiteSpace: point.category == 'Zs'
-static const uint16_t kWhiteSpaceTable0Size = 7;
-static const int32_t kWhiteSpaceTable0[7] = {
- 1073741833, 13, 32, 133, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kWhiteSpaceTable1Size = 7;
-static const int32_t kWhiteSpaceTable1[7] = {
- 1073741824, 10, 1073741864, 41, 47, 95, 4096 }; // NOLINT
+static const uint16_t kWhiteSpaceTable0Size = 4;
+static const int32_t kWhiteSpaceTable0[4] = {
+ 32, 160, 5760, 6158 }; // NOLINT
+static const uint16_t kWhiteSpaceTable1Size = 5;
+static const int32_t kWhiteSpaceTable1[5] = {
+ 1073741824, 10, 47, 95, 4096 }; // NOLINT
bool WhiteSpace::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
@@ -1833,8 +1811,6 @@ int UnicodeData::GetByteCount() {
+ kLetterTable5Size * sizeof(int32_t) // NOLINT
+ kLetterTable6Size * sizeof(int32_t) // NOLINT
+ kLetterTable7Size * sizeof(int32_t) // NOLINT
- + kSpaceTable0Size * sizeof(int32_t) // NOLINT
- + kSpaceTable1Size * sizeof(int32_t) // NOLINT
+ kNumberTable0Size * sizeof(int32_t) // NOLINT
+ kNumberTable5Size * sizeof(int32_t) // NOLINT
+ kNumberTable7Size * sizeof(int32_t) // NOLINT
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 6ba61d0e17..65a9af58fc 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -102,6 +102,9 @@ class UnicodeData {
class Utf16 {
public:
+ static inline bool IsSurrogatePair(int lead, int trail) {
+ return IsLeadSurrogate(lead) && IsTrailSurrogate(trail);
+ }
static inline bool IsLeadSurrogate(int code) {
if (code == kNoPreviousCharacter) return false;
return (code & 0xfc00) == 0xd800;
@@ -146,11 +149,16 @@ class Utf8 {
public:
static inline uchar Length(uchar chr, int previous);
static inline unsigned EncodeOneByte(char* out, uint8_t c);
- static inline unsigned Encode(
- char* out, uchar c, int previous);
+ static inline unsigned Encode(char* out,
+ uchar c,
+ int previous,
+ bool replace_invalid = false);
static uchar CalculateValue(const byte* str,
unsigned length,
unsigned* cursor);
+
+ // The unicode replacement character, used to signal invalid unicode
+ // sequences (e.g. an orphan surrogate) when converting to a UTF-8 encoding.
static const uchar kBadChar = 0xFFFD;
static const unsigned kMaxEncodedSize = 4;
static const unsigned kMaxOneByteChar = 0x7f;
@@ -162,6 +170,9 @@ class Utf8 {
// that match are coded as a 4 byte UTF-8 sequence.
static const unsigned kBytesSavedByCombiningSurrogates = 2;
static const unsigned kSizeOfUnmatchedSurrogate = 3;
+ // The maximum size a single UTF-16 code unit may take up when encoded as
+ // UTF-8.
+ static const unsigned kMax16BitCodeUnitSize = 3;
static inline uchar ValueOf(const byte* str,
unsigned length,
unsigned* cursor);
@@ -215,9 +226,6 @@ struct Lowercase {
struct Letter {
static bool Is(uchar c);
};
-struct Space {
- static bool Is(uchar c);
-};
struct Number {
static bool Is(uchar c);
};
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
index a93b046993..a2f29e4335 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/unique.h
@@ -278,7 +278,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
return out;
}
- // Makes an exact copy of this set. O(|this| + |that|).
+ // Makes an exact copy of this set. O(|this|).
UniqueSet<T>* Copy(Zone* zone) const {
UniqueSet<T>* copy = new(zone) UniqueSet<T>();
copy->size_ = this->size_;
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 062019af46..c86fcba782 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -28,10 +28,10 @@
#ifndef V8_UTILS_H_
#define V8_UTILS_H_
+#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
-#include <climits>
#include "allocation.h"
#include "checks.h"
@@ -172,6 +172,17 @@ inline T RoundUp(T x, intptr_t m) {
}
+// Increment a pointer until it has the specified alignment.
+// This works like RoundUp, but it works correctly on pointer types where
+// sizeof(*pointer) might not be 1.
+template<class T>
+T AlignUp(T pointer, size_t alignment) {
+ ASSERT(sizeof(pointer) == sizeof(uintptr_t));
+ uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+ return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
+}
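A small sketch of the intent (illustrative): the rounding is applied to the address itself, so the element type of the pointer does not matter.

    uint32_t storage[16];
    uint32_t* p = AlignUp(&storage[1], 8);
    // p is the first address >= &storage[1] whose value is a multiple of 8.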
+
+
template <typename T>
int Compare(const T& a, const T& b) {
if (a == b)
@@ -1083,12 +1094,72 @@ class EnumSet {
// The strange typing in ASSERT is necessary to avoid stupid warnings, see:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
ASSERT(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT));
- return 1 << element;
+ return static_cast<T>(1) << element;
}
T bits_;
};
+// Bit field extraction.
+inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
+ return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
+}
+
+inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
+ return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
+ return (x << (31 - msb)) >> (lsb + 31 - msb);
+}
+
+inline int signed_bitextract_64(int msb, int lsb, int x) {
+ // TODO(jbramley): This is broken for big bitfields.
+ return (x << (63 - msb)) >> (lsb + 63 - msb);
+}
+
+// Check number width.
+inline bool is_intn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < 64));
+ int64_t limit = static_cast<int64_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return !(x >> n);
+}
+
+template <class T>
+inline T truncate_to_intn(T x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return (x & ((static_cast<T>(1) << n) - 1));
+}
+
+#define INT_1_TO_63_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define DECLARE_IS_INT_N(N) \
+inline bool is_int##N(int64_t x) { return is_intn(x, N); }
+#define DECLARE_IS_UINT_N(N) \
+template <class T> \
+inline bool is_uint##N(T x) { return is_uintn(x, N); }
+#define DECLARE_TRUNCATE_TO_INT_N(N) \
+template <class T> \
+inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_INT_N
class TypeFeedbackId {
public:
@@ -1118,6 +1189,7 @@ class BailoutId {
bool IsNone() const { return id_ == kNoneId; }
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
+ bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
private:
static const int kNoneId = -1;
@@ -1139,6 +1211,21 @@ class BailoutId {
int id_;
};
+
+template <class C>
+class ContainerPointerWrapper {
+ public:
+ typedef typename C::iterator iterator;
+ typedef typename C::reverse_iterator reverse_iterator;
+ explicit ContainerPointerWrapper(C* container) : container_(container) {}
+ iterator begin() { return container_->begin(); }
+ iterator end() { return container_->end(); }
+ reverse_iterator rbegin() { return container_->rbegin(); }
+ reverse_iterator rend() { return container_->rend(); }
+ private:
+ C* container_;
+};
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
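
For reference, the bit-field helpers added to utils.h above extract a contiguous [msb:lsb] range and test whether a value fits in n signed or unsigned bits. A rough usage sketch with illustrative values (not taken from the patch):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t insn = 0x91001C41;           // arbitrary 32-bit word
  // Same as unsigned_bitextract_32(4, 0, insn): bits [4:0].
  uint32_t rd  = (insn >> 0) & 0x1F;
  // Same as unsigned_bitextract_32(21, 10, insn): bits [21:10].
  uint32_t imm = (insn >> 10) & 0xFFF;
  assert(rd == 1 && imm == 7);

  // is_intn: -129 does not fit in 8 signed bits ([-128, 127]), so is_int8(-129)
  // is false. is_uintn: a value fits in n unsigned bits iff (x >> n) == 0.
  return 0;
}
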
diff --git a/deps/v8/src/utils/random-number-generator.cc b/deps/v8/src/utils/random-number-generator.cc
index fe273315a7..d40102f91f 100644
--- a/deps/v8/src/utils/random-number-generator.cc
+++ b/deps/v8/src/utils/random-number-generator.cc
@@ -27,8 +27,8 @@
#include "utils/random-number-generator.h"
-#include <cstdio>
-#include <cstdlib>
+#include <stdio.h>
+#include <stdlib.h>
#include "flags.h"
#include "platform/mutex.h"
diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc
index a0c3ebd07c..c899b289a5 100644
--- a/deps/v8/src/v8-counters.cc
+++ b/deps/v8/src/v8-counters.cc
@@ -82,7 +82,7 @@ Counters::Counters(Isolate* isolate) {
StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \
size_of_CODE_AGE_##name##_ = \
StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name);
- CODE_AGE_LIST_WITH_NO_AGE(SC)
+ CODE_AGE_LIST_COMPLETE(SC)
#undef SC
}
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 476021cdbb..0bd4955105 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -101,6 +101,8 @@ namespace internal {
V8.MemoryHeapSamplePropertyCellSpaceCommitted) \
HM(heap_sample_code_space_committed, \
V8.MemoryHeapSampleCodeSpaceCommitted) \
+ HM(heap_sample_maximum_committed, \
+ V8.MemoryHeapSampleMaximumCommitted) \
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
@@ -240,18 +242,12 @@ namespace internal {
SC(math_asin, V8.MathAsin) \
SC(math_atan, V8.MathAtan) \
SC(math_atan2, V8.MathAtan2) \
- SC(math_ceil, V8.MathCeil) \
- SC(math_cos, V8.MathCos) \
SC(math_exp, V8.MathExp) \
SC(math_floor, V8.MathFloor) \
SC(math_log, V8.MathLog) \
SC(math_pow, V8.MathPow) \
SC(math_round, V8.MathRound) \
- SC(math_sin, V8.MathSin) \
SC(math_sqrt, V8.MathSqrt) \
- SC(math_tan, V8.MathTan) \
- SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \
- SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \
@@ -259,6 +255,9 @@ namespace internal {
SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \
SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
+ /* Number of write barriers in generated code. */ \
+ SC(write_barriers_dynamic, V8.WriteBarriersDynamic) \
+ SC(write_barriers_static, V8.WriteBarriersStatic) \
SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
@@ -341,7 +340,7 @@ class Counters {
{ return &count_of_CODE_AGE_##name##_; } \
StatsCounter* size_of_CODE_AGE_##name() \
{ return &size_of_CODE_AGE_##name##_; }
- CODE_AGE_LIST_WITH_NO_AGE(SC)
+ CODE_AGE_LIST_COMPLETE(SC)
#undef SC
enum Id {
@@ -371,7 +370,7 @@ class Counters {
#undef COUNTER_ID
#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \
kSizeOfCODE_AGE__##name,
- CODE_AGE_LIST_WITH_NO_AGE(COUNTER_ID)
+ CODE_AGE_LIST_COMPLETE(COUNTER_ID)
#undef COUNTER_ID
stats_counter_count
};
@@ -421,7 +420,7 @@ class Counters {
#define SC(name) \
StatsCounter size_of_CODE_AGE_##name##_; \
StatsCounter count_of_CODE_AGE_##name##_;
- CODE_AGE_LIST_WITH_NO_AGE(SC)
+ CODE_AGE_LIST_COMPLETE(SC)
#undef SC
friend class Isolate;
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 62330c32d4..28454b437e 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -36,6 +36,9 @@
#include "frames.h"
#include "heap-profiler.h"
#include "hydrogen.h"
+#ifdef V8_USE_DEFAULT_PLATFORM
+#include "libplatform/default-platform.h"
+#endif
#include "lithium-allocator.h"
#include "objects.h"
#include "once.h"
@@ -52,6 +55,7 @@ V8_DECLARE_ONCE(init_once);
List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
+v8::Platform* V8::platform_ = NULL;
bool V8::Initialize(Deserializer* des) {
@@ -75,6 +79,11 @@ bool V8::Initialize(Deserializer* des) {
if (isolate->IsDead()) return false;
if (isolate->IsInitialized()) return true;
+#ifdef V8_USE_DEFAULT_PLATFORM
+ DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_);
+ platform->SetThreadPoolSize(isolate->max_available_threads());
+#endif
+
return isolate->Init(des);
}
@@ -90,6 +99,7 @@ void V8::TearDown() {
isolate->TearDown();
delete isolate;
+ Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
ExternalReference::TearDownMathExpData();
@@ -100,6 +110,12 @@ void V8::TearDown() {
call_completed_callbacks_ = NULL;
Sampler::TearDown();
+
+#ifdef V8_USE_DEFAULT_PLATFORM
+ DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_);
+ platform_ = NULL;
+ delete platform;
+#endif
}
@@ -109,25 +125,6 @@ void V8::SetReturnAddressLocationResolver(
}
-// Used by JavaScript APIs
-uint32_t V8::Random(Context* context) {
- ASSERT(context->IsNativeContext());
- ByteArray* seed = context->random_seed();
- uint32_t* state = reinterpret_cast<uint32_t*>(seed->GetDataStartAddress());
-
- // When we get here, the RNG must have been initialized,
- // see the Genesis constructor in file bootstrapper.cc.
- ASSERT_NE(0, state[0]);
- ASSERT_NE(0, state[1]);
-
- // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
- state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
- state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
-
- return (state[0] << 14) + (state[1] & 0x3FFFF);
-}
-
-
void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
if (call_completed_callbacks_ == NULL) { // Lazy init.
call_completed_callbacks_ = new List<CallCompletedCallback>();
@@ -151,17 +148,16 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void V8::FireCallCompletedCallback(Isolate* isolate) {
bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
- bool observer_delivery_pending =
- FLAG_harmony_observation && isolate->observer_delivery_pending();
- if (!has_call_completed_callbacks && !observer_delivery_pending) return;
+ bool run_microtasks = isolate->autorun_microtasks() &&
+ isolate->microtask_pending();
+ if (!has_call_completed_callbacks && !run_microtasks) return;
+
HandleScopeImplementer* handle_scope_implementer =
isolate->handle_scope_implementer();
if (!handle_scope_implementer->CallDepthIsZero()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
handle_scope_implementer->IncrementCallDepth();
- if (observer_delivery_pending) {
- JSObject::DeliverChangeRecords(isolate);
- }
+ if (run_microtasks) Execution::RunMicrotasks(isolate);
if (has_call_completed_callbacks) {
for (int i = 0; i < call_completed_callbacks_->length(); i++) {
call_completed_callbacks_->at(i)();
@@ -171,68 +167,41 @@ void V8::FireCallCompletedCallback(Isolate* isolate) {
}
-// Use a union type to avoid type-aliasing optimizations in GCC.
-typedef union {
- double double_value;
- uint64_t uint64_t_value;
-} double_int_union;
-
+void V8::RunMicrotasks(Isolate* isolate) {
+ if (!isolate->microtask_pending())
+ return;
-Object* V8::FillHeapNumberWithRandom(Object* heap_number,
- Context* context) {
- double_int_union r;
- uint64_t random_bits = Random(context);
- // Convert 32 random bits to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- static const double binary_million = 1048576.0;
- r.double_value = binary_million;
- r.uint64_t_value |= random_bits;
- r.double_value -= binary_million;
+ HandleScopeImplementer* handle_scope_implementer =
+ isolate->handle_scope_implementer();
+ ASSERT(handle_scope_implementer->CallDepthIsZero());
- HeapNumber::cast(heap_number)->set_value(r.double_value);
- return heap_number;
+ // Increase call depth to prevent recursive callbacks.
+ handle_scope_implementer->IncrementCallDepth();
+ Execution::RunMicrotasks(isolate);
+ handle_scope_implementer->DecrementCallDepth();
}
void V8::InitializeOncePerProcessImpl() {
FlagList::EnforceFlagImplications();
- if (FLAG_stress_compaction) {
- FLAG_force_marking_deque_overflows = true;
- FLAG_gc_global = true;
- FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
- }
- if (FLAG_concurrent_recompilation &&
- (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs)) {
- FLAG_concurrent_recompilation = false;
- PrintF("Concurrent recompilation has been disabled for tracing.\n");
- }
-
- if (FLAG_sweeper_threads <= 0) {
- if (FLAG_concurrent_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::CONCURRENT_SWEEPING);
- } else if (FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_SWEEPING);
- }
- if (FLAG_sweeper_threads == 0) {
- FLAG_concurrent_sweeping = false;
- FLAG_parallel_sweeping = false;
+ if (FLAG_predictable) {
+ if (FLAG_random_seed == 0) {
+ // Avoid random seeds in predictable mode.
+ FLAG_random_seed = 12347;
}
- } else if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = 0;
+ FLAG_hash_seed = 0;
}
- if (FLAG_concurrent_recompilation &&
- SystemThreadManager::NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
- FLAG_concurrent_recompilation = false;
+ if (FLAG_stress_compaction) {
+ FLAG_force_marking_deque_overflows = true;
+ FLAG_gc_global = true;
+ FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
}
+#ifdef V8_USE_DEFAULT_PLATFORM
+ platform_ = new DefaultPlatform;
+#endif
Sampler::SetUp();
CPU::SetUp();
OS::PostSetUp();
@@ -248,4 +217,23 @@ void V8::InitializeOncePerProcess() {
CallOnce(&init_once, &InitializeOncePerProcessImpl);
}
+
+void V8::InitializePlatform(v8::Platform* platform) {
+ ASSERT(!platform_);
+ ASSERT(platform);
+ platform_ = platform;
+}
+
+
+void V8::ShutdownPlatform() {
+ ASSERT(platform_);
+ platform_ = NULL;
+}
+
+
+v8::Platform* V8::GetCurrentPlatform() {
+ ASSERT(platform_);
+ return platform_;
+}
+
} } // namespace v8::internal
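
The three platform functions added above implement a guarded process-wide singleton: installed once, read many times, cleared on shutdown, with ownership of the platform staying with whoever created it (or with V8 itself under V8_USE_DEFAULT_PLATFORM). A minimal standalone sketch of that pattern, using a stand-in Platform type rather than the real v8::Platform:

#include <cassert>

class Platform { /* threading and task-posting hooks would live here */ };

static Platform* g_platform = nullptr;

void InitializePlatform(Platform* platform) {
  assert(g_platform == nullptr);  // may only be installed once
  assert(platform != nullptr);
  g_platform = platform;
}

void ShutdownPlatform() {
  assert(g_platform != nullptr);
  g_platform = nullptr;  // ownership stays with whoever created it
}

Platform* GetCurrentPlatform() {
  assert(g_platform != nullptr);  // must be initialized before first use
  return g_platform;
}
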
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 5848f74818..d3f5a9c839 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -50,6 +50,7 @@
// Basic includes
#include "../include/v8.h"
+#include "../include/v8-platform.h"
#include "v8globals.h"
#include "v8checks.h"
#include "allocation.h"
@@ -95,15 +96,13 @@ class V8 : public AllStatic {
ReturnAddressLocationResolver resolver);
// Support for entry hooking JITed code.
static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
- // Random number generation support. Not cryptographically safe.
- static uint32_t Random(Context* context);
- static Object* FillHeapNumberWithRandom(Object* heap_number,
- Context* context);
static void AddCallCompletedCallback(CallCompletedCallback callback);
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
static void FireCallCompletedCallback(Isolate* isolate);
+ static void RunMicrotasks(Isolate* isolate);
+
static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() {
return array_buffer_allocator_;
}
@@ -113,6 +112,10 @@ class V8 : public AllStatic {
array_buffer_allocator_ = allocator;
}
+ static void InitializePlatform(v8::Platform* platform);
+ static void ShutdownPlatform();
+ static v8::Platform* GetCurrentPlatform();
+
private:
static void InitializeOncePerProcessImpl();
static void InitializeOncePerProcess();
@@ -121,6 +124,8 @@ class V8 : public AllStatic {
static List<CallCompletedCallback>* call_completed_callbacks_;
// Allocator for external array buffers.
static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
+ // v8::Platform to use.
+ static v8::Platform* platform_;
};
diff --git a/deps/v8/src/v8checks.h b/deps/v8/src/v8checks.h
index 9857f73d17..76e16fd24e 100644
--- a/deps/v8/src/v8checks.h
+++ b/deps/v8/src/v8checks.h
@@ -30,8 +30,6 @@
#include "checks.h"
-void API_Fatal(const char* location, const char* format, ...);
-
namespace v8 {
class Value;
template <class T> class Handle;
diff --git a/deps/v8/src/v8conversions.h b/deps/v8/src/v8conversions.h
index 3a7b5242ab..f2568c066f 100644
--- a/deps/v8/src/v8conversions.h
+++ b/deps/v8/src/v8conversions.h
@@ -33,6 +33,24 @@
namespace v8 {
namespace internal {
+
+static inline bool IsMinusZero(double value) {
+ static const DoubleRepresentation minus_zero(-0.0);
+ return DoubleRepresentation(value) == minus_zero;
+}
+
+
+// Integer32 is an integer that can be represented as a signed 32-bit
+// integer. It has to be in the range [-2^31, 2^31 - 1].
+// We also have to check for negative 0 as it is not an Integer32.
+static inline bool IsInt32Double(double value) {
+ return !IsMinusZero(value) &&
+ value >= kMinInt &&
+ value <= kMaxInt &&
+ value == FastI2D(FastD2I(value));
+}
+
+
// Convert from Number object to C integer.
inline int32_t NumberToInt32(Object* number) {
if (number->IsSmi()) return Smi::cast(number)->value();
@@ -55,19 +73,41 @@ double StringToDouble(UnicodeCache* unicode_cache,
// Converts a string into an integer.
double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
-// Converts a number into size_t.
-inline size_t NumberToSize(Isolate* isolate,
- Object* number) {
+inline bool TryNumberToSize(Isolate* isolate,
+ Object* number, size_t* result) {
SealHandleScope shs(isolate);
if (number->IsSmi()) {
- return Smi::cast(number)->value();
+ int value = Smi::cast(number)->value();
+ ASSERT(
+ static_cast<unsigned>(Smi::kMaxValue)
+ <= std::numeric_limits<size_t>::max());
+ if (value >= 0) {
+ *result = static_cast<size_t>(value);
+ return true;
+ }
+ return false;
} else {
ASSERT(number->IsHeapNumber());
double value = HeapNumber::cast(number)->value();
- return static_cast<size_t>(value);
+ if (value >= 0 &&
+ value <= std::numeric_limits<size_t>::max()) {
+ *result = static_cast<size_t>(value);
+ return true;
+ } else {
+ return false;
+ }
}
}
+// Converts a number into size_t.
+inline size_t NumberToSize(Isolate* isolate,
+ Object* number) {
+ size_t result = 0;
+ bool is_valid = TryNumberToSize(isolate, number, &result);
+ CHECK(is_valid);
+ return result;
+}
+
} } // namespace v8::internal
#endif // V8_V8CONVERSIONS_H_
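
The minus-zero check above works by comparing bit patterns, since -0.0 == 0.0 as doubles, and IsInt32Double additionally requires a lossless round trip through a signed 32-bit integer. A small standalone illustration, where BitsOf stands in for DoubleRepresentation and is not part of the patch:

#include <cassert>
#include <cstdint>
#include <cstring>

static uint64_t BitsOf(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  return bits;
}

int main() {
  // -0.0 == 0.0 as doubles, so minus zero is detected by its bit pattern.
  assert(-0.0 == 0.0);
  assert(BitsOf(-0.0) != BitsOf(0.0));  // only the sign bit differs

  // A fractional value fails the int32 round trip that IsInt32Double requires.
  double fractional = 1.5;
  assert(static_cast<double>(static_cast<int32_t>(fractional)) != fractional);
  return 0;
}
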
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 7fa2fd62c5..7d8d1b7e40 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -199,6 +199,11 @@ const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
// allows).
enum PretenureFlag { NOT_TENURED, TENURED };
+enum MinimumCapacity {
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ USE_CUSTOM_MINIMUM_CAPACITY
+};
+
enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
@@ -274,22 +279,14 @@ enum InlineCacheState {
};
-enum CheckType {
- RECEIVER_MAP_CHECK,
- STRING_CHECK,
- SYMBOL_CHECK,
- NUMBER_CHECK,
- BOOLEAN_CHECK
-};
-
-
enum CallFunctionFlags {
- NO_CALL_FUNCTION_FLAGS = 0,
- // Receiver might implicitly be the global objects. If it is, the
- // hole is passed to the call function stub.
- RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0,
+ NO_CALL_FUNCTION_FLAGS,
// The call target is cached in the instruction stream.
- RECORD_CALL_TARGET = 1 << 1
+ RECORD_CALL_TARGET,
+ CALL_AS_METHOD,
+ // Always wrap the receiver and call to the JSFunction. Only use this flag
+ // when both the receiver type and the target method are statically known.
+ WRAP_AND_CALL
};
@@ -317,6 +314,9 @@ union DoubleRepresentation {
double value;
int64_t bits;
DoubleRepresentation(double x) { value = x; }
+ bool operator==(const DoubleRepresentation& other) const {
+ return bits == other.bits;
+ }
};
@@ -439,14 +439,6 @@ enum SmiCheckType {
};
-// Used to specify whether a receiver is implicitly or explicitly
-// provided to a call.
-enum CallKind {
- CALL_AS_METHOD,
- CALL_AS_FUNCTION
-};
-
-
enum ScopeType {
EVAL_SCOPE, // The top-level scope for an eval source.
FUNCTION_SCOPE, // The top-level scope for a function.
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index c42d5c4d35..e4f0a3b860 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -79,6 +79,21 @@ function InstallGetterSetter(object, name, getter, setter) {
}
+// Helper function for installing constant properties on objects.
+function InstallConstants(object, constants) {
+ if (constants.length >= 4) {
+ %OptimizeObjectForAddingMultipleProperties(object, constants.length >> 1);
+ }
+ var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
+ for (var i = 0; i < constants.length; i += 2) {
+ var name = constants[i];
+ var k = constants[i + 1];
+ %SetProperty(object, name, k, attributes);
+ }
+ %ToFastProperties(object);
+}
+
+
// Prevents changes to the prototype of a built-in function.
// The "prototype" property of the function object is made non-configurable,
// and the prototype object is made non-extensible. The latter prevents
@@ -170,19 +185,18 @@ function GlobalParseFloat(string) {
function GlobalEval(x) {
if (!IS_STRING(x)) return x;
- var global_receiver = %GlobalReceiver(global);
- var global_is_detached = (global === global_receiver);
-
// For consistency with JSC we require the global object passed to
// eval to be the global object from which 'eval' originated. This
// is not mandated by the spec.
// We only throw if the global has been detached, since we need the
// receiver as this-value for the call.
- if (global_is_detached) {
+ if (!%IsAttachedGlobal(global)) {
throw new $EvalError('The "this" value passed to eval must ' +
'be the global object from which eval originated');
}
+ var global_receiver = %GlobalReceiver(global);
+
var f = %CompileString(x, false);
if (!IS_FUNCTION(f)) return f;
@@ -233,10 +247,7 @@ function ObjectToString() {
// ECMA-262 - 15.2.4.3
function ObjectToLocaleString() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Object.prototype.toLocaleString"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
return this.toString();
}
@@ -262,10 +273,7 @@ function ObjectHasOwnProperty(V) {
// ECMA-262 - 15.2.4.6
function ObjectIsPrototypeOf(V) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Object.prototype.isPrototypeOf"]);
- }
+ CHECK_OBJECT_COERCIBLE(this, "Object.prototype.isPrototypeOf");
if (!IS_SPEC_OBJECT(V)) return false;
return %IsInPrototypeChain(this, V);
}
@@ -389,8 +397,7 @@ function FromPropertyDescriptor(desc) {
}
// Must be an AccessorDescriptor then. We never return a generic descriptor.
return { get: desc.getGet(),
- set: desc.getSet() === ObjectSetProto ? ObjectPoisonProto
- : desc.getSet(),
+ set: desc.getSet(),
enumerable: desc.isEnumerable(),
configurable: desc.isConfigurable() };
}
@@ -1001,6 +1008,21 @@ function ObjectGetPrototypeOf(obj) {
return %GetPrototype(obj);
}
+// ES6 section 19.1.2.19.
+function ObjectSetPrototypeOf(obj, proto) {
+ CHECK_OBJECT_COERCIBLE(obj, "Object.setPrototypeOf");
+
+ if (proto !== null && !IS_SPEC_OBJECT(proto)) {
+ throw MakeTypeError("proto_object_or_null", [proto]);
+ }
+
+ if (IS_SPEC_OBJECT(obj)) {
+ %SetPrototype(obj, proto);
+ }
+
+ return obj;
+}
+
// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
@@ -1038,46 +1060,41 @@ function ToNameArray(obj, trap, includeSymbols) {
}
-// ES5 section 15.2.3.4.
-function ObjectGetOwnPropertyNames(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("called_on_non_object", ["Object.getOwnPropertyNames"]);
- }
- // Special handling for proxies.
- if (%IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED);
- return ToNameArray(names, "getOwnPropertyNames", false);
- }
-
+function ObjectGetOwnPropertyKeys(obj, symbolsOnly) {
var nameArrays = new InternalArray();
+ var filter = symbolsOnly ?
+ PROPERTY_ATTRIBUTES_STRING | PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL :
+ PROPERTY_ATTRIBUTES_SYMBOLIC;
// Find all the indexed properties.
- // Get the local element names.
- var localElementNames = %GetLocalElementNames(obj);
- for (var i = 0; i < localElementNames.length; ++i) {
- localElementNames[i] = %_NumberToString(localElementNames[i]);
- }
- nameArrays.push(localElementNames);
-
- // Get names for indexed interceptor properties.
- var interceptorInfo = %GetInterceptorInfo(obj);
- if ((interceptorInfo & 1) != 0) {
- var indexedInterceptorNames = %GetIndexedInterceptorElementNames(obj);
- if (!IS_UNDEFINED(indexedInterceptorNames)) {
- nameArrays.push(indexedInterceptorNames);
+ // Only get the local element names if we want to include string keys.
+ if (!symbolsOnly) {
+ var localElementNames = %GetLocalElementNames(obj);
+ for (var i = 0; i < localElementNames.length; ++i) {
+ localElementNames[i] = %_NumberToString(localElementNames[i]);
+ }
+ nameArrays.push(localElementNames);
+
+ // Get names for indexed interceptor properties.
+ var interceptorInfo = %GetInterceptorInfo(obj);
+ if ((interceptorInfo & 1) != 0) {
+ var indexedInterceptorNames = %GetIndexedInterceptorElementNames(obj);
+ if (!IS_UNDEFINED(indexedInterceptorNames)) {
+ nameArrays.push(indexedInterceptorNames);
+ }
}
}
// Find all the named properties.
// Get the local property names.
- nameArrays.push(%GetLocalPropertyNames(obj, false));
+ nameArrays.push(%GetLocalPropertyNames(obj, filter));
// Get names for named interceptor properties if any.
if ((interceptorInfo & 2) != 0) {
- var namedInterceptorNames = %GetNamedInterceptorPropertyNames(obj);
+ var namedInterceptorNames =
+ %GetNamedInterceptorPropertyNames(obj);
if (!IS_UNDEFINED(namedInterceptorNames)) {
nameArrays.push(namedInterceptorNames);
}
@@ -1090,18 +1107,18 @@ function ObjectGetOwnPropertyNames(obj) {
// Property names are expected to be unique strings,
// but interceptors can interfere with that assumption.
if (interceptorInfo != 0) {
- var propertySet = { __proto__: null };
+ var seenKeys = { __proto__: null };
var j = 0;
for (var i = 0; i < propertyNames.length; ++i) {
- if (IS_SYMBOL(propertyNames[i])) continue;
- var name = ToString(propertyNames[i]);
- // We need to check for the exact property value since for intrinsic
- // properties like toString if(propertySet["toString"]) will always
- // succeed.
- if (propertySet[name] === true) {
- continue;
+ var name = propertyNames[i];
+ if (symbolsOnly) {
+ if (!IS_SYMBOL(name) || IS_PRIVATE(name)) continue;
+ } else {
+ if (IS_SYMBOL(name)) continue;
+ name = ToString(name);
}
- propertySet[name] = true;
+ if (seenKeys[name]) continue;
+ seenKeys[name] = true;
propertyNames[j++] = name;
}
propertyNames.length = j;
@@ -1111,6 +1128,22 @@ function ObjectGetOwnPropertyNames(obj) {
}
+// ES5 section 15.2.3.4.
+function ObjectGetOwnPropertyNames(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("called_on_non_object", ["Object.getOwnPropertyNames"]);
+ }
+ // Special handling for proxies.
+ if (%IsJSProxy(obj)) {
+ var handler = %GetHandler(obj);
+ var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED);
+ return ToNameArray(names, "getOwnPropertyNames", false);
+ }
+
+ return ObjectGetOwnPropertyKeys(obj, false);
+}
+
+
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
if (!IS_SPEC_OBJECT(proto) && proto !== null) {
@@ -1249,7 +1282,7 @@ function ObjectFreeze(obj) {
throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
}
var isProxy = %IsJSProxy(obj);
- if (isProxy || %HasNonStrictArgumentsElements(obj)) {
+ if (isProxy || %HasNonStrictArgumentsElements(obj) || %IsObserved(obj)) {
if (isProxy) {
ProxyFix(obj);
}
@@ -1363,12 +1396,6 @@ function ObjectSetProto(obj) {
}
-// Harmony __proto__ poison pill.
-function ObjectPoisonProto(obj) {
- throw MakeTypeError("proto_poison_pill", []);
-}
-
-
function ObjectConstructor(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
@@ -1388,8 +1415,6 @@ function SetUpObject() {
%SetNativeFlag($Object);
%SetCode($Object, ObjectConstructor);
- %FunctionSetName(ObjectPoisonProto, "__proto__");
- %FunctionRemovePrototype(ObjectPoisonProto);
%SetExpectedNumberOfProperties($Object, 4);
%SetProperty($Object.prototype, "constructor", $Object, DONT_ENUM);
@@ -1418,14 +1443,18 @@ function SetUpObject() {
"defineProperties", ObjectDefineProperties,
"freeze", ObjectFreeze,
"getPrototypeOf", ObjectGetPrototypeOf,
+ "setPrototypeOf", ObjectSetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
"getOwnPropertyNames", ObjectGetOwnPropertyNames,
+ // getOwnPropertySymbols is added in symbol.js.
"is", ObjectIs,
"isExtensible", ObjectIsExtensible,
"isFrozen", ObjectIsFrozen,
"isSealed", ObjectIsSealed,
"preventExtensions", ObjectPreventExtension,
"seal", ObjectSeal
+ // deliverChangeRecords, getNotifier, observe and unobserve are added
+ // in object-observe.js.
));
}
@@ -1625,12 +1654,29 @@ function NumberIsFinite(number) {
}
+// Harmony isInteger
+function NumberIsInteger(number) {
+ return NumberIsFinite(number) && TO_INTEGER(number) == number;
+}
+
+
// Harmony isNaN.
function NumberIsNaN(number) {
return IS_NUMBER(number) && NUMBER_IS_NAN(number);
}
+// Harmony isSafeInteger
+function NumberIsSafeInteger(number) {
+ if (NumberIsFinite(number)) {
+ var integral = TO_INTEGER(number);
+ if (integral == number)
+ return MathAbs(integral) <= $Number.MAX_SAFE_INTEGER;
+ }
+ return false;
+}
+
+
// ----------------------------------------------------------------------------
function SetUpNumber() {
@@ -1643,32 +1689,24 @@ function SetUpNumber() {
// Set up the constructor property on the Number prototype object.
%SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
- %OptimizeObjectForAddingMultipleProperties($Number, 5);
- // ECMA-262 section 15.7.3.1.
- %SetProperty($Number,
- "MAX_VALUE",
- 1.7976931348623157e+308,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.2.
- %SetProperty($Number, "MIN_VALUE", 5e-324,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.3.
- %SetProperty($Number, "NaN", NAN, DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.4.
- %SetProperty($Number,
- "NEGATIVE_INFINITY",
- -INFINITY,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- // ECMA-262 section 15.7.3.5.
- %SetProperty($Number,
- "POSITIVE_INFINITY",
- INFINITY,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
- %ToFastProperties($Number);
+ InstallConstants($Number, $Array(
+ // ECMA-262 section 15.7.3.1.
+ "MAX_VALUE", 1.7976931348623157e+308,
+ // ECMA-262 section 15.7.3.2.
+ "MIN_VALUE", 5e-324,
+ // ECMA-262 section 15.7.3.3.
+ "NaN", NAN,
+ // ECMA-262 section 15.7.3.4.
+ "NEGATIVE_INFINITY", -INFINITY,
+ // ECMA-262 section 15.7.3.5.
+ "POSITIVE_INFINITY", INFINITY,
+
+ // --- Harmony constants (no spec refs until settled.)
+
+ "MAX_SAFE_INTEGER", %_MathPow(2, 53) - 1,
+ "MIN_SAFE_INTEGER", -%_MathPow(2, 53) + 1,
+ "EPSILON", %_MathPow(2, -52)
+ ));
// Set up non-enumerable functions on the Number prototype object.
InstallFunctions($Number.prototype, DONT_ENUM, $Array(
@@ -1679,9 +1717,15 @@ function SetUpNumber() {
"toExponential", NumberToExponential,
"toPrecision", NumberToPrecision
));
+
+ // Harmony Number constructor additions
InstallFunctions($Number, DONT_ENUM, $Array(
"isFinite", NumberIsFinite,
- "isNaN", NumberIsNaN
+ "isInteger", NumberIsInteger,
+ "isNaN", NumberIsNaN,
+ "isSafeInteger", NumberIsSafeInteger,
+ "parseInt", GlobalParseInt,
+ "parseFloat", GlobalParseFloat
));
}
@@ -1837,3 +1881,38 @@ function SetUpFunction() {
}
SetUpFunction();
+
+
+//----------------------------------------------------------------------------
+
+// TODO(rossberg): very simple abstraction for generic microtask queue.
+// Eventually, we should move to a real event queue that allows us to maintain
+// relative ordering of different kinds of tasks.
+
+function GetMicrotaskQueue() {
+ var microtaskState = %GetMicrotaskState();
+ if (IS_UNDEFINED(microtaskState.queue)) {
+ microtaskState.queue = new InternalArray;
+ }
+ return microtaskState.queue;
+}
+
+function RunMicrotasks() {
+ while (%SetMicrotaskPending(false)) {
+ var microtaskState = %GetMicrotaskState();
+ if (IS_UNDEFINED(microtaskState.queue))
+ return;
+
+ var microtasks = microtaskState.queue;
+ microtaskState.queue = new InternalArray;
+
+ for (var i = 0; i < microtasks.length; i++) {
+ microtasks[i]();
+ }
+ }
+}
+
+function EnqueueExternalMicrotask(fn) {
+ GetMicrotaskQueue().push(fn);
+ %SetMicrotaskPending(true);
+}
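
The RunMicrotasks loop above drains the queue in batches: it swaps in a fresh queue before running the current batch, so tasks enqueued by a running microtask are picked up on the next pass rather than mutating the array being iterated. A rough C++ sketch of the same drain-and-swap pattern (illustrative names, not part of the patch):

#include <functional>
#include <vector>

static std::vector<std::function<void()>> g_queue;
static bool g_pending = false;

void EnqueueMicrotask(std::function<void()> task) {
  g_queue.push_back(std::move(task));
  g_pending = true;
}

void RunMicrotasks() {
  while (g_pending) {
    g_pending = false;
    std::vector<std::function<void()>> batch;
    batch.swap(g_queue);               // swap in an empty queue
    for (auto& task : batch) task();   // tasks may enqueue more microtasks
  }
}
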
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index cc4f43965f..1de9d4fd76 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -133,18 +133,6 @@ Unlocker::~Unlocker() {
}
-void Locker::StartPreemption(v8::Isolate* isolate, int every_n_ms) {
- v8::internal::ContextSwitcher::StartPreemption(
- reinterpret_cast<i::Isolate*>(isolate), every_n_ms);
-}
-
-
-void Locker::StopPreemption(v8::Isolate* isolate) {
- v8::internal::ContextSwitcher::StopPreemption(
- reinterpret_cast<i::Isolate*>(isolate));
-}
-
-
namespace internal {
@@ -419,63 +407,5 @@ void ThreadManager::TerminateExecution(ThreadId thread_id) {
}
-ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
- : Thread("v8:CtxtSwitcher"),
- keep_going_(true),
- sleep_ms_(every_n_ms),
- isolate_(isolate) {
-}
-
-
-// Set the scheduling interval of V8 threads. This function starts the
-// ContextSwitcher thread if needed.
-void ContextSwitcher::StartPreemption(Isolate* isolate, int every_n_ms) {
- ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
- if (isolate->context_switcher() == NULL) {
- // If the ContextSwitcher thread is not running at the moment start it now.
- isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
- isolate->context_switcher()->Start();
- } else {
- // ContextSwitcher thread is already running, so we just change the
- // scheduling interval.
- isolate->context_switcher()->sleep_ms_ = every_n_ms;
- }
-}
-
-
-// Disable preemption of V8 threads. If multiple threads want to use V8 they
-// must cooperatively schedule amongst them from this point on.
-void ContextSwitcher::StopPreemption(Isolate* isolate) {
- ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
- if (isolate->context_switcher() != NULL) {
- // The ContextSwitcher thread is running. We need to stop it and release
- // its resources.
- isolate->context_switcher()->keep_going_ = false;
- // Wait for the ContextSwitcher thread to exit.
- isolate->context_switcher()->Join();
- // Thread has exited, now we can delete it.
- delete(isolate->context_switcher());
- isolate->set_context_switcher(NULL);
- }
-}
-
-
-// Main loop of the ContextSwitcher thread: Preempt the currently running V8
-// thread at regular intervals.
-void ContextSwitcher::Run() {
- while (keep_going_) {
- OS::Sleep(sleep_ms_);
- isolate()->stack_guard()->Preempt();
- }
-}
-
-
-// Acknowledge the preemption by the receiving thread.
-void ContextSwitcher::PreemptionReceived() {
- // There is currently no accounting being done for this. But could be in the
- // future, which is why we leave this in.
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index 1edacfc3bb..a20700a5c9 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -139,34 +139,6 @@ class ThreadManager {
};
-// The ContextSwitcher thread is used to schedule regular preemptions to
-// multiple running V8 threads. Generally it is necessary to call
-// StartPreemption if there is more than one thread running. If not, a single
-// JavaScript can take full control of V8 and not allow other threads to run.
-class ContextSwitcher: public Thread {
- public:
- // Set the preemption interval for the ContextSwitcher thread.
- static void StartPreemption(Isolate* isolate, int every_n_ms);
-
- // Stop sending preemption requests to threads.
- static void StopPreemption(Isolate* isolate);
-
- // Preempted thread needs to call back to the ContextSwitcher to acknowledge
- // the handling of a preemption request.
- static void PreemptionReceived();
-
- private:
- ContextSwitcher(Isolate* isolate, int every_n_ms);
-
- Isolate* isolate() const { return isolate_; }
-
- void Run();
-
- bool keep_going_;
- int sleep_ms_;
- Isolate* isolate_;
-};
-
} } // namespace v8::internal
#endif // V8_V8THREADS_H_
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index 02e57ebe72..058b153a7e 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -266,6 +266,9 @@ INLINE(static void CopyCharsUnsigned(sinkchar* dest,
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+#elif defined(V8_HOST_ARCH_MIPS)
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
#endif
// Copy from ASCII/16bit chars to ASCII/16bit chars.
@@ -421,6 +424,24 @@ void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
break;
}
}
+
+
+#elif defined(V8_HOST_ARCH_MIPS)
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+ if (chars < OS::kMinComplexMemCopy) {
+ memcpy(dest, src, chars);
+ } else {
+ OS::MemCopy(dest, src, chars);
+ }
+}
+
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+ if (chars < OS::kMinComplexMemCopy) {
+ memcpy(dest, src, chars * sizeof(*dest));
+ } else {
+ OS::MemCopy(dest, src, chars * sizeof(*dest));
+ }
+}
#endif
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index fa89770466..b8ddaf0fe5 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 22
-#define BUILD_NUMBER 24
-#define PATCH_LEVEL 19
+#define MINOR_VERSION 24
+#define BUILD_NUMBER 40
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index 658773e6d6..5bee438b65 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -85,8 +85,7 @@ ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
callback_(callback),
previous_scope_(isolate->external_callback_scope()) {
#ifdef USE_SIMULATOR
- int32_t sp = Simulator::current(isolate)->get_register(Simulator::sp);
- scope_address_ = reinterpret_cast<Address>(static_cast<intptr_t>(sp));
+ scope_address_ = Simulator::current(isolate)->get_sp();
#endif
isolate_->set_external_callback_scope(this);
}
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index afac886c73..073fcbe8e9 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -303,15 +303,9 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
}
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
-}
-
-
-Address* RelocInfo::target_reference_address() {
+Address RelocInfo::target_reference() {
ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
+ return Memory::Address_at(pc_);
}
@@ -370,6 +364,18 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
}
+void RelocInfo::WipeOut() {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+ // Effectively write zero into the relocation.
+ Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
// The recognized call sequence is:
// movq(kScratchRegister, address); call(kScratchRegister);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index dcb9fa5621..e7c20bb150 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -76,7 +76,7 @@ void CpuFeatures::Probe() {
ASSERT(cpu.has_sse2());
probed_features |= static_cast<uint64_t>(1) << SSE2;
- // CMOD must be available on every x64 CPU.
+ // CMOV must be available on every x64 CPU.
ASSERT(cpu.has_cmov());
probed_features |= static_cast<uint64_t>(1) << CMOV;
@@ -110,7 +110,7 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movq(kScratchRegister, target, RelocInfo::NONE64);
+ patcher.masm()->movp(kScratchRegister, target, Assembler::RelocInfoNone());
patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
@@ -140,12 +140,12 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
const int
Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
- 0, 3, 2, 1, 7, 8, 9, 11, 14, 15
+ // rax, rbx, rdx, rcx, rsi, rdi, r8, r9, r11, r14, r15
+ 0, 3, 2, 1, 6, 7, 8, 9, 11, 14, 15
};
const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, 8, 9
+ 0, 3, 2, 1, -1, -1, 4, 5, 6, 7, -1, 8, -1, -1, 9, 10
};
@@ -1305,9 +1305,19 @@ void Assembler::leal(Register dst, const Operand& src) {
void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA1);
- emitp(value, mode);
+ if (kPointerSize == kInt64Size) {
+ emit(0x48); // REX.W
+ emit(0xA1);
+ emitp(value, mode);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ emit(0xA1);
+ emitp(value, mode);
+ // In 64-bit mode, need to zero extend the operand to 8 bytes.
+ // See 2.2.1.4 in Intel64 and IA32 Architectures Software
+ // Developer's Manual Volume 2.
+ emitl(0);
+ }
}
@@ -1357,142 +1367,115 @@ void Assembler::movb(const Operand& dst, Register src) {
}
-void Assembler::movw(const Operand& dst, Register src) {
+void Assembler::movb(const Operand& dst, Immediate imm) {
EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_operand(src, dst);
+ emit_optional_rex_32(dst);
+ emit(0xC6);
+ emit_operand(0x0, dst);
+ emit(static_cast<byte>(imm.value_));
}
-void Assembler::movl(Register dst, const Operand& src) {
+void Assembler::movw(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
+ emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x8B);
emit_operand(dst, src);
}
-void Assembler::movl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_modrm(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::movl(const Operand& dst, Register src) {
+void Assembler::movw(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
+ emit(0x66);
emit_optional_rex_32(src, dst);
emit(0x89);
emit_operand(src, dst);
}
-void Assembler::movl(const Operand& dst, Immediate value) {
+void Assembler::movw(const Operand& dst, Immediate imm) {
EnsureSpace ensure_space(this);
+ emit(0x66);
emit_optional_rex_32(dst);
emit(0xC7);
emit_operand(0x0, dst);
- emit(value);
-}
-
-
-void Assembler::movl(Register dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xB8 + dst.low_bits());
- emit(value);
+ emit(static_cast<byte>(imm.value_ & 0xff));
+ emit(static_cast<byte>(imm.value_ >> 8));
}
-void Assembler::movq(Register dst, const Operand& src) {
+void Assembler::emit_mov(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
+ emit_rex(dst, src, size);
emit(0x8B);
emit_operand(dst, src);
}
-void Assembler::movq(Register dst, Register src) {
+void Assembler::emit_mov(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
+ emit_rex(src, dst, size);
emit(0x89);
emit_modrm(src, dst);
} else {
- emit_rex_64(dst, src);
+ emit_rex(dst, src, size);
emit(0x8B);
emit_modrm(dst, src);
}
}
-void Assembler::movq(Register dst, Immediate value) {
+void Assembler::emit_mov(const Operand& dst, Register src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xC7);
- emit_modrm(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
-}
-
-
-void Assembler::movq(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
+ emit_rex(src, dst, size);
emit(0x89);
emit_operand(src, dst);
}
-void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
- // This method must not be used with heap object references. The stored
- // address is not GC safe. Use the handle version instead.
- ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
+void Assembler::emit_mov(Register dst, Immediate value, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value, rmode);
+ emit_rex(dst, size);
+ if (size == kInt64Size) {
+ emit(0xC7);
+ emit_modrm(0x0, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ emit(0xB8 + dst.low_bits());
+ }
+ emit(value);
}
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
- // Non-relocatable values might not need a 64-bit representation.
- ASSERT(RelocInfo::IsNone(rmode));
- if (is_uint32(value)) {
- movl(dst, Immediate(static_cast<int32_t>(value)));
- } else if (is_int32(value)) {
- movq(dst, Immediate(static_cast<int32_t>(value)));
- } else {
- // Value cannot be represented by 32 bits, so do a full 64 bit immediate
- // value.
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(value);
- }
+void Assembler::emit_mov(const Operand& dst, Immediate value, int size) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, size);
+ emit(0xC7);
+ emit_operand(0x0, dst);
+ emit(value);
}
-void Assembler::movq(Register dst, ExternalReference ref) {
- Address value = reinterpret_cast<Address>(ref.address());
- movq(dst, value, RelocInfo::EXTERNAL_REFERENCE);
+void Assembler::movp(Register dst, void* value, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, kPointerSize);
+ emit(0xB8 | dst.low_bits());
+ emitp(value, rmode);
}
-void Assembler::movq(const Operand& dst, Immediate value) {
+void Assembler::movq(Register dst, int64_t value) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
- emit(0xC7);
- emit_operand(0, dst);
- emit(value);
+ emit(0xB8 | dst.low_bits());
+ emitq(value);
+}
+
+
+void Assembler::movq(Register dst, uint64_t value) {
+ movq(dst, static_cast<int64_t>(value));
}
@@ -1519,26 +1502,6 @@ void Assembler::movl(const Operand& dst, Label* src) {
}
-void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
- AllowDeferredHandleDereference using_raw_address;
- // If there is no relocation info, emit the value of the handle efficiently
- // (possibly using less that 8 bytes for the value).
- if (RelocInfo::IsNone(mode)) {
- // There is no possible reason to store a heap pointer without relocation
- // info, so it must be a smi.
- ASSERT(value->IsSmi());
- movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
- } else {
- EnsureSpace ensure_space(this);
- ASSERT(value->IsHeapObject());
- ASSERT(!isolate()->heap()->InNewSpace(*value));
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value.location(), mode);
- }
-}
-
-
void Assembler::movsxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
@@ -1935,9 +1898,19 @@ void Assembler::xchgl(Register dst, Register src) {
void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA3);
- emitp(dst, mode);
+ if (kPointerSize == kInt64Size) {
+ emit(0x48); // REX.W
+ emit(0xA3);
+ emitp(dst, mode);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ emit(0xA3);
+ emitp(dst, mode);
+ // In 64-bit mode, need to zero extend the operand to 8 bytes.
+ // See 2.2.1.4 in Intel64 and IA32 Architectures Software
+ // Developer's Manual Volume 2.
+ emitl(0);
+ }
}
@@ -2487,6 +2460,123 @@ void Assembler::andps(XMMRegister dst, XMMRegister src) {
}
+void Assembler::andps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::xorps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
@@ -2649,6 +2739,17 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
}
+void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0xC6);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
@@ -2918,15 +3019,6 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 508c622112..ba3dbd7613 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -44,27 +44,6 @@ namespace internal {
// Utility functions
-// Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -91,11 +70,10 @@ struct Register {
// The non-allocatable registers are:
// rsp - stack pointer
// rbp - frame pointer
- // rsi - context register
// r10 - fixed scratch register
// r12 - smi constant register
// r13 - root register
- static const int kMaxNumAllocatableRegisters = 10;
+ static const int kMaxNumAllocatableRegisters = 11;
static int NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
@@ -118,6 +96,7 @@ struct Register {
"rbx",
"rdx",
"rcx",
+ "rsi",
"rdi",
"r8",
"r9",
@@ -395,7 +374,7 @@ enum ScaleFactor {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
- times_pointer_size = times_8
+ times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
};
@@ -530,6 +509,10 @@ class CpuFeatures : public AllStatic {
};
+#define ASSEMBLER_INSTRUCTION_LIST(V) \
+ V(mov)
+
+
class Assembler : public AssemblerBase {
private:
// We check before assembling an instruction that there is sufficient
@@ -586,11 +569,13 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, target);
}
- // This sets the branch destination (which is a load instruction on x64).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- *reinterpret_cast<Address*>(instruction_payload) = target;
+ static inline RelocInfo::Mode RelocInfoNone() {
+ if (kPointerSize == kInt64Size) {
+ return RelocInfo::NONE64;
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ return RelocInfo::NONE32;
+ }
}
inline Handle<Object> code_target_object_handle_at(Address pc);
@@ -665,6 +650,24 @@ class Assembler : public AssemblerBase {
// Some mnemonics, such as "and", are the same as C++ keywords.
// Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
+#define DECLARE_INSTRUCTION(instruction) \
+ template<class P1, class P2> \
+ void instruction##p(P1 p1, P2 p2) { \
+ emit_##instruction(p1, p2, kPointerSize); \
+ } \
+ \
+ template<class P1, class P2> \
+ void instruction##l(P1 p1, P2 p2) { \
+ emit_##instruction(p1, p2, kInt32Size); \
+ } \
+ \
+ template<class P1, class P2> \
+ void instruction##q(P1 p1, P2 p2) { \
+ emit_##instruction(p1, p2, kInt64Size); \
+ }
+ ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
+#undef DECLARE_INSTRUCTION
+
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
// of m, where m must be a power of 2.
@@ -694,38 +697,24 @@ class Assembler : public AssemblerBase {
void movb(Register dst, const Operand& src);
void movb(Register dst, Immediate imm);
void movb(const Operand& dst, Register src);
+ void movb(const Operand& dst, Immediate imm);
// Move the low 16 bits of a 64-bit register value to a 16-bit
// memory location.
+ void movw(Register dst, const Operand& src);
void movw(const Operand& dst, Register src);
+ void movw(const Operand& dst, Immediate imm);
- void movl(Register dst, Register src);
- void movl(Register dst, const Operand& src);
- void movl(const Operand& dst, Register src);
- void movl(const Operand& dst, Immediate imm);
- // Load a 32-bit immediate value, zero-extended to 64 bits.
- void movl(Register dst, Immediate imm32);
-
- // Move 64 bit register value to 64-bit memory location.
- void movq(const Operand& dst, Register src);
- // Move 64 bit memory location to 64-bit register value.
- void movq(Register dst, const Operand& src);
- void movq(Register dst, Register src);
- // Sign extends immediate 32-bit value to 64 bits.
- void movq(Register dst, Immediate x);
// Move the offset of the label location relative to the current
// position (after the move) to the destination.
void movl(const Operand& dst, Label* src);
- // Move sign extended immediate to memory location.
- void movq(const Operand& dst, Immediate value);
- // Instructions to load a 64-bit immediate into a register.
- // All 64-bit immediates must have a relocation mode.
- void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
- void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
- // Moves the address of the external reference into the register.
- void movq(Register dst, ExternalReference ext);
- void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
+ // Loads a pointer into a register with a relocation mode.
+ void movp(Register dst, void* ptr, RelocInfo::Mode rmode);
+
+ // Loads a 64-bit immediate into a register.
+ void movq(Register dst, int64_t value);
+ void movq(Register dst, uint64_t value);
void movsxbq(Register dst, const Operand& src);
void movsxwq(Register dst, const Operand& src);
@@ -1011,7 +1000,6 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_32(0x1, dst, src);
}
-
void rcl(Register dst, Immediate imm8) {
shift(dst, imm8, 0x2);
}
@@ -1251,9 +1239,6 @@ class Assembler : public AssemblerBase {
// Call near absolute indirect, address in register
void call(Register adr);
- // Call near indirect
- void call(const Operand& operand);
-
// Jumps
// Jump short or near relative.
// Use a 32-bit signed displacement.
@@ -1265,9 +1250,6 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
// Conditional jumps
void j(Condition cc,
Label* L,
@@ -1350,13 +1332,27 @@ class Assembler : public AssemblerBase {
void movaps(XMMRegister dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
+ void shufps(XMMRegister dst, XMMRegister src, byte imm8);
void cvttss2si(Register dst, const Operand& src);
void cvttss2si(Register dst, XMMRegister src);
void cvtlsi2ss(XMMRegister dst, Register src);
- void xorps(XMMRegister dst, XMMRegister src);
void andps(XMMRegister dst, XMMRegister src);
+ void andps(XMMRegister dst, const Operand& src);
+ void orps(XMMRegister dst, XMMRegister src);
+ void orps(XMMRegister dst, const Operand& src);
+ void xorps(XMMRegister dst, XMMRegister src);
+ void xorps(XMMRegister dst, const Operand& src);
+
+ void addps(XMMRegister dst, XMMRegister src);
+ void addps(XMMRegister dst, const Operand& src);
+ void subps(XMMRegister dst, XMMRegister src);
+ void subps(XMMRegister dst, const Operand& src);
+ void mulps(XMMRegister dst, XMMRegister src);
+ void mulps(XMMRegister dst, const Operand& src);
+ void divps(XMMRegister dst, XMMRegister src);
+ void divps(XMMRegister dst, const Operand& src);
void movmskps(Register dst, XMMRegister src);
@@ -1476,6 +1472,13 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ protected:
+ // Call near indirect
+ void call(const Operand& operand);
+
+ // Jump near absolute indirect (m64)
+ void jmp(const Operand& src);
+
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1582,6 +1585,25 @@ class Assembler : public AssemblerBase {
// numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op);
+ template<class P1>
+ void emit_rex(P1 p1, int size) {
+ if (size == kInt64Size) {
+ emit_rex_64(p1);
+ } else {
+ ASSERT(size == kInt32Size);
+ emit_optional_rex_32(p1);
+ }
+ }
+
+ template<class P1, class P2>
+ void emit_rex(P1 p1, P2 p2, int size) {
+ if (size == kInt64Size) {
+ emit_rex_64(p1, p2);
+ } else {
+ ASSERT(size == kInt32Size);
+ emit_optional_rex_32(p1, p2);
+ }
+ }
// Emit the ModR/M byte, and optionally the SIB byte and
// 1- or 4-byte offset for a memory operand. Also encodes
@@ -1667,6 +1689,12 @@ class Assembler : public AssemblerBase {
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void emit_mov(Register dst, const Operand& src, int size);
+ void emit_mov(Register dst, Register src, int size);
+ void emit_mov(const Operand& dst, Register src, int size);
+ void emit_mov(Register dst, Immediate value, int size);
+ void emit_mov(const Operand& dst, Immediate value, int size);
+
friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
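Presumably (an assumption about the matching assembler-x64.cc change, which is not part of this excerpt) each of these emit_mov overloads selects its REX prefix through the new emit_rex templates and otherwise shares a single encoding path, roughly:

    // Sketch only; the real implementation also covers the Operand and
    // Immediate overloads declared above.
    void Assembler::emit_mov(Register dst, Register src, int size) {
      EnsureSpace ensure_space(this);
      emit_rex(src, dst, size);   // REX.W for kInt64Size, optional REX for kInt32Size
      emit(0x89);                 // MOV r/m, r
      emit_modrm(src, dst);
    }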
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 7fc26937e9..6717dd5d6d 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -32,6 +32,7 @@
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -73,35 +74,37 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(rdi);
- // Push call kind information.
- __ push(rcx);
// Function is also the parameter to the runtime call.
__ push(rdi);
__ CallRuntime(function_id, 1);
- // Restore call kind information.
- __ pop(rcx);
// Restore receiver.
__ pop(rdi);
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
__ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
__ jmp(kScratchRegister);
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -111,22 +114,14 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -158,7 +153,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ movq(kScratchRegister, debug_step_in_fp);
+ __ Move(kScratchRegister, debug_step_in_fp);
__ cmpq(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
#endif
@@ -166,7 +161,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
// rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
ASSERT(kSmiTag == 0);
__ JumpIfSmi(rax, &rt_call);
@@ -186,7 +181,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (count_constructions) {
Label allocate;
// Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ decb(FieldOperand(rcx,
SharedFunctionInfo::kConstructionCountOffset));
__ j(not_zero, &allocate);
@@ -218,10 +213,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
// rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ __ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
// Set extra fields in the newly allocated object.
// rax: initial map
// rbx: JSObject
@@ -290,9 +285,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
+ __ movp(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
__ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+ __ movp(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject
@@ -304,7 +299,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
+ __ movp(Operand(rcx, 0), rdx);
__ addq(rcx, Immediate(kPointerSize));
__ bind(&entry);
__ cmpq(rcx, rax);
@@ -316,7 +311,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rbx: JSObject
// rdi: FixedArray
__ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+ __ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
// Continue with JSObject being successfully allocated
@@ -335,10 +330,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdi: function (constructor)
__ bind(&rt_call);
// Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
+ __ movp(rdi, Operand(rsp, 0));
__ push(rdi);
__ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
+ __ movp(rbx, rax); // store result in rbx
// New object allocated.
// rbx: newly allocated object
@@ -347,7 +342,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
__ SmiToInteger32(rax, rax);
// Push the allocated receiver to the stack. We need two copies
@@ -361,7 +356,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Copy arguments and receiver to the expression stack.
Label loop, entry;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
__ push(Operand(rbx, rcx, times_pointer_size, 0));
@@ -371,16 +366,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Call the function.
if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
@@ -389,7 +381,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
@@ -407,11 +399,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
// Restore the arguments count and leave the construct frame.
__ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
// Leave construct frame.
}
@@ -475,19 +467,19 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
__ push(rdx);
__ push(r8);
// Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
+ __ movp(rax, r9);
// Load the previous frame pointer to access C argument on stack
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ __ movp(kScratchRegister, Operand(rbp, 0));
+ __ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
- __ movq(rdi, rdx);
+ __ movp(rdi, rdx);
#else // _WIN64
// GCC parameters in:
// rdi : entry (ignored)
@@ -496,7 +488,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// rcx : argc
// r8 : argv
- __ movq(rdi, rsi);
+ __ movp(rdi, rsi);
// rdi : function
// Clear the context before we push it when entering the internal frame.
@@ -507,11 +499,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the function and receiver and setup the context.
__ push(rdi);
__ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
+ __ movp(rax, rcx);
+ __ movp(rbx, r8);
#endif // _WIN64
// Current stack contents:
@@ -531,7 +523,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Set(rcx, 0); // Set loop variable to 0.
__ jmp(&entry);
__ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
__ push(Operand(kScratchRegister, 0)); // dereference handle
__ addq(rcx, Immediate(1));
__ bind(&entry);
@@ -550,8 +542,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
} else {
ParameterCount actual(rax);
// Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
}
// Exit the internal frame. Notice that this also removes the empty
// context and the function left on the stack by the code
@@ -573,19 +564,37 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+static void CallCompileOptimized(MacroAssembler* masm,
+ bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Function is also the parameter to the runtime call.
+ __ push(rdi);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+ // Restore receiver.
+ __ pop(rdi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
}
@@ -600,14 +609,13 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the stub returns.
__ subq(Operand(rsp, 0), Immediate(5));
__ Pushad();
- __ movq(arg_reg_2,
- ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1);
+ __ PrepareCallCFunction(2);
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ Popad();
__ ret(0);
@@ -633,22 +641,22 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// save/restore the registers without worrying about which of them contain
// pointers.
__ Pushad();
- __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
__ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1);
+ __ PrepareCallCFunction(2);
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
- 1);
+ 2);
}
__ Popad();
// Perform prologue operations usually performed by the young code stub.
__ PopReturnAddressTo(kScratchRegister);
__ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
__ push(rsi); // Callee's context.
__ push(rdi); // Callee's JS Function.
__ PushReturnAddressFrom(kScratchRegister);
@@ -716,7 +724,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
+ __ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
__ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -767,7 +775,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// if it is a function.
Label slow, non_function;
StackArgumentsAccessor args(rsp, rax);
- __ movq(rdi, args.GetReceiverOperand());
+ __ movp(rdi, args.GetReceiverOperand());
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
@@ -777,10 +785,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Set(rdx, 0); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &shift_arguments);
@@ -792,7 +800,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(not_zero, &shift_arguments);
// Compute the receiver in non-strict mode.
- __ movq(rbx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
@@ -813,7 +821,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
__ pop(rax);
@@ -821,21 +829,16 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
// Restore the function to rdi.
- __ movq(rdi, args.GetReceiverOperand());
+ __ movp(rdi, args.GetReceiverOperand());
__ jmp(&patch_receiver, Label::kNear);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rbx,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
- __ movq(args.GetArgumentOperand(1), rbx);
+ __ movp(args.GetArgumentOperand(1), rbx);
__ jmp(&shift_arguments);
}
@@ -852,17 +855,17 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
- __ movq(args.GetArgumentOperand(1), rdi);
+ __ movp(args.GetArgumentOperand(1), rdi);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
__ bind(&shift_arguments);
{ Label loop;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ bind(&loop);
- __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
+ __ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+ __ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
__ decq(rcx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
__ pop(rbx); // Discard copy of return address.
@@ -875,7 +878,6 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ testq(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ cmpq(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
@@ -897,20 +899,18 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 5b. Get the code to call from the function and check that the number of
// expected arguments matches what we're providing. If so, jump
// (tail-call) to the code in register rdx without checking arguments.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movsxlq(rbx,
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ SetCallKind(rcx, CALL_AS_METHOD);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpq(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
@@ -941,7 +941,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// limit" is checked.
Label okay;
__ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
+ __ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
__ subq(rcx, kScratchRegister);
@@ -967,20 +967,20 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(Immediate(0)); // index
// Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
+ __ movp(rbx, Operand(rbp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &push_receiver);
// Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &push_receiver);
@@ -1007,17 +1007,13 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_to_object);
__ push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&push_receiver, Label::kNear);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rbx,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
__ bind(&push_receiver);
@@ -1025,10 +1021,10 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Copy all arguments from the array to the stack.
Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
+ __ movp(rax, Operand(rbp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
+ __ movp(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
Handle<Code> ic =
@@ -1043,33 +1039,31 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(rax);
// Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
+ __ movp(rax, Operand(rbp, kIndexOffset));
__ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
+ __ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
__ cmpq(rax, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(rax);
__ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
__ push(rdi); // add function proxy as last argument
__ incq(rax);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1093,7 +1087,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
@@ -1123,7 +1117,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
@@ -1166,11 +1160,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Label no_arguments;
__ testq(rax, rax);
__ j(zero, &no_arguments);
- __ movq(rbx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
@@ -1206,15 +1200,15 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
__ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
}
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
// Set properties and elements.
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set the value.
- __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
+ __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
// Ensure the object is fully initialized.
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -1230,7 +1224,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ JumpIfSmi(rax, &convert_argument);
Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
__ j(NegateCondition(is_string), &convert_argument);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ IncrementCounter(counters->string_ctor_string_value(), 1);
__ jmp(&argument_is_string);
@@ -1244,7 +1238,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
__ pop(rdi);
}
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&argument_is_string);
// Load the empty string into rbx, remove the receiver from the
@@ -1271,7 +1265,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(rbp);
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -1289,10 +1283,10 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
// Leave the frame.
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
__ pop(rbp);
// Remove caller arguments from the stack.
@@ -1307,8 +1301,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
- // -- rcx : call kind information
- // -- rdx : code entry to call
+ // -- rdi: function (passed through to callee)
// -----------------------------------
Label invoke, dont_adapt_arguments;
@@ -1316,6 +1309,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ IncrementCounter(counters->arguments_adaptors(), 1);
Label enough, too_few;
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpq(rax, rbx);
__ j(less, &too_few);
__ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -1367,7 +1361,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ j(less, &fill);
// Restore function pointer.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// Call the entry point.
@@ -1391,20 +1385,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
- __ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
- __ Integer32ToSmi(rdx, rdx);
-
- // Pass both function and pc offset as arguments.
+ // Pass function as argument.
__ push(rax);
- __ push(rdx);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
Label skip;
@@ -1416,7 +1402,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ bind(&skip);
// Load deoptimization data from the code object.
- __ movq(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
@@ -1426,7 +1412,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
// Overwrite the return address on the stack.
- __ movq(Operand(rsp, 0), rax);
+ __ movq(StackOperandForReturnAddress(0), rax);
// And "return" to the OSR entry point of the function.
__ ret(0);
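The final hunk above replaces a bare Operand(rsp, 0) with StackOperandForReturnAddress(0) when the OSR code overwrites its own return address. The helper is defined elsewhere in the x64 port; presumably it is just a named wrapper along these lines:

    // Assumed shape of the helper (not part of this diff): the return address
    // is the slot most recently pushed by the call, i.e. the top of the stack.
    inline Operand StackOperandForReturnAddress(int32_t disp) {
      return Operand(rsp, disp);
    }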
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index b3ab8c1e75..92af1f0455 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -50,6 +50,16 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
}
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdi };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -78,7 +88,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -96,8 +106,8 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rbx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rbx, rdx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -114,6 +124,28 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rcx, rbx, rax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -156,18 +188,6 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
}
-void BinaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rdx, rax };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
@@ -175,15 +195,22 @@ static void InitializeArrayConstructorDescriptor(
// register state
// rax -- number of arguments
// rdi -- function
- // rbx -- type info cell with elements kind
- static Register registers[] = { rdi, rbx };
- descriptor->register_param_count_ = 2;
- if (constant_stack_parameter_count != 0) {
+ // rbx -- allocation site with elements kind
+ static Register registers_variable_args[] = { rdi, rbx, rax };
+ static Register registers_no_args[] = { rdi, rbx };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// stack param count needs (constructor pointer, and single argument)
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = rax;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
@@ -197,15 +224,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// rax -- number of arguments
// rdi -- constructor function
- static Register registers[] = { rdi };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { rdi, rax };
+ static Register registers_no_args[] = { rdi };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// stack param count needs (constructor pointer, and single argument)
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = rax;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -302,6 +335,124 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rcx, rdx, rax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { rdi, // JSFunction
+ rsi, // context
+ rax, // actual number of arguments
+ rbx, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { rsi, // context
+ rcx, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { rsi, // context
+ rcx, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { rsi, // context
+ rdx, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { rax, // callee
+ rbx, // call_data
+ rcx, // holder
+ rdx, // api_function_address
+ rsi, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+}
+
+
#define __ ACCESS_MASM(masm)
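These descriptors are what lets platform-independent code discover where each trampoline or stub expects its inputs. A purely illustrative consumer, using only the fields initialized above (the accessor and surrounding plumbing are assumptions):

    CallInterfaceDescriptor* d =
        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
    for (int i = 0; i < d->register_param_count_; ++i) {
      Register reg = d->register_params_[i];             // rdi, rsi, rax, rbx
      Representation rep = d->param_representations_[i]; // tagged vs. int32
      // ... feed (reg, rep) into the architecture-independent call builder ...
    }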
@@ -329,111 +480,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // Set up the fixed slots.
- __ Set(rbx, 0); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + (1 * kPointerSize)] : function
- // [rsp + (2 * kPointerSize)] : serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length),
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(1));
- // Get the serialized scope info from the stack.
- __ movq(rbx, args.GetArgumentOperand(0));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- __ cmpq(rcx, Immediate(0));
- __ Assert(equal, kExpected0AsASmiSentinel);
- }
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots.
- __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
- __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
- __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ PushCallerSaved(save_doubles_);
const int argument_count = 1;
@@ -549,312 +595,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // rsp[8] : argument (should be number).
- // rsp[0] : return address.
- // Output:
- // rax: tagged double result.
- // UNTAGGED case:
- // Input::
- // rsp[0] : return address.
- // xmm1 : untagged double input argument
- // Output:
- // xmm1 : untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- Label input_not_smi, loaded;
-
- // Test that rax is a number.
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
- __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kDoubleSize));
- __ Cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&loaded, Label::kNear);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- }
-
- // ST[0] == double value, if TAGGED.
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ movq(rax, cache_array);
- int cache_array_index =
- type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
- __ movq(rax, Operand(rax, cache_array_index));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- // Two uint_32's and a pointer per element.
- CHECK_EQ(2 * kIntSize + 1 * kPointerSize,
- static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
- }
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Cache hit!
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->transcendental_cache_hit(), 1);
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- __ IncrementCounter(counters->transcendental_cache_miss(), 1);
- // Update cache with new value.
- if (tagged) {
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- }
- GenerateOperation(masm, type_);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), xmm1);
- __ fld_d(Operand(rsp, 0));
- GenerateOperation(masm, type_);
- __ fstp_d(Operand(rsp, 0));
- __ movsd(xmm1, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ Push(Smi::FromInt(2 * kDoubleSize));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(
- ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
- } else { // UNTAGGED.
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
- MacroAssembler* masm, TranscendentalCache::Type type) {
- // Registers:
- // rax: Newly allocated HeapNumber, which must be preserved.
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- if (type == TranscendentalCache::SIN ||
- type == TranscendentalCache::COS ||
- type == TranscendentalCache::TAN) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, Label::kNear);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ subq(rsp, Immediate(kPointerSize));
- __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
- __ movl(Operand(rsp, 0), Immediate(0x00000000));
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ movq(rdi, rax); // Save rax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
- __ bind(&in_range);
- switch (type) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- case TranscendentalCache::TAN:
- // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
- // FP register stack.
- __ fptan();
- __ fstp(0); // Pop FP register stack.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@@ -897,7 +637,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime, done, exponent_not_smi, int_exponent;
// Save 1 in double_result - we need this several times later on.
- __ movq(scratch, Immediate(1));
+ __ movp(scratch, Immediate(1));
__ Cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
@@ -906,8 +646,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(base, args.GetArgumentOperand(0));
- __ movq(exponent, args.GetArgumentOperand(1));
+ __ movp(base, args.GetArgumentOperand(0));
+ __ movp(exponent, args.GetArgumentOperand(1));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
@@ -959,7 +699,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label continue_sqrt, continue_rsqrt, not_plus_half;
// Test for 0.5.
// Load double_scratch with 0.5.
- __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
+ __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
__ movq(double_scratch, scratch);
// Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent);
@@ -969,7 +709,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -1001,7 +741,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -1067,7 +807,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&int_exponent);
const XMMRegister double_scratch2 = double_exponent;
// Back up exponent as we need to check if exponent is negative later.
- __ movq(scratch, exponent); // Back up exponent.
+ __ movp(scratch, exponent); // Back up exponent.
__ movsd(double_scratch, double_base); // Back up base.
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
@@ -1136,8 +876,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
// Return value is in xmm0.
__ movsd(double_result, xmm0);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1);
@@ -1237,13 +975,13 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
// Check that elements are FixedArray.
// We rely on StoreIC_ArrayLength below to deal with all types of
// fast elements (including COW).
- __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ movp(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
__ j(not_equal, &miss);
// Check that the array has fast properties, otherwise the length
// property might have been redefined.
- __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ movp(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
__ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &miss);
@@ -1280,7 +1018,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Smi instead of the context. We can't use SmiCompare here, because that
// only works for comparing two smis.
Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor);
@@ -1295,14 +1033,14 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ SmiSub(rax, rax, rdx);
__ SmiToInteger32(rax, rax);
StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
+ __ movp(rax, args.GetArgumentOperand(0));
__ Ret();
// Arguments adaptor case: Check index against actual arguments
// limit found in the arguments adaptor frame. Use unsigned
// comparison to get negative check for free.
__ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ cmpq(rdx, rcx);
__ j(above_equal, &slow);
@@ -1311,7 +1049,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ SmiToInteger32(rcx, rcx);
StackArgumentsAccessor adaptor_args(rbx, rcx,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, adaptor_args.GetArgumentOperand(0));
+ __ movp(rax, adaptor_args.GetArgumentOperand(0));
__ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
@@ -1343,13 +1081,13 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
Label adaptor_frame, try_allocate;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// No adaptor, parameter count = argument count.
- __ movq(rcx, rbx);
+ __ movp(rcx, rbx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
@@ -1359,14 +1097,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
// Compute the mapped parameter count = min(rbx, rcx) in rbx.
__ cmpq(rbx, rcx);
__ j(less_equal, &try_allocate, Label::kNear);
- __ movq(rbx, rcx);
+ __ movp(rbx, rcx);
__ bind(&try_allocate);
@@ -1394,18 +1132,18 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
// Get the arguments boilerplate from the current native context into rdi.
Label has_mapped_parameters, copy;
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
__ testq(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+ __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
__ jmp(&copy, Label::kNear);
const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
__ bind(&has_mapped_parameters);
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+ __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
__ bind(&copy);
// rax = address of new object (tagged)
@@ -1414,14 +1152,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rdi = address of boilerplate object (tagged)
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rdx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rdx);
+ __ movp(rdx, FieldOperand(rdi, i));
+ __ movp(FieldOperand(rax, i), rdx);
}
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, args.GetArgumentOperand(0));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(rdx, args.GetArgumentOperand(0));
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize),
rdx);
@@ -1429,7 +1167,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Note: rcx is tagged from here on.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
@@ -1437,7 +1175,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
// rax = address of new object (tagged)
// rbx = mapped parameter count (untagged)
@@ -1451,12 +1189,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
// rbx contains the untagged argument count. Add 2 and tag to write.
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ Integer64PlusConstantToSmi(r9, rbx, 2);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
__ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
@@ -1474,7 +1212,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ addq(r8, args.GetArgumentOperand(2));
__ subq(r8, r9);
__ Move(r11, factory->the_hole_value());
- __ movq(rdx, rdi);
+ __ movp(rdx, rdi);
__ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
// r9 = loop variable (tagged)
// r8 = mapping index (tagged)
@@ -1486,11 +1224,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&parameters_loop);
__ SmiSubConstant(r9, r9, Smi::FromInt(1));
__ SmiToInteger64(kScratchRegister, r9);
- __ movq(FieldOperand(rdx, kScratchRegister,
+ __ movp(FieldOperand(rdx, kScratchRegister,
times_pointer_size,
kParameterMapHeaderSize),
r8);
- __ movq(FieldOperand(rdi, kScratchRegister,
+ __ movp(FieldOperand(rdi, kScratchRegister,
times_pointer_size,
FixedArray::kHeaderSize),
r11);
@@ -1506,11 +1244,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Copy arguments header and remaining slots (if there are any).
__ Move(FieldOperand(rdi, FixedArray::kMapOffset),
factory->fixed_array_map());
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
Label arguments_loop, arguments_test;
- __ movq(r8, rbx);
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(r8, rbx);
+ __ movp(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
__ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
@@ -1519,8 +1257,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&arguments_loop);
__ subq(rdx, Immediate(kPointerSize));
- __ movq(r9, Operand(rdx, 0));
- __ movq(FieldOperand(rdi, r8,
+ __ movp(r9, Operand(rdx, 0));
+ __ movp(FieldOperand(rdi, r8,
times_pointer_size,
FixedArray::kHeaderSize),
r9);
@@ -1537,7 +1275,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
- __ movq(args.GetArgumentOperand(2), rcx); // Patch argument count.
+ __ movp(args.GetArgumentOperand(2), rcx); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -1550,19 +1288,19 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(args.GetArgumentOperand(2), rcx);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -1577,25 +1315,25 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// Get the length from the frame.
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(2));
+ __ movp(rcx, args.GetArgumentOperand(2));
__ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(args.GetArgumentOperand(2), rcx);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
@@ -1611,22 +1349,22 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current native context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rdi, offset));
+ __ movp(rdi, Operand(rdi, offset));
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rbx);
+ __ movp(rbx, FieldOperand(rdi, i));
+ __ movp(FieldOperand(rax, i), rbx);
}
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, args.GetArgumentOperand(2));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(rcx, args.GetArgumentOperand(2));
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
@@ -1636,25 +1374,25 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(zero, &done);
// Get the parameters pointer from the stack.
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(rdx, args.GetArgumentOperand(1));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
// Untag the length for the loop below.
__ SmiToInteger64(rcx, rcx);
// Copy the fixed array slots.
Label loop;
__ bind(&loop);
- __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
+ __ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
__ addq(rdi, Immediate(kPointerSize));
__ subq(rdx, Immediate(kPointerSize));
__ decq(rcx);
@@ -1707,13 +1445,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
- __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
+ __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ JumpIfSmi(rax, &runtime);
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movp(rax, FieldOperand(rax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
Condition is_smi = masm->CheckSmi(rax);
__ Check(NegateCondition(is_smi),
@@ -1740,10 +1478,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Reset offset for possibly sliced string.
__ Set(r14, 0);
- __ movq(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
+ __ movp(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
__ JumpIfSmi(rdi, &runtime);
- __ movq(r15, rdi); // Make a copy of the original subject string.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(r15, rdi); // Make a copy of the original subject string.
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// rax: RegExp data (FixedArray)
// rdi: subject string
@@ -1803,10 +1541,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
+ __ movp(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
__ bind(&check_underlying);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// (5a) Is subject sequential two byte? If yes, go to (9).
__ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
@@ -1822,7 +1560,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) One byte sequential. Load regexp code for one byte.
__ bind(&seq_one_byte_string);
// rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
+ __ movp(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
__ Set(rcx, 1); // Type is one byte.
// (E) Carry on. String handling is done.
@@ -1842,7 +1580,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// We have to use r15 instead of rdi to load the length because rdi might
// have been only made to look like a sequential string when it actually
// is an external string.
- __ movq(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
+ __ movp(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
__ JumpIfNotSmi(rbx, &runtime);
__ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
__ j(above_equal, &runtime);
@@ -1865,25 +1603,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
__ LoadAddress(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize),
kScratchRegister);
// Argument 8: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kRegisterSize),
Immediate(1));
// Argument 7: Start (high end) of backtracking stack memory area.
- __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ Move(kScratchRegister, address_of_regexp_stack_memory_address);
+ __ movp(r9, Operand(kScratchRegister, 0));
+ __ Move(kScratchRegister, address_of_regexp_stack_memory_size);
__ addq(r9, Operand(kScratchRegister, 0));
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
// Argument 6 is passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
+ __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kRegisterSize),
Immediate(0));
#else
__ Set(r9, 0);
@@ -1894,7 +1632,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference::address_of_static_offsets_vector(isolate));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r8);
#endif
// rdi: subject string
@@ -1905,7 +1643,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r15: original subject string
// Argument 2: Previous index.
- __ movq(arg_reg_2, rbx);
+ __ movp(arg_reg_2, rbx);
// Argument 4: End of string data
// Argument 3: Start of string data
@@ -1938,7 +1676,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// use rbp, which points exactly to one pointer size below the previous rsp.
// (Because creating a new stack frame pushes the previous rbp onto the stack
// and thereby moves up rsp by one kPointerSize.)
- __ movq(arg_reg_1, r15);
+ __ movp(arg_reg_1, r15);
// Locate the code entry and call it.
__ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -1966,8 +1704,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Load RegExp data.
__ bind(&success);
- __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
+ __ movp(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
__ SmiToInteger32(rax,
FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
@@ -1975,13 +1713,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ movq(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
+ __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
__ JumpIfSmi(r15, &runtime);
__ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
+ __ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
@@ -1996,19 +1734,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdx: number of capture registers
// Store the capture count.
__ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
kScratchRegister);
// Store last subject and last input.
- __ movq(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rax);
+ __ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movp(rcx, rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastSubjectOffset,
rax,
rdi,
kDontSaveFPRegs);
- __ movq(rax, rcx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movp(rax, rcx);
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastInputOffset,
rax,
@@ -2032,7 +1770,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
__ Integer32ToSmi(rdi, rdi);
// Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
+ __ movp(FieldOperand(rbx,
rdx,
times_pointer_size,
RegExpImpl::kFirstCaptureOffset),
@@ -2041,7 +1779,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ movq(rax, r15);
+ __ movp(rax, r15);
__ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
__ bind(&exception);
@@ -2053,11 +1791,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Isolate::kPendingExceptionAddress, isolate);
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address, rbx);
- __ movq(rax, pending_exception_operand);
+ __ movp(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ cmpq(rax, rdx);
__ j(equal, &runtime);
- __ movq(pending_exception_operand, rdx);
+ __ movp(pending_exception_operand, rdx);
__ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
Label termination_exception;
@@ -2079,7 +1817,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (8) External string. Short external strings have been ruled out.
__ bind(&external_string);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
@@ -2087,7 +1825,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ testb(rbx, Immediate(kIsIndirectStringMask));
__ Assert(zero, kExternalStringExpectedButNotFound);
}
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -2100,7 +1838,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rax: RegExp data (FixedArray)
  // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
__ bind(&seq_two_byte_string);
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
+ __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
__ Set(rcx, 0); // Type is two byte.
__ jmp(&check_code); // Go to (E).
@@ -2114,97 +1852,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (11) Sliced string. Replace subject with parent. Go to (5a).
// Load offset into r14 and replace subject string with parent.
__ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
+ __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
__ jmp(&check_underlying);
#endif // V8_INTERPRETED_REGEXP
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(r8, args.GetArgumentOperand(0));
- __ JumpIfNotSmi(r8, &slowcase);
- __ SmiToInteger32(rbx, r8);
- __ cmpl(rbx, Immediate(kMaxInlineLength));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in rbx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- rbx, // In: Number of elements.
- rax, // Out: Start of allocation (tagged).
- rcx, // Out: End of allocation.
- rdx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // rax: Start of allocated area, object-tagged.
- // rbx: Number of array elements as int32.
- // r8: Number of array elements as smi.
-
- // Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
-
- // Set empty properties FixedArray.
- __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
-
- // Set elements to point to FixedArray allocated right after the JSArray.
- __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
- // Set input, index and length fields from arguments.
- __ movq(r8, args.GetArgumentOperand(2));
- __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, args.GetArgumentOperand(1));
- __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, args.GetArgumentOperand(0));
- __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
-
- // Fill out the elements FixedArray.
- // rax: JSArray.
- // rcx: FixedArray.
- // rbx: Number of elements in array as int32.
-
- // Set map.
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
- // Set length.
- __ Integer32ToSmi(rdx, rbx);
- __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with undefined.
- // rax: JSArray.
- // rbx: Number of elements in array that remains to be filled, as int32.
- // rcx: Start of elements in FixedArray.
- // rdx: undefined.
- Label loop;
- __ testl(rbx, rbx);
- __ bind(&loop);
- __ j(less_equal, &done); // Jump if rcx is negative or zero.
- __ subl(rbx, Immediate(1));
- __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -2222,7 +1875,7 @@ static void CheckInputType(MacroAssembler* masm,
__ JumpIfNotSmi(input, fail);
} else if (expected == CompareIC::NUMBER) {
__ JumpIfSmi(input, &ok);
- __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ CompareMap(input, masm->isolate()->factory()->heap_number_map());
__ j(not_equal, fail);
}
// We could be strict about internalized/non-internalized here, but as long as
@@ -2236,7 +1889,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
Register object,
Register scratch) {
__ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzxbq(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -2261,7 +1914,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ j(no_overflow, &smi_done);
__ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
- __ movq(rax, rdx);
+ __ movp(rax, rdx);
__ ret(0);
__ bind(&non_smi);
@@ -2340,7 +1993,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
__ ret(0);
__ bind(&not_smis);
@@ -2508,63 +2161,71 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
- // rbx : cache cell for call target
+ // rbx : Feedback vector
+ // rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
+ Label check_array, initialize_array, initialize_non_array, megamorphic, done;
// Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ __ SmiToInteger32(rdx, rdx);
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmpq(rcx, rdi);
__ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ j(equal, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in rcx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(not_equal, &miss);
-
- // Make sure the function is the Array() function
+ // Check if we're dealing with the Array function or not.
__ LoadArrayFunction(rcx);
__ cmpq(rdi, rcx);
+ __ j(equal, &check_array);
+
+ // Non-array cache: Reload the cache state and check it.
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ Cmp(rcx, TypeFeedbackInfo::PremonomorphicSentinel(isolate));
+ __ j(equal, &initialize_non_array);
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
__ j(not_equal, &megamorphic);
- __ jmp(&done);
- __ bind(&miss);
+ // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
+ // immortal immovable object (null) so no write-barrier is needed.
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ TypeFeedbackInfo::PremonomorphicSentinel(isolate));
+ __ jmp(&done, Label::kFar);
+
+ // Array cache: Reload the cache state and check to see if we're in a
+ // monomorphic state where the state object is an AllocationSite object.
+ __ bind(&check_array);
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map();
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(equal, &done);
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
- __ j(equal, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
+ __ j(equal, &initialize_array);
+ __ Cmp(rcx, TypeFeedbackInfo::PremonomorphicSentinel(isolate));
+ __ j(equal, &initialize_array);
+
+ // Both caches: Monomorphic -> megamorphic. The sentinel is an
+ // immortal immovable object (undefined) so no write-barrier is needed.
__ bind(&megamorphic);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
+ // Array cache: Uninitialized or premonomorphic -> monomorphic.
+ __ bind(&initialize_array);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2572,122 +2233,157 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rdi);
+ __ Integer32ToSmi(rdx, rdx);
+ __ push(rdx);
__ push(rbx);
CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub);
__ pop(rbx);
+ __ pop(rdx);
__ pop(rdi);
__ pop(rax);
__ SmiToInteger32(rax, rax);
}
- __ jmp(&done);
+ Label done_no_smi_convert;
+ __ jmp(&done_no_smi_convert);
- __ bind(&not_array_function);
- __ movq(FieldOperand(rbx, Cell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
+ // Non-array cache: Premonomorphic -> monomorphic.
+ __ bind(&initialize_non_array);
+ __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ rdi);
+ __ push(rdi);
+ __ push(rbx);
+ __ push(rdx);
+ __ RecordWriteArray(rbx, rdi, rdx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(rdx);
+ __ pop(rbx);
+ __ pop(rdi);
__ bind(&done);
+ __ Integer32ToSmi(rdx, rdx);
+
+ __ bind(&done_no_smi_convert);
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not undefined) slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label slow, non_function;
+ Label slow, non_function, wrap, cont;
StackArgumentsAccessor args(rsp, argc_);
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- __ movq(rax, args.GetReceiverOperand());
- // Call as function is indicated with the hole.
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &call, Label::kNear);
- // Patch the receiver on the stack with the global receiver object.
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rcx);
- __ bind(&call);
- }
+ if (NeedsChecks()) {
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(rdi, &non_function);
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &non_function);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
}
// Fast-case: Just invoke the function.
ParameterCount actual(argc_);
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(equal, &call_as_function);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
+ if (CallAsMethod()) {
+ if (NeedsChecks()) {
+ // Do not transform the receiver for strict mode functions.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &cont);
+
+ // Do not transform the receiver for natives.
+ // SharedFunctionInfo is already loaded into rcx.
+ __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &cont);
+ }
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+
+ // Load the receiver from the stack.
+ __ movp(rax, args.GetReceiverOperand());
+
+ if (NeedsChecks()) {
+ __ JumpIfSmi(rax, &wrap);
+
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
}
- // Check for function proxy.
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
- __ PopReturnAddressTo(rcx);
- __ push(rdi); // put proxy as additional argument under return address
- __ PushReturnAddressFrom(rcx);
- __ Set(rax, argc_ + 1);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- {
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (NeedsChecks()) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ if (RecordCallTarget()) {
+ // If there is a call target cache, mark it megamorphic in the
+ // non-function case. MegamorphicSentinel is an immortal immovable
+ // object (undefined) so no write barrier is needed.
+ __ SmiToInteger32(rdx, rdx);
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ Integer32ToSmi(rdx, rdx);
+ }
+ // Check for function proxy.
+ __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function);
+ __ PopReturnAddressTo(rcx);
+ __ push(rdi); // put proxy as additional argument under return address
+ __ PushReturnAddressFrom(rcx);
+ __ Set(rax, argc_ + 1);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(&non_function);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ Set(rax, argc_);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
+ isolate->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
}
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ movq(args.GetReceiverOperand(), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor =
- isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ if (CallAsMethod()) {
+ __ bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ push(rdi);
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(rdi);
+ }
+ __ movp(args.GetReceiverOperand(), rax);
+ __ jmp(&cont);
+ }
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not undefined) slot in feedback vector (Smi)
// rdi : constructor function
Label slow, non_function_call;
@@ -2703,8 +2399,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Jump to the function-specific construct stub.
Register jmp_reg = rcx;
- __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(jmp_reg, FieldOperand(jmp_reg,
+ __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
__ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
__ jmp(jmp_reg);
@@ -2724,7 +2420,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ bind(&do_call);
// Set expected number of arguments to zero (not changing rax).
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -2735,24 +2430,15 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
-#ifdef _WIN64
- return result_size_ == 1;
-#else
- return true;
-#endif
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- BinaryOpStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -2762,9 +2448,9 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode(isolate)->set_is_pregenerated(true);
+ save_doubles.GetCode(isolate);
}
@@ -2772,7 +2458,7 @@ static void JumpIfOOM(MacroAssembler* masm,
Register value,
Register scratch,
Label* oom_label) {
- __ movq(scratch, value);
+ __ movp(scratch, value);
STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
STATIC_ASSERT(kFailureTag == 3);
__ and_(scratch, Immediate(0xf));
@@ -2810,9 +2496,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
// stack is known to be aligned. This function takes one argument which is
// passed in register.
- __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, rax);
- __ movq(kScratchRegister,
+ __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(arg_reg_1, rax);
+ __ Move(kScratchRegister,
ExternalReference::perform_gc_function(masm->isolate()));
__ call(kScratchRegister);
}
@@ -2832,24 +2518,24 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
- __ movq(rcx, r14); // argc.
- __ movq(rdx, r15); // argv.
- __ movq(r8, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(rcx, r14); // argc.
+ __ movp(rdx, r15); // argv.
+ __ Move(r8, ExternalReference::isolate_address(masm->isolate()));
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
__ lea(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
- __ movq(rdx, r14); // argc.
- __ movq(r8, r15); // argv.
- __ movq(r9, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(rdx, r14); // argc.
+ __ movp(r8, r15); // argv.
+ __ Move(r9, ExternalReference::isolate_address(masm->isolate()));
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
- __ movq(rdx, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(rdi, r14); // argc.
+ __ movp(rsi, r15); // argv.
+ __ Move(rdx, ExternalReference::isolate_address(masm->isolate()));
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
@@ -2869,8 +2555,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Read result values stored on stack. Result is stored
// above the four argument mirror slots and the two
// Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
+ __ movq(rax, Operand(rsp, 6 * kRegisterSize));
+ __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
}
#endif
__ lea(rcx, Operand(rax, 1));
@@ -2899,7 +2585,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Isolate::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
- __ movq(rax, pending_exception_operand);
+ __ movp(rax, pending_exception_operand);
// See if we just retrieved an OOM exception.
JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
@@ -2908,7 +2594,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ movq(pending_exception_operand, rdx);
+ __ movp(pending_exception_operand, rdx);
// Special handling of termination exceptions which are uncatchable
// by javascript code.
@@ -2979,7 +2665,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE64);
+ __ Move(rax, failure, Assembler::RelocInfoNone());
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
@@ -3000,7 +2686,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
isolate);
Label already_have_failure;
JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
- __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
+ __ Move(rax, Failure::OutOfMemoryException(0x1), Assembler::RelocInfoNone());
__ bind(&already_have_failure);
__ Store(pending_exception, rax);
// Fall through to the next label.
@@ -3023,16 +2709,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
__ push(rbp);
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
// Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
// Scratch register is neither callee-save, nor an argument register on any
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE64);
+ __ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
__ push(kScratchRegister); // context slot
__ push(kScratchRegister); // function slot
// Save callee-saved registers (X64/Win64 calling conventions).
@@ -3082,7 +2766,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ testq(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ movq(rax, rbp);
+ __ movp(rax, rbp);
__ Store(js_entry_sp, rax);
Label cont;
__ jmp(&cont);
@@ -3100,7 +2784,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
__ Store(pending_exception, rax);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
+ __ Move(rax, Failure::Exception(), Assembler::RelocInfoNone());
__ jmp(&exit);
// Invoke: Link this frame into the handler chain. There's only one
@@ -3139,8 +2823,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ pop(rbx);
__ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
- __ movq(kScratchRegister, js_entry_sp);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ Move(kScratchRegister, js_entry_sp);
+ __ movp(Operand(kScratchRegister, 0), Immediate(0));
__ bind(&not_outermost_js_2);
// Restore the top frame descriptor from the stack.
@@ -3208,7 +2892,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// __ j(not_equal, &cache_miss);
// __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
// before the offset of the hole value in the root array.
- static const unsigned int kWordBeforeResultValue = 0x458B4909;
+ static const unsigned int kWordBeforeResultValue = 0x458B4906;
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
@@ -3217,7 +2901,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Label slow;
StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
+ __ movp(rax, args.GetArgumentOperand(0));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
@@ -3227,7 +2911,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(rdx, args.GetArgumentOperand(1));
// rdx is function, rax is map.
// If there is a call site cache don't look in the global cache, but do the
@@ -3269,12 +2953,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
__ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
}
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, kOffsetToMapCheckValue));
- __ movq(Operand(kScratchRegister, 0), rax);
+ __ movp(Operand(kScratchRegister, 0), rax);
}
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
Label loop, is_instance, is_not_instance;
@@ -3286,8 +2970,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance, Label::kNear);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
__ jmp(&loop);
__ bind(&is_instance);
@@ -3369,7 +3053,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ testb(result_, Immediate(kIsNotStringMask));
@@ -3419,11 +3103,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ movq(index_, rax);
+ __ movp(index_, rax);
}
__ pop(object_);
// Reload the instance type.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
@@ -3441,7 +3125,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
if (!result_.is(rax)) {
- __ movq(result_, rax);
+ __ movp(result_, rax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3461,7 +3145,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movq(result_, FieldOperand(result_, index.reg, index.scale,
+ __ movp(result_, FieldOperand(result_, index.reg, index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ j(equal, &slow_case_);
@@ -3479,7 +3163,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
if (!result_.is(rax)) {
- __ movq(result_, rax);
+ __ movp(result_, rax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3488,365 +3172,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0)); // First argument (left).
- __ movq(rdx, args.GetArgumentOperand(1)); // Second argument (right).
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfSmi(rax, &call_runtime);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &call_runtime);
-
- // First argument is a string, test second.
- __ JumpIfSmi(rdx, &call_runtime);
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &call_runtime);
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
-
- // Both arguments are strings.
- // rax: first string
- // rdx: second string
- // Check if either of the strings is empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length, Label::kNear);
- // Second string is empty, result is first string which is already in rax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length, Label::kNear);
- // First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // rax: first string
- // rbx: length of first string
- // rcx: length of second string
- // rdx: second string
- // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
- // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
-
- // If arguments were known to be strings, maps are not loaded to r8 and r9
- // by the code above.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- }
- // Get the instance types of the two strings as they will be needed soon.
- __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
- __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-
- // Look at the length of the result of adding the two strings.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx);
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return an internalized string here.
- __ SmiCompare(rbx, Smi::FromInt(2));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
-
- // Try to look up the two character string in the string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(rdi, 2);
- __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
- // rbx - first byte: first character
- // rbx - second byte: *maybe* second character
- // Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ shll(rcx, Immediate(kBitsPerByte));
- __ orl(rbx, rcx);
- // Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
- __ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ASCII the result is an ASCII cons string.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii, allocated, ascii_data;
- __ movl(rcx, r8);
- __ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testl(rcx, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ASCII cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ Load(rbx, high_promotion_mode);
- __ testb(rbx, Immediate(1));
- __ j(zero, &skip_write_barrier);
-
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ RecordWriteField(rcx,
- ConsString::kFirstOffset,
- rax,
- rbx,
- kDontSaveFPRegs);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
- __ RecordWriteField(rcx,
- ConsString::kSecondOffset,
- rdx,
- rbx,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
-
- __ bind(&skip_write_barrier);
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
-
- __ bind(&after_writing);
-
- __ movq(rax, rcx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one byte characters.
- // rcx: first instance type AND second instance type.
- // r8: first instance type.
- // r9: second instance type.
- __ testb(rcx, Immediate(kOneByteDataHintMask));
- __ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ andb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
- __ cmpb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // rax: first string
- // rbx: length of resulting flat string as smi
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label first_prepared, second_prepared;
- Label first_is_sequential, second_is_sequential;
- __ bind(&string_add_flat_result);
-
- __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
- // r14: length of first string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r8, Immediate(kStringRepresentationMask));
- __ j(zero, &first_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r8, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
- __ jmp(&first_prepared, Label::kNear);
- __ bind(&first_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ bind(&first_prepared);
-
- // Check whether both strings have same encoding.
- __ xorl(r8, r9);
- __ testb(r8, Immediate(kStringEncodingMask));
- __ j(not_zero, &call_runtime);
-
- __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
- // r15: length of second string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r9, Immediate(kStringRepresentationMask));
- __ j(zero, &second_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r9, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
- __ jmp(&second_prepared, Label::kNear);
- __ bind(&second_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r9: instance type of second string
- // First string and second string have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ SmiToInteger32(rbx, rbx);
- __ testb(r9, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii_string_add_flat_result);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ASCII strings. As they are short they are both flat.
- __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are two-byte strings. As they are short they are both flat.
- __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(rax);
- __ push(rdx);
-}
-
-
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
- Register temp) {
- __ PopReturnAddressTo(temp);
- __ pop(rdx);
- __ pop(rax);
- __ PushReturnAddressFrom(temp);
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
- __ movq(arg, scratch1);
- __ movq(Operand(rsp, stack_offset), arg);
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- } else {
- __ movzxwl(kScratchRegister, Operand(src, 0));
- __ movw(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(2));
- __ addq(dest, Immediate(2));
- }
- __ decl(count);
- __ j(not_zero, &loop);
-}
-
-
void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
Register dest,
Register src,
@@ -3903,133 +3228,6 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&done);
}
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ leal(scratch, Operand(c1, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index, Label::kNear);
- __ leal(scratch, Operand(c2, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, Immediate(kBitsPerByte));
- __ orl(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ SmiToInteger32(mask,
- FieldOperand(string_table, StringTable::kCapacityOffset));
- __ decl(mask);
-
- Register map = scratch4;
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string (32-bit int)
- // string_table: string table
- // mask: capacity mask (32-bit int)
- // map: -
- // scratch: -
-
- // Perform a number of probes in the string table.
- static const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- __ movl(scratch, hash);
- if (i > 0) {
- __ addl(scratch, Immediate(StringTable::GetProbeOffset(i)));
- }
- __ andl(scratch, mask);
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ movq(candidate,
- FieldOperand(string_table,
- scratch,
- times_pointer_size,
- StringTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CmpObjectType(candidate, ODDBALL_TYPE, map);
- __ j(not_equal, &is_string, Label::kNear);
-
- __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
- __ j(equal, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ cmpq(kScratchRegister, candidate);
- __ Assert(equal, kOddballInStringTableIsNotUndefinedOrTheHole);
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // If length is not 2 the string is not a candidate.
- __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
- Smi::FromInt(2));
- __ j(not_equal, &next_probe[i]);
-
- // We use kScratchRegister as a temporary register on the assumption that
- // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
- Register temp = kScratchRegister;
-
- // Check that the candidate is a non-external ASCII string.
- __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe[i]);
-
- // Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
- __ andl(temp, Immediate(0x0000ffff));
- __ cmpl(chars, temp);
- __ j(equal, &found_in_string_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- if (!result.is(rax)) {
- __ movq(rax, result);
- }
-}
-
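(Context for the removed helper above: its probe loop computes an open-addressed index into the string table. A minimal standalone sketch of that calculation follows, assuming a power-of-two capacity and the (n + n*n)/2 probe offset used by V8's hash tables; the offset formula is an assumption, not something stated in this hunk.)

#include <cstdint>

// Masked-index computation performed on each of the four probe iterations.
// 'capacity' is assumed to be a power of two, so 'capacity - 1' is the mask.
uint32_t ProbeIndex(uint32_t hash, uint32_t probe, uint32_t capacity) {
  uint32_t offset = (probe + probe * probe) >> 1;  // assumed GetProbeOffset(n)
  return (hash + offset) & (capacity - 1);
}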
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
@@ -4110,7 +3308,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
ARGUMENTS_DONT_CONTAIN_RECEIVER);
// Make sure first argument is a string.
- __ movq(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
+ __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
STATIC_ASSERT(kSmiTag == 0);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -4120,8 +3318,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: string
// rbx: instance type
// Calculate length of sub string using the smi values.
- __ movq(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
- __ movq(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
+ __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
+ __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
@@ -4163,24 +3361,24 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
// Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
__ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
+ __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
// Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&seq_or_external_string);
// Sequential or external string. Just move string to the correct register.
- __ movq(rdi, rax);
+ __ movp(rdi, rax);
__ bind(&underlying_unpacked);
@@ -4204,24 +3402,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
- // Make long jumps when allocation tracking is on due to
- // RecordObjectAllocation inside MacroAssembler::Allocate.
- Label::Distance jump_distance =
- masm->isolate()->heap_profiler()->is_tracking_allocations()
- ? Label::kFar
- : Label::kNear;
- __ j(zero, &two_byte_slice, jump_distance);
+ __ j(zero, &two_byte_slice, Label::kNear);
__ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
- __ jmp(&set_slice_header, jump_distance);
+ __ jmp(&set_slice_header, Label::kNear);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
__ bind(&set_slice_header);
__ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
- __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
+ __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
+ __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
- __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
+ __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
+ __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
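(Context for the set_slice_header block above: the four stores fill in the header of the freshly allocated SlicedString. The plain-struct sketch below is an illustration of that layout, not the real object definition.)

#include <cstdint>

// Fields written by the set_slice_header block, in the same order.
struct SlicedStringHeaderSketch {
  int32_t  length;      // smi-tagged length of the slice
  uint32_t hash_field;  // String::kEmptyHashField until lazily computed
  void*    parent;      // underlying string (rdi above)
  int32_t  offset;      // start index into the parent (rdx above)
};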
@@ -4245,7 +3437,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_CHECK(kShortExternalStringTag != 0);
__ testb(rbx, Immediate(kShortExternalStringMask));
__ j(not_zero, &runtime);
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -4260,7 +3452,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: result string
// rcx: result string length
- __ movq(r14, rsi); // esi used by following code.
+ __ movp(r14, rsi); // esi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
@@ -4275,7 +3467,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rsi: character of sub string start
// r14: original value of rsi
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, r14); // Restore rsi.
+ __ movp(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
@@ -4285,7 +3477,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: result string
// rcx: result string length
- __ movq(r14, rsi); // esi used by following code.
+ __ movp(r14, rsi); // esi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
@@ -4300,7 +3492,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rsi: character of sub string start
// r14: original value of rsi
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, r14); // Restore esi.
+ __ movp(rsi, r14); // Restore esi.
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
@@ -4330,7 +3522,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare lengths.
Label check_zero_length;
- __ movq(length, FieldOperand(left, String::kLengthOffset));
+ __ movp(length, FieldOperand(left, String::kLengthOffset));
__ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
__ j(equal, &check_zero_length, Label::kNear);
__ Move(rax, Smi::FromInt(NOT_EQUAL));
@@ -4374,8 +3566,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
// Find minimum length and length difference.
- __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movq(scratch4, scratch1);
+ __ movp(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ movp(scratch4, scratch1);
__ SmiSub(scratch4,
scratch4,
FieldOperand(right, String::kLengthOffset));
@@ -4399,7 +3591,10 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare loop.
Label result_not_equal;
GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
+ &result_not_equal,
+ // In debug-code mode, SmiTest below might push
+ // the target label outside the near range.
+ Label::kFar);
// Completed loop without finding different characters.
// Compare lengths (precomputed).
@@ -4472,8 +3667,8 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// rsp[16] : left string
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rdx, args.GetArgumentOperand(0)); // left
- __ movq(rax, args.GetArgumentOperand(1)); // right
+ __ movp(rdx, args.GetArgumentOperand(0)); // left
+ __ movp(rax, args.GetArgumentOperand(1)); // right
// Check for identity.
Label not_same;
@@ -4504,6 +3699,231 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
+void ArrayPushStub::Generate(MacroAssembler* masm) {
+ int argc = arguments_count();
+
+ StackArgumentsAccessor args(rsp, argc);
+ if (argc == 0) {
+ // Noop, return the length.
+ __ movp(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ ret((argc + 1) * kPointerSize);
+ return;
+ }
+
+ Isolate* isolate = masm->isolate();
+
+ if (argc != 1) {
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ Label call_builtin, attempt_to_grow_elements, with_write_barrier;
+
+ // Get the elements array of the object.
+ __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ // Check that the elements are in fast mode and writable.
+ __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
+ isolate->factory()->fixed_array_map());
+ __ j(not_equal, &call_builtin);
+ }
+
+ // Get the array's length into rax and calculate new length.
+ __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
+ __ addl(rax, Immediate(argc));
+
+ // Get the elements' length into rcx.
+ __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmpl(rax, rcx);
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ __ j(greater, &attempt_to_grow_elements);
+
+ // Check if value is a smi.
+ __ movp(rcx, args.GetArgumentOperand(1));
+ __ JumpIfNotSmi(rcx, &with_write_barrier);
+
+ // Store the value.
+ __ movp(FieldOperand(rdi,
+ rax,
+ times_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize),
+ rcx);
+ } else {
+ __ j(greater, &call_builtin);
+
+ __ movp(rcx, args.GetArgumentOperand(1));
+ __ StoreNumberToDoubleElements(
+ rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
+ }
+
+ // Save new length.
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
+ __ ret((argc + 1) * kPointerSize);
+
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ __ bind(&with_write_barrier);
+
+ if (IsFastSmiElementsKind(elements_kind())) {
+ if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
+
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
+ isolate->factory()->heap_number_map());
+ __ j(equal, &call_builtin);
+
+ ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
+ ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ __ movp(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
+ __ movp(rbx, ContextOperand(rbx, Context::JS_ARRAY_MAPS_INDEX));
+ const int header_size = FixedArrayBase::kHeaderSize;
+ // Verify that the object can be transitioned in place.
+ const int origin_offset = header_size + elements_kind() * kPointerSize;
+ __ movp(rdi, FieldOperand(rbx, origin_offset));
+ __ cmpq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ j(not_equal, &call_builtin);
+
+ const int target_offset = header_size + target_kind * kPointerSize;
+ __ movp(rbx, FieldOperand(rbx, target_offset));
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, DONT_TRACK_ALLOCATION_SITE, NULL);
+ __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+ }
+
+ // Save new length.
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+ // Store the value.
+ __ lea(rdx, FieldOperand(rdi,
+ rax, times_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ movp(Operand(rdx, 0), rcx);
+
+ __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&attempt_to_grow_elements);
+ if (!FLAG_inline_new) {
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ __ movp(rbx, args.GetArgumentOperand(1));
+ // Growing elements that are SMI-only requires special handling in case the
+ // new element is non-Smi. For now, delegate to the builtin.
+ Label no_fast_elements_check;
+ __ JumpIfSmi(rbx, &no_fast_elements_check);
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
+ __ bind(&no_fast_elements_check);
+
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate);
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate);
+
+ const int kAllocationDelta = 4;
+ ASSERT(kAllocationDelta >= argc);
+ // Load top.
+ __ Load(rcx, new_space_allocation_top);
+
+ // Check if it's the end of elements.
+ __ lea(rdx, FieldOperand(rdi,
+ rax, times_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ cmpq(rdx, rcx);
+ __ j(not_equal, &call_builtin);
+ __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
+ Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
+ __ cmpq(rcx, limit_operand);
+ __ j(above, &call_builtin);
+
+ // We fit and could grow elements.
+ __ Store(new_space_allocation_top, rcx);
+
+ // Push the argument...
+ __ movp(Operand(rdx, 0), rbx);
+ // ... and fill the rest with holes.
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < kAllocationDelta; i++) {
+ __ movp(Operand(rdx, i * kPointerSize), kScratchRegister);
+ }
+
+ if (IsFastObjectElementsKind(elements_kind())) {
+ // We know the elements array is in new space so we don't need the
+ // remembered set, but we just pushed a value onto it so we may have to tell
+ // the incremental marker to rescan the object that we just grew. We don't
+ // need to worry about the holes because they are in old space and already
+ // marked black.
+ __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+ }
+
+ // Restore receiver to rdx as finish sequence assumes it's here.
+ __ movp(rdx, args.GetReceiverOperand());
+
+ // Increment the elements' and the array's sizes.
+ __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
+ Smi::FromInt(kAllocationDelta));
+
+ // Make new length a smi before returning it.
+ __ Integer32ToSmi(rax, rax);
+ __ movp(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+}
+
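(Context for the attempt_to_grow_elements path above: the stub only grows the elements array in place when that array is the most recently bump-allocated object in new space and the extra slots still fit under the allocation limit. The plain-C++ sketch below restates that check with hypothetical names; it is an illustration, not part of the patch.)

#include <cstddef>
#include <cstdint>

// Returns true and bumps the allocation top if the elements array can be
// extended in place by 'delta_bytes'.
bool TryGrowInPlace(uintptr_t elements_end, uintptr_t* new_space_top,
                    uintptr_t new_space_limit, size_t delta_bytes) {
  if (elements_end != *new_space_top) return false;  // something was allocated after it
  uintptr_t new_top = *new_space_top + delta_bytes;
  if (new_top > new_space_limit) return false;       // no room left in new space
  *new_space_top = new_top;                          // claim the extra slots
  return true;
}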
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdx : left
+ // -- rax : right
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ // Load rcx with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ Move(rcx, handle(isolate->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ Assert(not_equal, kExpectedAllocationSite);
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
+ isolate->factory()->allocation_site_map());
+ __ Assert(equal, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(state_);
+ __ TailCallStub(&stub);
+}
+
+
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMI);
Label miss;
@@ -4519,7 +3939,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
// Correct sign of result in case of overflow.
__ not_(rdx);
__ bind(&done);
- __ movq(rax, rdx);
+ __ movp(rax, rdx);
}
__ ret(0);
@@ -4545,7 +3965,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
// Load left and right operand.
Label done, left, left_smi, right_smi;
__ JumpIfSmi(rax, &right_smi, Label::kNear);
- __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ CompareMap(rax, masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
@@ -4555,7 +3975,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&left);
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
- __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -4621,8 +4041,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ j(cond, &miss, Label::kNear);
// Check that both operands are internalized strings.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -4665,8 +4085,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
@@ -4710,11 +4130,11 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Check that both operands are strings. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ movq(tmp3, tmp1);
+ __ movp(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
__ or_(tmp3, tmp2);
__ testb(tmp3, Immediate(kIsNotStringMask));
@@ -4803,8 +4223,8 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
- __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ Cmp(rcx, known_map_);
__ j(not_equal, &miss, Label::kNear);
__ Cmp(rbx, known_map_);
@@ -4872,7 +4292,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties,
+ __ movp(entity_name, Operand(properties,
index,
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
@@ -4889,7 +4309,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ j(equal, &good, Label::kNear);
// Check if the entry name is not a unique name.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
miss);
__ bind(&good);
@@ -4990,7 +4410,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
kPointerSize);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, args.GetArgumentOperand(1));
+ __ movp(scratch, args.GetArgumentOperand(1));
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
@@ -5001,7 +4421,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
- __ movq(scratch, Operand(dictionary_,
+ __ movp(scratch, Operand(dictionary_,
index_,
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
@@ -5019,7 +4439,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// key we are looking for.
// Check if the entry name is not a unique name.
- __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
&maybe_in_dictionary);
}
@@ -5030,108 +4450,29 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
if (mode_ == POSITIVE_LOOKUP) {
- __ movq(scratch, Immediate(0));
+ __ movp(scratch, Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
__ bind(&in_dictionary);
- __ movq(scratch, Immediate(1));
+ __ movp(scratch, Immediate(1));
__ Drop(1);
__ ret(2 * kPointerSize);
__ bind(&not_in_dictionary);
- __ movq(scratch, Immediate(0));
+ __ movp(scratch, Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore.
- { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
- { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
- { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
- // StoreArrayLiteralElementStub::Generate
- { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub::Generate and
- // StringAddStub::Generate
- { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
- // StringAddStub::Generate
- { REG(rcx), REG(rax), REG(rbx), EMIT_REMEMBERED_SET},
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
+ stub2.GetCode(isolate);
}
@@ -5185,7 +4526,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(),
regs_.scratch0(),
&dont_need_remembered_set);
@@ -5259,13 +4600,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_object;
- __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
__ and_(regs_.scratch0(), regs_.object());
- __ movq(regs_.scratch1(),
+ __ movp(regs_.scratch1(),
Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset));
__ subq(regs_.scratch1(), Immediate(1));
- __ movq(Operand(regs_.scratch0(),
+ __ movp(Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset),
regs_.scratch1());
__ j(negative, &need_incremental);
@@ -5292,7 +4633,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
__ bind(&on_black);
// Get the value from the slot.
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
@@ -5361,9 +4702,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Get array literal index, array literal and its map.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rdx, args.GetArgumentOperand(1));
- __ movq(rbx, args.GetArgumentOperand(0));
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+ __ movp(rdx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(0));
+ __ movp(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ CheckFastElements(rdi, &double_elements);
@@ -5379,7 +4720,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ push(rbx);
__ push(rcx);
__ push(rax);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ push(rdx);
__ PushReturnAddressFrom(rdi);
@@ -5388,10 +4729,10 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
__ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
+ __ movp(Operand(rcx, 0), rax);
// Update the write barrier for the array store.
__ RecordWrite(rbx, rcx, rax,
kDontSaveFPRegs,
@@ -5403,15 +4744,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// FAST_*_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize), rax);
__ ret(0);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
- __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(r9, FieldOperand(rbx, JSObject::kElementsOffset));
__ SmiToInteger32(r11, rcx);
__ StoreNumberToDoubleElements(rax,
r9,
@@ -5427,7 +4768,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ movq(rbx, MemOperand(rbp, parameter_count_offset));
+ __ movp(rbx, MemOperand(rbp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ PopReturnAddressTo(rcx);
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
@@ -5440,10 +4781,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- // It's always safe to call the entry hook stub, as the hook itself
- // is not allowed to call back to V8.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
ProfileEntryHookStub stub;
masm->CallStub(&stub);
}
@@ -5458,18 +4795,19 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ push(arg_reg_2);
// Calculate the original stack pointer and store it in the second arg.
- __ lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
+ __ lea(arg_reg_2,
+ Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
// Calculate the function address to the first arg.
- __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize));
+ __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
__ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
// Save the remainder of the volatile registers.
masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
// Call the entry hook function.
- __ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
- RelocInfo::NONE64);
+ __ Move(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+ Assembler::RelocInfoNone());
AllowExternalCallThatCantCauseGC scope(masm);
@@ -5490,9 +4828,7 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
- mode);
+ T stub(GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -5517,7 +4853,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // rbx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // rbx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
// rax - number of arguments
// rdi - constructor?
@@ -5543,7 +4879,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
+ __ movp(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
@@ -5552,31 +4888,31 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind holey_initial = GetHoleyElementsKind(initial);
ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ incl(rdx);
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+
if (FLAG_debug_code) {
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ Assert(equal, kExpectedAllocationSiteInCell);
+ __ Cmp(FieldOperand(rbx, 0), allocation_site_map);
+ __ Assert(equal, kExpectedAllocationSite);
}
- // Save the resulting elements kind in type info
- __ Integer32ToSmi(rdx, rdx);
- __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
- __ SmiToInteger32(rdx, rdx);
+ // Save the resulting elements kind in type info. We can't just store rdx
+ // in the AllocationSite::transition_info field because the elements kind is
+ // restricted to a portion of the field; the upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ SmiAddConstant(FieldOperand(rbx, AllocationSite::kTransitionInfoOffset),
+ Smi::FromInt(kFastElementsKindPackedToHoley));
__ bind(&normal_sequence);
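(Context for the SmiAddConstant above: it relies on the elements kind occupying the low bits of transition_info, so adding the packed-to-holey delta cannot carry into the upper bits. The sketch below operates on the untagged value for clarity; the delta of 1 is an assumed value of kFastElementsKindPackedToHoley used only for illustration.)

#include <cstdint>

// Bumps a packed elements kind to its holey counterpart; the kind field
// occupies the low bits, so a plain add leaves the upper bits untouched.
uint32_t BumpPackedToHoley(uint32_t transition_info) {
  const uint32_t kPackedToHoleyDelta = 1;  // hypothetical value of
                                           // kFastElementsKindPackedToHoley
  return transition_info + kPackedToHoleyDelta;
}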
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -5601,20 +4937,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate);
}
}
}
@@ -5636,11 +4967,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -5676,7 +5007,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
+ // -- rbx : feedback vector (fixed array or undefined)
+ // -- rdx : slot index (if rbx is fixed array)
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
@@ -5690,7 +5022,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
@@ -5698,28 +5030,38 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in rbx or a valid cell
+ // We should either have undefined in rbx or a valid fixed array.
Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
+ Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), cell_map);
- __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
+ __ Cmp(FieldOperand(rbx, 0), fixed_array_map);
+ __ Assert(equal, kExpectedFixedArrayInRegisterRbx);
+
+ // rdx should be a smi if we don't have undefined in rbx.
+ __ AssertSmi(rdx);
+
__ bind(&okay_here);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
+ // If the feedback slot is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
- __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
- __ Cmp(FieldOperand(rdx, 0),
+ __ SmiToInteger32(rdx, rdx);
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ Integer32ToSmi(rdx, rdx);
+ __ Cmp(FieldOperand(rbx, 0),
masm->isolate()->factory()->allocation_site_map());
__ j(not_equal, &no_info);
- __ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
+ // Only look at the lower 16 bits of the transition info.
+ __ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
@@ -5745,7 +5087,7 @@ void InternalArrayConstructorStub::GenerateCase(
// We might need to create a holey array
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
+ __ movp(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
@@ -5767,7 +5109,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
@@ -5778,7 +5119,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
@@ -5788,7 +5129,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
// Figure out the right elements kind
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
@@ -5817,6 +5158,183 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : callee
+ // -- rbx : call_data
+ // -- rcx : holder
+ // -- rdx : api_function_address
+ // -- rsi : context
+ // --
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -- ...
+ // -- rsp[argc * 8] : first argument
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ Register callee = rax;
+ Register call_data = rbx;
+ Register holder = rcx;
+ Register api_function_address = rdx;
+ Register return_address = rdi;
+ Register context = rsi;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ __ PopReturnAddressTo(return_address);
+
+ // context save
+ __ push(context);
+ // load context from callee
+ __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ // isolate
+ __ Move(scratch,
+ ExternalReference::isolate_address(masm->isolate()));
+ __ push(scratch);
+ // holder
+ __ push(holder);
+
+ __ movp(scratch, rsp);
+ // Push return address back on stack.
+ __ PushReturnAddressFrom(return_address);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ __ PrepareCallApiFunction(kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_.
+ __ movp(StackSpaceOperand(0), scratch);
+ __ addq(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ __ movp(StackSpaceOperand(1), scratch); // FunctionCallbackInfo::values_.
+ __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Set(StackSpaceOperand(3), 0);
+
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register arguments_arg = rcx;
+ Register callback_arg = rdx;
+#else
+ Register arguments_arg = rdi;
+ Register callback_arg = rsi;
+#endif
+
+ // It's okay if api_function_address == callback_arg
+ // but not arguments_arg
+ ASSERT(!api_function_address.is(arguments_arg));
+
+ // v8::InvocationCallback's argument.
+ __ lea(arguments_arg, StackSpaceOperand(0));
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+
+ // Accessor for FunctionCallbackInfo and first js arg.
+ StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
+ FCA::kArgsLength - FCA::kContextSaveIndex);
+ // Stores return the first js argument
+ Operand return_value_operand = args_from_rbp.GetArgumentOperand(
+ is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(
+ api_function_address,
+ thunk_address,
+ callback_arg,
+ argc + FCA::kArgsLength + 1,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- ...
+ // -- r8 : api_function_address
+ // -----------------------------------
+
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register getter_arg = r8;
+ Register accessor_info_arg = rdx;
+ Register name_arg = rcx;
+#else
+ Register getter_arg = rdx;
+ Register accessor_info_arg = rsi;
+ Register name_arg = rdi;
+#endif
+ Register api_function_address = r8;
+ Register scratch = rax;
+
+ // v8::Arguments::values_ and handler for name.
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Allocate v8::AccessorInfo in non-GCed stack space.
+ const int kArgStackSpace = 1;
+
+ __ lea(name_arg, Operand(rsp, kPCOnStackSize));
+
+ __ PrepareCallApiFunction(kArgStackSpace);
+ __ lea(scratch, Operand(name_arg, 1 * kPointerSize));
+
+ // v8::PropertyAccessorInfo::args_.
+ __ movp(StackSpaceOperand(0), scratch);
+
+ // The context register (rsi) has been saved in PrepareCallApiFunction and
+ // could be used to pass arguments.
+ __ lea(accessor_info_arg, StackSpaceOperand(0));
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+
+ // It's okay if api_function_address == getter_arg
+ // but not accessor_info_arg or name_arg
+ ASSERT(!api_function_address.is(accessor_info_arg) &&
+ !api_function_address.is(name_arg));
+
+ // The name handler is counted as an argument.
+ StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
+ Operand return_value_operand = args.GetArgumentOperand(
+ PropertyCallbackArguments::kArgsLength - 1 -
+ PropertyCallbackArguments::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_address,
+ getter_arg,
+ kStackSpace,
+ return_value_operand,
+ NULL);
+}
+
+
#undef __
} } // namespace v8::internal
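
The code-stubs hunks above drop the per-call-site type-info Cell: the array constructor stubs now read an AllocationSite out of a feedback-vector slot (rbx holds the fixed array, rdx the smi slot index) and keep the elements kind packed in the low bits of AllocationSite::transition_info. The following standalone sketch only models that bit-field discipline; the constant names, the 16-bit mask, and main() are illustrative assumptions, not code from the patch.

#include <cassert>
#include <cstdint>

// Illustrative stand-ins for AllocationSite::ElementsKindBits: elements kind
// lives in the low bits (kShift == 0); everything above must be preserved.
constexpr std::uint32_t kElementsKindShift = 0;
constexpr std::uint32_t kElementsKindMask = 0xFFFF;  // assumption for the sketch

// What CreateArrayDispatchOneArgument now does for PACKED -> HOLEY: add a
// small constant to the field instead of overwriting it, leaving the upper
// bits (pretenuring data, etc.) alone.
std::uint32_t TransitionToHoley(std::uint32_t transition_info,
                                std::uint32_t packed_to_holey) {
  return transition_info + (packed_to_holey << kElementsKindShift);
}

// What ArrayConstructorStub::Generate now does after loading the slot: mask
// off everything but the elements-kind bits before dispatching.
std::uint32_t ReadElementsKind(std::uint32_t transition_info) {
  return (transition_info >> kElementsKindShift) & kElementsKindMask;
}

int main() {
  std::uint32_t info = (0x5u << 16) | 2;  // upper bits: unrelated data, kind == 2
  info = TransitionToHoley(info, 1);      // kind becomes 3, upper bits untouched
  assert(ReadElementsKind(info) == 3);
  assert((info >> 16) == 0x5u);
  return 0;
}
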
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index c76abcf001..c65307a74e 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -29,7 +29,6 @@
#define V8_X64_CODE_STUBS_X64_H_
#include "ic-inl.h"
-#include "type-info.h"
namespace v8 {
namespace internal {
@@ -37,31 +36,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- explicit TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- static void GenerateOperation(MacroAssembler* masm,
- TranscendentalCache::Type type);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
@@ -69,7 +43,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -83,16 +56,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii);
-
// Generate code for copying characters using the rep movs instruction.
// Copies rcx characters from rsi to rdi. Copying of overlapping regions is
// not supported.
@@ -103,19 +66,6 @@ class StringHelper : public AllStatic {
bool ascii);
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register rax.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found);
-
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
@@ -134,31 +84,6 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm, Register temp);
-
- const StringAddFlags flags_;
-};
-
-
class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -293,8 +218,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
@@ -391,11 +314,11 @@ class RecordWriteStub: public PlatformCodeStub {
masm->push(scratch1_);
if (!address_.is(address_orig_)) {
masm->push(address_);
- masm->movq(address_, address_orig_);
+ masm->movp(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
masm->push(object_);
- masm->movq(object_, object_orig_);
+ masm->movp(object_, object_orig_);
}
}
@@ -404,11 +327,11 @@ class RecordWriteStub: public PlatformCodeStub {
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with rcx.
if (!object_.is(object_orig_)) {
- masm->movq(object_orig_, object_);
+ masm->movp(object_orig_, object_);
masm->pop(object_);
}
if (!address_.is(address_orig_)) {
- masm->movq(address_orig_, address_);
+ masm->movp(address_orig_, address_);
masm->pop(address_);
}
masm->pop(scratch1_);
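
A large share of the remaining hunks are mechanical movq -> movp renames: movp is the macro-assembler's pointer-sized move, while movq stays reserved for values that are always 64 bits wide (doubles, raw bit patterns). A rough standalone illustration of that split, with the helper names and sizes invented for the example:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Invented helpers: "movp" copies kPointerSize bytes, "movq" always copies 8.
constexpr std::size_t kPointerSize = sizeof(void*);  // 8 on x64, 4 under x32
constexpr std::size_t kRegisterSize = 8;             // native register width

void movp(void* dst, const void* src) { std::memcpy(dst, src, kPointerSize); }
void movq(void* dst, const void* src) { std::memcpy(dst, src, kRegisterSize); }

int main() {
  std::uint64_t a = 0x1122334455667788ull, b = 0;
  movq(&b, &a);   // a full 64-bit payload, e.g. a double's bit pattern
  void* p = &a;
  void* q = nullptr;
  movp(&q, &p);   // a field/pointer value: only kPointerSize bytes
  return (b == a && q == p) ? 0 : 1;
}
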
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 390ec7c9c9..f292f7d251 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -55,55 +55,11 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) {
- // Fallback to library function if function cannot be created.
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- }
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- // Move double input into registers.
- __ push(rbx);
- __ push(rdi);
- __ movq(rbx, xmm0);
- __ push(rbx);
- __ fld_d(Operand(rsp, 0));
- TranscendentalCacheStub::GenerateOperation(&masm, type);
- // The return value is expected to be in xmm0.
- __ fstp_d(Operand(rsp, 0));
- __ pop(rbx);
- __ movq(xmm0, rbx);
- __ pop(rdi);
- __ pop(rbx);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -136,7 +92,7 @@ UnaryMathFunction CreateSqrtFunction() {
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
- if (buffer == NULL) return &sqrt;
+ if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// xmm0: raw double input.
@@ -175,10 +131,10 @@ ModuloFunction CreateModuloFunction() {
// Compute x mod y.
// Load y and x (use argument backing store as temporary storage).
- __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
- __ movsd(Operand(rsp, kPointerSize), xmm0);
- __ fld_d(Operand(rsp, kPointerSize * 2));
- __ fld_d(Operand(rsp, kPointerSize));
+ __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
+ __ movsd(Operand(rsp, kRegisterSize), xmm0);
+ __ fld_d(Operand(rsp, kRegisterSize * 2));
+ __ fld_d(Operand(rsp, kRegisterSize));
// Clear exception flags before operation.
{
@@ -213,15 +169,15 @@ ModuloFunction CreateModuloFunction() {
__ j(zero, &valid_result);
__ fstp(0); // Drop result in st(0).
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
- __ movq(rcx, kNaNValue, RelocInfo::NONE64);
- __ movq(Operand(rsp, kPointerSize), rcx);
- __ movsd(xmm0, Operand(rsp, kPointerSize));
+ __ movq(rcx, kNaNValue);
+ __ movq(Operand(rsp, kRegisterSize), rcx);
+ __ movsd(xmm0, Operand(rsp, kRegisterSize));
__ jmp(&return_result);
// If result is valid, return that.
__ bind(&valid_result);
- __ fstp_d(Operand(rsp, kPointerSize));
- __ movsd(xmm0, Operand(rsp, kPointerSize));
+ __ fstp_d(Operand(rsp, kRegisterSize));
+ __ movsd(xmm0, Operand(rsp, kRegisterSize));
// Clean up FPU stack and exceptions and return xmm0
__ bind(&return_result);
@@ -267,7 +223,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -296,7 +252,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
@@ -312,18 +268,18 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// the same size.
__ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
- __ movq(r14, r8); // Destination array equals source array.
+ __ movp(r14, r8); // Destination array equals source array.
// r8 : source FixedArray
// r9 : elements array length
// r14: destination FixedDoubleArray
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
__ bind(&allocated);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -338,7 +294,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
Label loop, entry, convert_hole;
- __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+ __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
// r15: the-hole NaN
__ jmp(&entry);
@@ -348,10 +304,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
// Set receiver's backing store.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
- __ movq(r11, r14);
+ __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+ __ movp(r11, r14);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
@@ -361,12 +317,12 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_SMI_CHECK);
// Set backing store's length.
__ Integer32ToSmi(r11, r9);
- __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+ __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
__ jmp(&allocated);
__ bind(&only_change_map);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -378,7 +334,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Conversion loop.
__ bind(&loop);
- __ movq(rbx,
+ __ movp(rbx,
FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
// r9 : current element's index
// rbx: current element (smi-tagged)
@@ -421,13 +377,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
__ push(rax);
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
// r8 : source FixedDoubleArray
// r9 : number of elements
@@ -435,12 +391,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
__ Integer32ToSmi(r14, r9);
- __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+ __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
// Prepare for conversion loop.
- __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+ __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
__ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
// rsi: the-hole NaN
// rdi: pointer to the-hole
@@ -449,7 +405,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(fail);
// Box doubles into heap numbers.
@@ -466,13 +422,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(rax, r15, &gc_required);
// rax: new heap number
- __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
- __ movq(FieldOperand(r11,
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
rax);
- __ movq(r15, r9);
+ __ movp(r15, r9);
__ RecordWriteArray(r11,
rax,
r15,
@@ -483,7 +439,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ movq(FieldOperand(r11,
+ __ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
@@ -494,7 +450,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+ __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
@@ -503,11 +459,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&only_change_map);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -524,7 +480,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
@@ -541,7 +497,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Label indirect_string_loaded;
__ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
__ addq(index, result);
- __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+ __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded, Label::kNear);
// Handle cons strings.
@@ -553,10 +509,10 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, call_runtime);
- __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+ __ movp(string, FieldOperand(string, ConsString::kFirstOffset));
__ bind(&indirect_string_loaded);
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
@@ -583,7 +539,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Check encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
__ testb(result, Immediate(kStringEncodingMask));
- __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+ __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
__ j(not_equal, &ascii_external, Label::kNear);
// Two-byte string.
__ movzxwl(result, Operand(result, index, times_2, 0));
@@ -635,7 +591,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label done;
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
__ xorpd(result, result);
__ ucomisd(double_scratch, input);
@@ -654,10 +610,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ and_(temp2, Immediate(0x7ff));
__ shr(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
- __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
+ __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
__ shl(temp1, Immediate(52));
__ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
__ movsd(input, double_scratch);
__ subsd(result, double_scratch);
@@ -685,7 +641,7 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
// FUNCTION and OPTIMIZED_FUNCTION code:
CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
patcher.masm()->push(rbp);
- patcher.masm()->movq(rbp, rsp);
+ patcher.masm()->movp(rbp, rsp);
patcher.masm()->push(rsi);
patcher.masm()->push(rdi);
initialized = true;
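
CreateExpFunction and CreateSqrtFunction keep their existing shape: emit a small native routine into an executable buffer and fall back to the C library when the buffer cannot be allocated, with the fallback now spelled std::exp / std::sqrt. A minimal sketch of that fallback pattern, with the code-generation step stubbed out as an assumption:

#include <cmath>

typedef double (*UnaryMathFunction)(double);

// Stand-in for the real code generator: pretend the executable buffer could
// not be allocated, which is exactly the case the patch routes to std::sqrt.
static UnaryMathFunction TryGenerateNativeSqrt() { return nullptr; }

UnaryMathFunction CreateSqrtFunctionSketch() {
  UnaryMathFunction generated = TryGenerateNativeSqrt();
  if (generated != nullptr) return generated;
  return &std::sqrt;  // library fallback, as in the hunk above
}

int main() {
  UnaryMathFunction sqrt_fn = CreateSqrtFunctionSketch();
  return sqrt_fn(9.0) == 3.0 ? 0 : 1;
}
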
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 811ac507d5..e637ff0061 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -34,45 +34,9 @@
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- explicit CodeGenerator(Isolate* isolate) {
- InitializeAstVisitor(isolate);
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 6612242a03..938703ef3e 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -132,7 +132,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
+ __ Move(rbx, ExternalReference::debug_break(masm->isolate()));
CEntryStub ceb(1);
__ CallStub(&ceb);
@@ -164,7 +164,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ addq(rsp, Immediate(kPointerSize));
+ __ addq(rsp, Immediate(kPCOnStackSize));
}
// Now that the break point has been handled, resume normal execution by
@@ -172,8 +172,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ movq(kScratchRegister, after_break_target);
- __ jmp(Operand(kScratchRegister, 0));
+ __ Move(kScratchRegister, after_break_target);
+ __ Jump(Operand(kScratchRegister, 0));
}
@@ -261,9 +261,11 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-x64.cc).
// ----------- S t a t e -------------
// -- rdi : function
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ 0, false);
}
@@ -285,10 +287,12 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// above IC call.
// ----------- S t a t e -------------
// -- rax: number of arguments
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: feedback slot (smi)
// -----------------------------------
// The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ rax.bit(), false);
}
@@ -319,8 +323,8 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
masm->isolate());
- __ movq(rax, restarter_frame_function_slot);
- __ movq(Operand(rax, 0), Immediate(0));
+ __ Move(rax, restarter_frame_function_slot);
+ __ movp(Operand(rax, 0), Immediate(0));
// We do not know our frame height, but set rsp based on rbp.
__ lea(rsp, Operand(rbp, -1 * kPointerSize));
@@ -329,11 +333,11 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ pop(rbp);
// Load context from the function.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Get function code.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context.
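
The debug-break hunks only widen the set of registers that Generate_DebugBreakCallHelper must treat as live object pointers: rdx (the feedback slot) joins rbx and rdi at the CallFunctionStub and CallConstructStub break sites. A toy model of that register bit mask, using the usual x64 register codes purely for illustration:

#include <cassert>
#include <cstdint>

// x64 register codes, used here only to illustrate the bit-mask argument of
// Generate_DebugBreakCallHelper.
enum RegisterCode { kRdx = 2, kRbx = 3, kRdi = 7 };

constexpr std::uint32_t Bit(RegisterCode r) { return 1u << r; }

int main() {
  // Before: preserve rbx and rdi.  After: rdx (the feedback slot) as well.
  std::uint32_t old_mask = Bit(kRbx) | Bit(kRdi);
  std::uint32_t new_mask = Bit(kRbx) | Bit(kRdx) | Bit(kRdi);
  assert((new_mask & ~old_mask) == Bit(kRdx));
  return 0;
}
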
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index bc51c54099..aee8be6e1b 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -71,7 +71,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// LLazyBailout instructions with nops if necessary.
CodePatcher patcher(call_address, Assembler::kCallSequenceLength);
patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
- RelocInfo::NONE64);
+ Assembler::RelocInfoNone());
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -97,7 +97,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ input_->SetFrameSlot(i, Memory::uintptr_at(tos + i));
}
}
@@ -106,7 +106,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->environment_length();
+ int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(rax.code(), params);
output_frame->SetRegister(rbx.code(), handler);
}
@@ -156,7 +156,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ push(r);
}
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
kDoubleRegsSize;
// We use this to keep the value of the fifth argument temporarily.
@@ -165,32 +165,32 @@ void Deoptimizer::EntryGenerator::Generate() {
Register arg5 = r11;
// Get the bailout id from the stack.
- __ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
+ __ movp(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- __ movq(arg_reg_4,
- Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
+ __ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
+ kPCOnStackSize));
__ subq(arg5, rbp);
__ neg(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(arg_reg_1, rax);
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(arg_reg_1, rax);
__ Set(arg_reg_2, type());
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64
- __ movq(Operand(rsp, 4 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 4 * kRegisterSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate()));
- __ movq(Operand(rsp, 5 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 5 * kRegisterSize), arg5);
#else
- __ movq(r8, arg5);
+ __ movp(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#endif
@@ -199,7 +199,7 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
- __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+ __ movp(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
@@ -215,11 +215,11 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Remove the bailout id and return address from the stack.
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ addq(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
@@ -239,7 +239,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Compute the output frame in the deoptimizer.
__ push(rax);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, rax);
+ __ movp(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate()));
{
AllowExternalCallThatCantCauseGC scope(masm());
@@ -254,13 +254,13 @@ void Deoptimizer::EntryGenerator::Generate() {
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ movp(rax, Operand(rax, Deoptimizer::output_offset()));
__ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movq(rbx, Operand(rax, 0));
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ movp(rbx, Operand(rax, 0));
+ __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
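
The deoptimizer entry now distinguishes kRegisterSize and kPCOnStackSize from kPointerSize when computing stack offsets, so the layout stays valid if pointer slots ever become narrower than registers. A standalone sketch of the offset arithmetic under that distinction; the constant values below are illustrative and simply match classic x64:

#include <cstdio>

// Illustrative constants: on classic x64 all of these widths are 8 bytes;
// under an x32-style ABI kPointerSize could drop to 4 while registers and the
// pushed return address stay 8.
constexpr int kRegisterSize = 8;
constexpr int kPCOnStackSize = 8;
constexpr int kNumberOfRegisters = 16;
constexpr int kDoubleRegsSize = 16 * 8;

int main() {
  int saved_registers_area = kNumberOfRegisters * kRegisterSize + kDoubleRegsSize;
  // Bailout id sits just above the saved area, the code address one register
  // slot higher, and the fp-to-sp delta starts past the pushed return address,
  // mirroring the offsets used in Deoptimizer::EntryGenerator::Generate above.
  int bailout_id_offset = saved_registers_area;
  int code_address_offset = saved_registers_area + 1 * kRegisterSize;
  int fp_to_sp_base = saved_registers_area + 1 * kRegisterSize + kPCOnStackSize;
  std::printf("%d %d %d\n", bailout_id_offset, code_address_offset, fp_to_sp_base);
  return 0;
}
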
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 7735b552fe..476eab2b42 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -934,6 +934,7 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
+ case 0xFC: mnem = "frndint"; break;
case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
@@ -956,6 +957,8 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
has_register = true;
} else if (modrm_byte == 0xE2) {
mnem = "fclex";
+ } else if (modrm_byte == 0xE3) {
+ mnem = "fninit";
} else {
UnimplementedInstruction();
}
@@ -1260,19 +1263,37 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = idesc.byte_size_operation;
current += PrintOperands(idesc.mnem, idesc.op_order_, current);
- } else if (opcode == 0x54) {
- // xorps xmm, xmm/m128
+ } else if (opcode >= 0x53 && opcode <= 0x5F) {
+ const char* const pseudo_op[] = {
+ "rcpps",
+ "andps",
+ "andnps",
+ "orps",
+ "xorps",
+ "addps",
+ "mulps",
+ "cvtps2pd",
+ "cvtdq2ps",
+ "subps",
+ "minps",
+ "divps",
+ "maxps",
+ };
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("andps %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("%s %s,",
+ pseudo_op[opcode - 0x53],
+ NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- } else if (opcode == 0x57) {
- // xorps xmm, xmm/m128
+ } else if (opcode == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("xorps %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("shufps %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %d", (*current) & 3);
+ current += 1;
} else if (opcode == 0x50) {
// movmskps reg, xmm
@@ -1558,9 +1579,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
AppendToBuffer("mov%c ", operand_size_code());
data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
+ if (operand_size() == OPERAND_WORD_SIZE) {
+ int16_t imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
+ } else {
+ int32_t imm = *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 4;
+ }
}
}
break;
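
The disassembler change folds the packed-SSE opcodes 0x53 through 0x5F into one mnemonic table indexed by opcode - 0x53, replacing the old special cases for andps and xorps. A tiny standalone check of that indexing, reusing the table from the hunk:

#include <cassert>
#include <cstring>

int main() {
  // Table copied from the hunk; index is opcode - 0x53.
  const char* const pseudo_op[] = {
    "rcpps", "andps", "andnps", "orps", "xorps", "addps", "mulps",
    "cvtps2pd", "cvtdq2ps", "subps", "minps", "divps", "maxps",
  };
  // 0x57 (xorps) used to be a special case; the table covers it as well.
  unsigned opcode = 0x57;
  assert(opcode >= 0x53 && opcode <= 0x5F);
  assert(std::strcmp(pseudo_op[opcode - 0x53], "xorps") == 0);
  return 0;
}
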
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 5cc27a6e12..3154d80a60 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -40,10 +40,24 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return rbp; }
Register JavaScriptFrame::context_register() { return rsi; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
Register StubFailureTrampolineFrame::fp_register() { return rbp; }
Register StubFailureTrampolineFrame::context_register() { return rsi; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index fb17964ada..6eb02a9179 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -66,6 +66,8 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
+ static const int kFrameSize = 2 * kPointerSize;
+
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 02ba67b90e..badf18ed67 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -118,6 +118,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -132,17 +135,23 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). rcx is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
+ // Classic mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->is_classic_mode() && !info->is_native()) {
Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetReceiverOperand(), kScratchRegister);
+ __ movp(rcx, args.GetReceiverOperand());
+
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+
+ __ movp(args.GetReceiverOperand(), rcx);
+
__ bind(&ok);
}
@@ -176,20 +185,22 @@ void FullCodeGenerator::Generate() {
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(rdi);
__ Push(info->scope()->GetScopeInfo());
__ CallRuntime(Runtime::kNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
+ __ push(rdi);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ // Context is returned in rax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in rsi.
+ __ movp(rsi, rax);
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
@@ -199,10 +210,10 @@ void FullCodeGenerator::Generate() {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
+ __ movp(rax, Operand(rbp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
+ __ movp(Operand(rsi, context_offset), rax);
// Update the write barrier. This clobbers rax and rbx.
__ RecordWriteContextSlot(
rsi, context_offset, rax, rbx, kDontSaveFPRegs);
@@ -302,7 +313,7 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
+ __ Move(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ SmiAddConstant(FieldOperand(rbx, Cell::kValueOffset),
Smi::FromInt(-delta));
}
@@ -310,15 +321,9 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing; if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
- RelocInfo::NONE64);
- __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
+ __ Move(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
+ __ Move(kScratchRegister, Smi::FromInt(reset_value));
+ __ movp(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
}
@@ -327,13 +332,10 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
__ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
@@ -364,31 +366,24 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(rax);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(rax);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ __ push(rax);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(rax);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
@@ -398,7 +393,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
__ pop(rbp);
int no_frame_start = masm_->pc_offset();
@@ -555,7 +550,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
ASSERT(count > 0);
if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
+ __ movp(Operand(rsp, 0), reg);
}
@@ -646,7 +641,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
+ CallIC(ic, condition->test_id());
__ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -698,7 +693,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
void FullCodeGenerator::GetVar(Register dest, Variable* var) {
ASSERT(var->IsContextSlot() || var->IsStackAllocated());
MemOperand location = VarOperand(var, dest);
- __ movq(dest, location);
+ __ movp(dest, location);
}
@@ -711,7 +706,7 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch0.is(scratch1));
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
- __ movq(location, src);
+ __ movp(location, src);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
@@ -746,7 +741,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
- __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
__ Check(not_equal, kDeclarationInWithContext);
__ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
@@ -778,7 +773,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(StackOperand(variable), kScratchRegister);
+ __ movp(StackOperand(variable), kScratchRegister);
}
break;
@@ -787,7 +782,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
+ __ movp(ContextOperand(rsi, variable->index()), kScratchRegister);
// No write barrier since the hole value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -837,7 +832,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
- __ movq(StackOperand(variable), result_register());
+ __ movp(StackOperand(variable), result_register());
break;
}
@@ -845,7 +840,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ movq(ContextOperand(rsi, variable->index()), result_register());
+ __ movp(ContextOperand(rsi, variable->index()), result_register());
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(rsi,
@@ -882,11 +877,11 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
// Load instance object.
__ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
- __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
+ __ movp(rax, ContextOperand(rax, variable->interface()->Index()));
+ __ movp(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
// Assign it.
- __ movq(ContextOperand(rsi, variable->index()), rax);
+ __ movp(ContextOperand(rsi, variable->index()), rax);
// We know that we have written a module, which is not a smi.
__ RecordWriteContextSlot(rsi,
Context::SlotOffset(variable->index()),
@@ -980,12 +975,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
+ __ movp(rdx, Operand(rsp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ movq(rcx, rdx);
+ __ movp(rcx, rdx);
__ or_(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
@@ -999,9 +994,18 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
+ Label skip;
+ __ jmp(&skip, Label::kNear);
+ PrepareForBailout(clause, TOS_REG);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
__ testq(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -1034,6 +1038,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1078,7 +1083,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Label use_cache;
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
@@ -1104,8 +1109,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
+ __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(rax); // Map.
@@ -1122,16 +1127,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(rbx, cell);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ // No need for a write barrier, we are storing a Smi in the feedback vector.
+ __ Move(rbx, FeedbackVector());
+ __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
+ Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
- __ movq(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
+ __ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
__ j(above, &non_proxy);
@@ -1139,33 +1145,33 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&non_proxy);
__ push(rbx); // Smi
__ push(rax); // Array
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ __ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- __ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
+ __ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
__ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
// Get the current entry of the array into register rbx.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movp(rbx, Operand(rsp, 2 * kPointerSize));
SmiIndex index = masm()->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rbx,
+ __ movp(rbx, FieldOperand(rbx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
// Get the expected map from the stack or a smi in the
// permanent slow case into register rdx.
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movp(rdx, Operand(rsp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ movq(rcx, Operand(rsp, 4 * kPointerSize));
+ __ movp(rcx, Operand(rsp, 4 * kPointerSize));
__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
@@ -1182,12 +1188,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ Cmp(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_label());
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
// Update the 'each' property or variable from the possibly filtered
// entry in register rbx.
__ bind(&update_each);
- __ movq(result_register(), rbx);
+ __ movp(result_register(), rbx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
@@ -1323,7 +1329,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ j(not_equal, slow);
}
// Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
@@ -1339,7 +1345,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// safe to use raw labels here.
Label next, fast;
if (!context.is(temp)) {
- __ movq(temp, context);
+ __ movp(temp, context);
}
// Load map for comparison into register, outside loop.
__ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
@@ -1351,20 +1357,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ movq(rax, GlobalObjectOperand());
+ __ movp(rax, GlobalObjectOperand());
__ Move(rcx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
}
@@ -1382,7 +1387,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Immediate(0));
__ j(not_equal, slow);
}
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
@@ -1412,7 +1417,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
- __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
+ __ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == LET ||
local->mode() == CONST ||
local->mode() == CONST_HARMONY) {
@@ -1439,13 +1444,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
- __ movq(rax, GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ movp(rax, GlobalObjectOperand());
+ CallLoadIC(CONTEXTUAL);
context()->Plug(rax);
break;
}
@@ -1453,7 +1457,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
+ : "[ Stack slot");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1515,12 +1520,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
__ push(rsi); // Context.
__ Push(var->name());
__ CallRuntime(Runtime::kLoadContextSlot, 2);
@@ -1540,11 +1545,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// rcx = literals array.
// rbx = regexp literal.
// rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
@@ -1555,7 +1560,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ Push(expr->pattern());
__ Push(expr->flags());
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
@@ -1573,14 +1578,14 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ __ movp(rdx, FieldOperand(rbx, i));
+ __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movp(FieldOperand(rax, i), rdx);
+ __ movp(FieldOperand(rax, i + kPointerSize), rcx);
}
if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movp(FieldOperand(rax, size - kPointerSize), rdx);
}
context()->Plug(rax);
}
@@ -1597,6 +1602,8 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
@@ -1609,15 +1616,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
@@ -1656,11 +1663,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->value());
- __ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ __ movp(rdx, Operand(rsp, 0));
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1725,6 +1729,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1736,51 +1745,50 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
- } else if (expr->depth() > 1) {
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_elements);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ __ Push(Smi::FromInt(flags));
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
if (has_constant_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
@@ -1808,10 +1816,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
- __ movq(FieldOperand(rbx, offset), result_register());
+ __ movp(FieldOperand(rbx, offset), result_register());
// Update the write barrier for the array store.
__ RecordWriteField(rbx, offset, result_register(), rcx,
kDontSaveFPRegs,
@@ -1874,7 +1882,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
- __ movq(rdx, Operand(rsp, 0));
+ __ movp(rdx, Operand(rsp, 0));
__ push(rax);
} else {
VisitForStackValue(property->obj());
@@ -1974,8 +1982,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(continuation.pos()));
- __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movq(rcx, rsi);
+ __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
__ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
@@ -1983,7 +1991,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ j(equal, &post_runtime);
__ push(rax); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movq(context_register(),
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
@@ -2042,17 +2050,17 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ jmp(&l_resume);
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
- __ movq(rax, Operand(rsp, generator_object_depth));
+ __ movp(rax, Operand(rsp, generator_object_depth));
__ push(rax); // g
ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(l_continuation.pos()));
- __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movq(rcx, rsi);
+ __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movq(context_register(),
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ pop(rax); // result
EmitReturnSequence();
@@ -2068,17 +2076,23 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The key is still on the stack; drop it.
+ __ movp(rdx, Operand(rsp, kPointerSize));
+ __ movp(rax, Operand(rsp, 2 * kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ movp(rdi, rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rdi);
+ CallFunctionStub stub(1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
// if (!result.done) goto l_try;
__ bind(&l_loop);
__ push(rax); // save result
__ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in rax
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in rax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ testq(result_register(), result_register());
@@ -2087,8 +2101,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(rax); // result
__ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in rax
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
break;
}
@@ -2100,29 +2113,31 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in rax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. rbx
- // will hold the generator object until the activation has been resumed.
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // rbx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ pop(rbx);
// Check generator state.
- Label wrong_state, done;
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ Label wrong_state, closed_state, done;
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
__ SmiCompare(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(0));
- __ j(less_equal, &wrong_state);
+ __ j(equal, &closed_state);
+ __ j(less, &wrong_state);
// Load suspended function and context.
- __ movq(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
- __ movq(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+ __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
// Push receiver.
__ push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
// Push holes for arguments to generator function.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movsxlq(rdx,
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2142,13 +2157,13 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ jmp(&done);
__ bind(&resume_frame);
__ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
__ push(rsi); // Callee's context.
__ push(rdi); // Callee's JS Function.
// Load the operand stack size.
- __ movq(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
- __ movq(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ movp(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
+ __ movp(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
__ SmiToInteger32(rdx, rdx);
// If we are sending a value and there is no operand stack, we can jump back
@@ -2157,7 +2172,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Label slow_resume;
__ cmpq(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
__ addq(rdx, rcx);
@@ -2183,6 +2198,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(rax);
+ __ CallRuntime(Runtime::kThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(rbx);
@@ -2205,7 +2234,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ movq(context_register(),
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
@@ -2213,14 +2242,14 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ pop(rcx);
__ Move(rdx, isolate()->factory()->ToBoolean(done));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
isolate()->factory()->empty_fixed_array());
__ Move(FieldOperand(rax, JSObject::kElementsOffset),
isolate()->factory()->empty_fixed_array());
- __ movq(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
+ __ movp(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
rcx);
- __ movq(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
+ __ movp(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
rdx);
// Only the value field needs a write barrier, as the other values are in the
@@ -2234,15 +2263,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->value());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2256,16 +2284,15 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// rcx to make the shifts easier.
Label done, stub_call, smi_case;
__ pop(rdx);
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ or_(rax, rdx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
- __ movq(rax, rcx);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ __ movp(rax, rcx);
+ BinaryOpICStub stub(op, mode);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2312,10 +2339,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(rdx);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
@@ -2350,20 +2376,17 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
case NAMED_PROPERTY: {
__ push(rax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->value());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
__ push(rax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ pop(rdx);
__ pop(rax); // Restore value.
Handle<Code> ic = is_classic_mode()
@@ -2377,90 +2400,86 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ movp(location, rax);
+ if (var->IsContextSlot()) {
+ __ movp(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, LanguageMode mode) {
+ __ push(rax); // Value.
+ __ push(rsi); // Context.
+ __ Push(name);
+ __ Push(Smi::FromInt(mode));
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
- __ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ movp(rdx, GlobalObjectOperand());
+ CallStoreIC();
+
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ movq(rdx, StackOperand(var));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- __ movq(StackOperand(var), rax);
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(rax);
__ push(rsi);
__ Push(var->name());
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, rcx);
+ __ movp(rdx, location);
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), language_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
MemOperand location = VarOperand(var, rcx);
- __ movq(rdx, location);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &assign, Label::kNear);
__ Push(var->name());
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&assign);
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), language_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
- __ movq(rdx, location);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2477,10 +2496,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->value());
__ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2497,7 +2513,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2524,72 +2540,105 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- __ call(code, rmode, ast_id);
+ __ call(code, RelocInfo::CODE_TARGET, ast_id);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+ Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
+
+ CallFunctionFlags flags;
+ // Get the target function.
+ if (callee->IsVariableProxy()) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a classic mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ flags = NO_CALL_FUNCTION_FLAGS;
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ movp(rax, Operand(rsp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ push(Operand(rsp, 0));
+ __ movp(Operand(rsp, kPointerSize), rax);
+ flags = CALL_AS_METHOD;
+ }
+
+ // Load the arguments.
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
- __ Move(rcx, name);
}
+
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
+ CallFunctionStub stub(arg_count, flags);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
RecordJSReturnSite(expr);
+
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, rax);
}
+// Common code for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(rcx);
- __ push(rax);
- __ push(rcx);
-
- // Load the arguments.
+ Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ movp(rdx, Operand(rsp, 0));
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ push(Operand(rsp, 0));
+ __ movp(Operand(rsp, kPointerSize), rax);
+
+ // Load the arguments.
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
}
+
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
+ CallFunctionStub stub(arg_count, CALL_AS_METHOD);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
RecordJSReturnSite(expr);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax); // Drop the key still on the stack.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, rax);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2601,20 +2650,19 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ Move(rbx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot()));
- CallFunctionStub stub(arg_count, flags);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ // Record call targets in unoptimized code.
+ CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
context()->DropAndPlug(1, rax);
}
@@ -2652,10 +2700,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -2677,25 +2724,24 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
- __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
- __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+ __ movp(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
+ __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, rax);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Call to a global variable. Push global object as receiver for the
- // call IC lookup.
- __ push(GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2722,35 +2768,32 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(rax);
// The receiver is implicitly the global receiver. Indicate this by
- // passing the hole to the call function stub.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ // passing undefined to the call function stub.
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
__ bind(&call);
}
// The receiver is either the global receiver or an object found by
- // LoadContextSlot. That object could be the hole if the receiver is
- // implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
+ // LoadContextSlot.
+ EmitCallWithStub(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithIC(expr);
} else {
EmitKeyedCallWithIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCallWithStub(expr);
}
#ifdef DEBUG
@@ -2784,14 +2827,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
- __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
+ __ movp(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ Move(rbx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot()));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -2858,7 +2901,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -2910,7 +2953,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -2938,14 +2981,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Check whether this map has already been checked to be safe for default
// valueOf.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ j(not_zero, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
- __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
__ j(equal, if_false);
@@ -2974,7 +3017,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// internalized string "valueOf" the result is false.
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, FieldOperand(r8, 0));
+ __ movp(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
__ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
@@ -2992,12 +3035,12 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
__ testq(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ cmpq(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -3029,6 +3072,33 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(rax, map, if_false, DO_SMI_CHECK);
+ __ cmpl(FieldOperand(rax, HeapNumber::kExponentOffset),
+ Immediate(0x80000000));
+ __ j(not_equal, if_false);
+ __ cmpl(FieldOperand(rax, HeapNumber::kMantissaOffset),
+ Immediate(0x00000000));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -3085,14 +3155,14 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
- __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker);
- __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
@@ -3136,7 +3206,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
// ArgumentsAccessStub expects the key in rdx and the formal
// parameter count in rax.
VisitForAccumulatorValue(args->at(0));
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
@@ -3152,14 +3222,14 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &exit, Label::kNear);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
- __ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
__ AssertSmi(rax);
@@ -3197,14 +3267,14 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
+ __ movp(rax, FieldOperand(rax, Map::kConstructorOffset));
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &non_function_constructor);
// rax now contains the constructor function. Grab the
// instance class name from there.
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ movp(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
__ jmp(&done);
// Functions have class 'Function'.
@@ -3249,47 +3319,6 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rbx, rax);
-
- __ bind(&heapnumber_allocated);
-
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(1);
- __ movq(arg_reg_1,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ movq(arg_reg_1,
- FieldOperand(arg_reg_1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorps(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
- __ movq(rax, rbx);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
@@ -3329,7 +3358,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
// If the object is not a value type, return the object.
__ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
__ j(not_equal, &done);
- __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
+ __ movp(rax, FieldOperand(rax, JSValue::kValueOffset));
__ bind(&done);
context()->Plug(rax);
@@ -3354,25 +3383,25 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ j(not_equal, &not_date_object);
if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset));
__ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
- __ movq(scratch, stamp_operand);
+ __ movp(scratch, stamp_operand);
__ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, object);
- __ movq(arg_reg_2, index, RelocInfo::NONE64);
+ __ movp(arg_reg_1, object);
+ __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
}
@@ -3383,30 +3412,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- __ Check(masm()->CheckSmi(index), kNonSmiIndex);
- __ Check(masm()->CheckSmi(value), kNonSmiValue);
-
- __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, kIndexIsTooLarge);
-
- __ SmiCompare(index, Smi::FromInt(0));
- __ Check(greater_equal, kIndexIsNegative);
-
- __ push(value);
- __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- __ cmpq(value, Immediate(encoding_mask));
- __ Check(equal, kUnexpectedStringType);
- __ pop(value);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
@@ -3417,17 +3422,23 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
__ pop(value);
__ pop(index);
- VisitForAccumulatorValue(args->at(0)); // string
if (FLAG_debug_code) {
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ Check(__ CheckSmi(value), kNonSmiValue);
+ __ Check(__ CheckSmi(index), kNonSmiIndex);
}
__ SmiToInteger32(value, value);
__ SmiToInteger32(index, index);
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
__ movb(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
value);
context()->Plug(string);
@@ -3444,17 +3455,23 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
__ pop(value);
__ pop(index);
- VisitForAccumulatorValue(args->at(0)); // string
if (FLAG_debug_code) {
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ Check(__ CheckSmi(value), kNonSmiValue);
+ __ Check(__ CheckSmi(index), kNonSmiIndex);
}
__ SmiToInteger32(value, value);
__ SmiToInteger32(index, index);
+
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
__ movw(FieldOperand(string, index, times_2, SeqTwoByteString::kHeaderSize),
value);
context()->Plug(rax);
@@ -3490,10 +3507,10 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
__ j(not_equal, &done);
// Store the value.
- __ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
+ __ movp(FieldOperand(rbx, JSValue::kValueOffset), rax);
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
__ bind(&done);
@@ -3630,11 +3647,11 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
+ __ pop(rdx);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3653,50 +3670,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
+ // Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallStub(&stub);
+ __ CallRuntime(Runtime::kMath_log, 1);
context()->Plug(rax);
}
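// With the TranscendentalCache stubs removed, the intrinsic above is assumed
// to bottom out in a C++ runtime function roughly equivalent to this sketch;
// the real Runtime::kMath_log may add its own fast paths.
#include <cmath>

double MathLogRuntimeSketch(double x) {
  return std::log(x);
}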
@@ -3728,11 +3707,10 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ j(not_equal, &runtime);
// InvokeFunction requires the function in rdi. Move it in there.
- __ movq(rdi, result_register());
+ __ movp(rdi, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ InvokeFunction(rdi, count, CALL_FUNCTION, NullCallWrapper());
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
__ bind(&runtime);
@@ -3750,7 +3728,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ pop(rbx);
+ __ pop(rcx);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3777,18 +3757,18 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = rax;
Register cache = rbx;
Register tmp = rcx;
- __ movq(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(cache,
+ __ movp(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movp(cache,
FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ movq(cache,
+ __ movp(cache,
ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache,
+ __ movp(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
Label done, not_found;
- // tmp now holds finger offset as a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ __ movp(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // tmp now holds finger offset as a smi.
SmiIndex index =
__ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
__ cmpq(key, FieldOperand(cache,
@@ -3796,7 +3776,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
index.scale,
FixedArray::kHeaderSize));
__ j(not_equal, &not_found, Label::kNear);
- __ movq(rax, FieldOperand(cache,
+ __ movp(rax, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize + kPointerSize));
@@ -3813,45 +3793,6 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = rax;
- Register left = rbx;
- Register tmp = rcx;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmpq(left, right);
- __ j(equal, &ok, Label::kNear);
- // Fail if either is a non-HeapObject.
- Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
- __ j(either_smi, &fail, Label::kNear);
- __ j(zero, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(JS_REGEXP_TYPE));
- __ j(not_equal, &fail, Label::kNear);
- __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Move(rax, isolate()->factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&ok);
- __ Move(rax, isolate()->factory()->true_value());
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -3933,7 +3874,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Array has fast elements, so its length must be a smi.
// If the array has length zero, return the empty string.
- __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ movp(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ SmiCompare(array_length, Smi::FromInt(0));
__ j(not_zero, &non_trivial_array);
__ LoadRoot(rax, Heap::kempty_stringRootIndex);
@@ -3947,7 +3888,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Save the FixedArray containing array's elements.
// End of array's live range.
elements = array;
- __ movq(elements, FieldOperand(array, JSArray::kElementsOffset));
+ __ movp(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
@@ -3963,12 +3904,12 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
- __ movq(string, FieldOperand(elements,
+ __ movp(string, FieldOperand(elements,
index,
times_pointer_size,
FixedArray::kHeaderSize));
__ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
@@ -3990,7 +3931,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// If array_length is 1, return elements[0], a string.
__ cmpl(array_length, Immediate(1));
__ j(not_equal, &not_size_one_array);
- __ movq(rax, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ movp(rax, FieldOperand(elements, FixedArray::kHeaderSize));
__ jmp(&return_result);
__ bind(&not_size_one_array);
@@ -4005,9 +3946,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// index: Array length.
// Check that the separator is a sequential ASCII string.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
@@ -4034,10 +3975,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements: FixedArray of strings.
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
- __ movq(result_operand, result_pos);
+ __ movp(result_operand, result_pos);
__ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
Smi::FromInt(1));
__ j(equal, &one_char_separator);
@@ -4058,7 +3999,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch: array length.
// Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
+ __ movp(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
@@ -4102,7 +4043,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&loop_2_entry);
// Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
+ __ movp(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
@@ -4129,12 +4070,12 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Replace separator string with pointer to its first character, and
// make scratch be its length.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ movq(separator_operand, string);
+ __ movp(separator_operand, string);
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
@@ -4149,13 +4090,13 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator_operand (rsp[0x10]): Address of first char of separator.
// Copy the separator to the result.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ movl(string_length, scratch);
__ CopyBytes(result_pos, string, string_length, 2);
__ bind(&loop_3_entry);
// Get string = array[index].
- __ movq(string, Operand(elements, index, times_pointer_size, 0));
+ __ movp(string, Operand(elements, index, times_pointer_size, 0));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
@@ -4165,12 +4106,12 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ j(not_equal, &loop_3); // Loop while (index < 0).
__ bind(&done);
- __ movq(rax, result_operand);
+ __ movp(rax, result_operand);
__ bind(&return_result);
// Drop temp values from the stack, and restore context register.
__ addq(rsp, Immediate(3 * kPointerSize));
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
@@ -4185,32 +4126,47 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ movq(rax, GlobalObjectOperand());
+ // Push the builtins object as receiver.
+ __ movp(rax, GlobalObjectOperand());
__ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- if (expr->is_jsruntime()) {
- // Call the JS runtime function using a call IC.
+ // Load the function from the receiver.
+ __ movp(rax, Operand(rsp, 0));
__ Move(rcx, expr->name());
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ push(Operand(rsp, 0));
+ __ movp(Operand(rsp, kPointerSize), rax);
+
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, rax);
+
} else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime.
__ CallRuntime(expr->function(), arg_count);
+ context()->Plug(rax);
}
- context()->Plug(rax);
}
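// The pair "push(Operand(rsp, 0))" / "movp(Operand(rsp, kPointerSize), rax)"
// above slides the freshly loaded target underneath the receiver before the
// arguments are pushed. A standalone sketch of that shuffle with a vector as
// the stack; purely illustrative.
#include <string>
#include <vector>

void PushTargetUnderReceiverSketch(std::vector<std::string>* stack,
                                   const std::string& target) {
  stack->push_back(stack->back());        // duplicate the receiver on top
  (*stack)[stack->size() - 2] = target;   // old slot now holds the target
}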
@@ -4370,7 +4326,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
+ __ movp(rdx, Operand(rsp, 0)); // Leave receiver on stack
__ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
@@ -4384,14 +4340,47 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ // Inline smi case if we are in a loop.
+ Label done, stub_call;
+ JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(rax, &no_conversion, Label::kNear);
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(rax, &slow, Label::kNear);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(rax);
+ break;
+ case NAMED_PROPERTY:
+ __ movp(Operand(rsp, kPointerSize), rax);
+ break;
+ case KEYED_PROPERTY:
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
+ break;
+ }
+ }
+ }
+
+ SmiOperationExecutionMode mode;
+ mode.Add(PRESERVE_SOURCE_REGISTER);
+ mode.Add(BAILOUT_ON_NO_OVERFLOW);
+ if (expr->op() == Token::INC) {
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1), mode, &done, Label::kNear);
+ } else {
+ __ SmiSubConstant(rax, rax, Smi::FromInt(1), mode, &done, Label::kNear);
+ }
+ __ jmp(&stub_call, Label::kNear);
+ __ bind(&slow);
}
+
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4404,49 +4393,24 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(rax);
break;
case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
+ __ movp(Operand(rsp, kPointerSize), rax);
break;
case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
}
}
}
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- }
- __ j(overflow, &stub_call, Label::kNear);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- }
- }
-
// Record position before stub call.
SetSourcePosition(expr->position());
// Call stub for +1/-1.
- __ movq(rdx, rax);
+ __ bind(&stub_call);
+ __ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
- BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
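// Sketch of the inlined ++/-- fast path added above: attempt the smi add and
// jump straight to "done" when it does not overflow; on overflow fall through
// to the BinaryOpIC stub call. The 32-bit payload range is an assumption made
// for this illustration.
#include <cstdint>
#include <limits>

bool SmiAddNoOverflowSketch(int32_t a, int32_t b, int32_t* out) {
  int64_t wide = static_cast<int64_t>(a) + b;
  if (wide < std::numeric_limits<int32_t>::min() ||
      wide > std::numeric_limits<int32_t>::max()) {
    return false;  // overflow: take the stub_call path
  }
  *out = static_cast<int32_t>(wide);
  return true;     // no overflow: take the &done path
}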
@@ -4477,10 +4441,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->value());
__ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4497,7 +4458,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4518,16 +4479,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ Move(rcx, proxy->name());
- __ movq(rax, GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ __ movp(rax, GlobalObjectOperand());
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4566,7 +4527,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(rax, if_true);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->string_string())) {
@@ -4595,7 +4556,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => true.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
@@ -4675,7 +4636,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ movq(rcx, rdx);
+ __ movp(rcx, rdx);
__ or_(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpq(rdx, rax);
@@ -4686,7 +4647,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4721,7 +4682,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ testq(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
@@ -4730,7 +4691,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
context()->Plug(rax);
}
@@ -4747,12 +4708,12 @@ Register FullCodeGenerator::context_register() {
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
- __ movq(Operand(rbp, frame_offset), value);
+ __ movp(Operand(rbp, frame_offset), value);
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ movq(dst, ContextOperand(rsi, context_index));
+ __ movp(dst, ContextOperand(rsi, context_index));
}
@@ -4861,8 +4822,8 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
__ Drop(*stack_depth); // Down to the handler block.
if (*context_length > 0) {
// Restore the context to its dedicated register and the stack.
- __ movq(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ __ movp(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
}
__ PopTryHandler();
__ call(finally_entry_);
@@ -4878,9 +4839,11 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x1d;
-static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
+#ifdef DEBUG
+static const byte kCallInstruction = 0xe8;
+#endif
void BackEdgeTable::PatchAt(Code* unoptimized_code,
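// The byte constants above describe the back-edge check site this function
// patches. A rough standalone sketch of the toggle; the offsets and intent
// are inferred from those constants, not from code shown here.
#include <cstdint>

inline void PatchBackEdgeSketch(uint8_t* site, bool replace_jns_with_nop) {
  if (replace_jns_with_nop) {
    site[0] = 0x66; site[1] = 0x90;  // two-byte nop: fall into the call
  } else {
    site[0] = 0x79; site[1] = 0x1d;  // jns +0x1d: normally skip the call
  }
}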
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 15f410c134..d2340c83c4 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -74,7 +74,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
__ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
- __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
__ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
__ j(below, miss);
@@ -90,7 +90,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
(1 << Map::kHasNamedInterceptor)));
__ j(not_zero, miss);
- __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, miss);
@@ -150,7 +150,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(result,
+ __ movp(result,
Operand(elements, r1, times_pointer_size,
kValueOffset - kHeapObjectTag));
}
@@ -216,10 +216,10 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
scratch1,
times_pointer_size,
kValueOffset - kHeapObjectTag));
- __ movq(Operand(scratch1, 0), value);
+ __ movp(Operand(scratch1, 0), value);
// Update write barrier. Make sure not to clobber the value.
- __ movq(scratch0, value);
+ __ movp(scratch0, value);
__ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
@@ -284,7 +284,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
//
// scratch - used to hold elements of the receiver and the loaded value.
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
__ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
@@ -299,7 +299,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
__ j(above_equal, out_of_range);
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movq(scratch, FieldOperand(elements,
+ __ movp(scratch, FieldOperand(elements,
index.reg,
index.scale,
FixedArray::kHeaderSize));
@@ -308,7 +308,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
if (!result.is(scratch)) {
- __ movq(result, scratch);
+ __ movp(result, scratch);
}
}
@@ -384,7 +384,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
__ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rdx: receiver
@@ -412,14 +412,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ movl(rcx, rbx);
__ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
@@ -438,7 +438,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
- __ movq(rdi, rcx);
+ __ movp(rdi, rcx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
@@ -479,14 +479,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&load_in_object_property);
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rcx, rdi);
- __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
+ __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
__ bind(&property_array_property);
- __ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
+ __ movp(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rax, FieldOperand(rax, rdi, times_pointer_size,
FixedArray::kHeaderSize));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -498,7 +498,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// rax: key
// rbx: elements
- __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kMapOffset));
__ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
@@ -540,7 +540,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -560,7 +560,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ JumpUnlessNonNegativeSmi(rax, &slow);
// Get the map of the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
@@ -583,7 +583,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -605,10 +605,25 @@ static void KeyedStoreGenerateGenericHelper(
// rdx: receiver (a JSArray)
// r9: map of receiver
if (check_map == kCheckMap) {
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, fast_double);
}
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element
+ Label holecheck_passed1;
+ __ movp(kScratchRegister, FieldOperand(rbx,
+ rcx,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow);
+
+ __ bind(&holecheck_passed1);
+
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value);
@@ -618,7 +633,7 @@ static void KeyedStoreGenerateGenericHelper(
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
// It's irrelevant whether array is smi-only or not when writing a smi.
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
__ ret(0);
@@ -633,9 +648,9 @@ static void KeyedStoreGenerateGenericHelper(
__ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
- __ movq(rdx, rax); // Preserve the value which is returned.
+ __ movp(rdx, rax); // Preserve the value which is returned.
__ RecordWriteArray(
rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
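// The HOLECHECK added above makes the fast keyed store bail to the runtime
// whenever the slot currently holds the hole, because a callback somewhere on
// the prototype chain could then observe the store. A plain-C++ sketch of the
// guard; the hole marker and slow-path callback are stand-ins.
#include <climits>
#include <functional>
#include <vector>

struct FastElementsSketch {
  static const int kTheHole = INT_MIN;        // hypothetical hole marker
  std::vector<int> elements;
  std::function<void(int, int)> slow_store;   // runtime fallback

  void Store(int index, int value) {
    if (elements[index] == kTheHole) {        // HOLECHECK
      slow_store(index, value);               // prototype may intercept
      return;
    }
    elements[index] = value;                  // fast in-place store
  }
};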
@@ -648,6 +663,15 @@ static void KeyedStoreGenerateGenericHelper(
__ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ j(not_equal, slow);
}
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so
+ // go to the runtime.
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmpl(FieldOperand(rbx, rcx, times_8, offset), Immediate(kHoleNanUpper32));
+ __ j(not_equal, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow);
+
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
&transition_double_elements);
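// The double-array variant above recognizes the hole by looking only at the
// upper 32 bits of the 8-byte slot, since the hole is encoded as one specific
// NaN. Standalone sketch; the bit pattern below is an assumption, not taken
// from this patch.
#include <cstdint>
#include <cstring>

const uint32_t kHoleNanUpper32Sketch = 0x7FF7FFFF;  // assumed encoding

bool IsHoleDoubleSketch(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Sketch;
}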
@@ -659,10 +683,10 @@ static void KeyedStoreGenerateGenericHelper(
__ ret(0);
__ bind(&transition_smi_elements);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
- __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &non_double_value);
@@ -676,7 +700,7 @@ static void KeyedStoreGenerateGenericHelper(
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
@@ -689,14 +713,14 @@ static void KeyedStoreGenerateGenericHelper(
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
rbx,
@@ -704,7 +728,7 @@ static void KeyedStoreGenerateGenericHelper(
slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -724,11 +748,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
+ __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
__ testb(FieldOperand(r9, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
+ Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow_with_tagged_index);
@@ -744,7 +768,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rdx: JSObject
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check array bounds.
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
@@ -772,7 +796,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(below_equal, &slow);
// Increment index to get new length.
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_if_double_array);
__ jmp(&fast_object_grow);
@@ -790,7 +814,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rdx: receiver (a JSArray)
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
@@ -804,347 +828,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, rdx, rcx, rbx, rax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(rdx, &number);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(rbx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &boolean);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, rdx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, rdx, rcx, rbx, no_reg);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdi : function
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- __ JumpIfSmi(rdi, miss);
- // Check that the value is a JavaScript function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-// The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
-
- // rax: elements
- // Search the dictionary placing the result in rdi.
- GenerateDictionaryLoad(masm, &miss, rax, rcx, rbx, rdi, rdi);
-
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ push(rdx);
- __ push(rcx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ Set(rax, 2);
- __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
- __ CallStub(&stub);
-
- // Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ movq(rdx, args.GetReceiverOperand());
- __ JumpIfSmi(rdx, &invoke);
- __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
- __ j(equal, &global);
- __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- Label do_call, slow_call, slow_load;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &check_name);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in rdx is not used after this point.
- // rcx: key
- // rdi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // rax: elements
- // rcx: smi key
- // Check whether the elements is a number dictionary.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow_load);
- __ SmiToInteger32(rbx, rcx);
- // ebx: untagged index
- __ LoadFromNumberDictionary(&slow_load, rax, rcx, rbx, r9, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rcx); // save the key
- __ push(rdx); // pass the receiver
- __ push(rcx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(rcx); // restore the key
- }
- __ movq(rdi, rax);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, rcx, rax, rbx, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(rbx, rcx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(rcx, &miss);
- Condition cond = masm->IsObjectNameType(rcx, rax, rax);
- __ j(NegateCondition(cond), &miss);
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -1170,11 +853,11 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load the elements into scratch1 and check its map. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments.
- __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
__ cmpq(key, scratch2);
__ j(greater_equal, unmapped_case);
@@ -1182,7 +865,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load element index and check whether it is the hole.
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
__ SmiToInteger64(scratch3, key);
- __ movq(scratch2, FieldOperand(scratch1,
+ __ movp(scratch2, FieldOperand(scratch1,
scratch3,
times_pointer_size,
kHeaderSize));
@@ -1192,7 +875,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load value from context and return it. We can reuse scratch1 because
// we do not jump to the unmapped lookup (which requires the parameter
// map in scratch1).
- __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+ __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
__ SmiToInteger64(scratch3, scratch2);
return FieldOperand(scratch1,
scratch3,
@@ -1212,10 +895,10 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
// overwritten.
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
- __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmpq(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
@@ -1236,7 +919,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand mapped_location =
GenerateMappedArgumentsLookup(
masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
- __ movq(rax, mapped_location);
+ __ movp(rax, mapped_location);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in rbx.
@@ -1244,10 +927,10 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
__ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
__ j(equal, &slow);
- __ movq(rax, unmapped_location);
+ __ movp(rax, unmapped_location);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -1261,9 +944,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Label slow, notin;
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
- __ movq(mapped_location, rax);
+ __ movp(mapped_location, rax);
__ lea(r9, mapped_location);
- __ movq(r8, rax);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
@@ -1275,9 +958,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
// The unmapped lookup expects that the parameter map is in rbx.
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
- __ movq(unmapped_location, rax);
+ __ movp(unmapped_location, rax);
__ lea(r9, unmapped_location);
- __ movq(r8, rax);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
@@ -1286,38 +969,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
INLINE_SMI_CHECK);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label slow, notin;
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
- __ movq(rdi, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
- __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow);
- __ movq(rdi, unmapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&slow);
- GenerateMiss(masm, argc);
+ GenerateMiss(masm);
}
@@ -1329,9 +981,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
@@ -1399,7 +1049,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -1415,10 +1065,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1440,8 +1088,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1450,9 +1097,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, strict_mode,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rdx, rcx, rbx, no_reg);
@@ -1588,7 +1233,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1603,10 +1248,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 0aff6c94c1..a94dcee227 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -50,9 +50,7 @@ class SafepointGenerator V8_FINAL : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {
- codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
- }
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
@@ -105,7 +103,7 @@ void LChunkBuilder::Abort(BailoutReason reason) {
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
- __ movq(Operand(rsp, offset), rax);
+ __ movp(Operand(rsp, offset), rax);
}
}
#endif
@@ -156,17 +154,23 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
+ // Classic mode functions need to replace the receiver with the global proxy
+ // when called as functions (without an explicit receiver object).
+ if (info_->this_has_uses() &&
+ info_->is_classic_mode() &&
+ !info_->is_native()) {
Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
StackArgumentsAccessor args(rsp, scope()->num_parameters());
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetReceiverOperand(), kScratchRegister);
+ __ movp(rcx, args.GetReceiverOperand());
+
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+
+ __ movp(args.GetReceiverOperand(), rcx);
+
__ bind(&ok);
}
}
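// The rewritten prologue check above now runs for classic-mode (sloppy)
// functions whose code actually uses "this": an undefined incoming receiver
// is replaced with the global receiver object. The same decision in ordinary
// C++; the types are stand-ins.
struct TaggedValueSketch { bool is_undefined; };

TaggedValueSketch FixUpReceiverSketch(TaggedValueSketch receiver,
                                      TaggedValueSketch global_receiver,
                                      bool is_classic_mode, bool is_native) {
  if (is_classic_mode && !is_native && receiver.is_undefined) {
    return global_receiver;  // undefined receiver -> global receiver
  }
  return receiver;           // strict/native code keeps what it was given
}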
@@ -189,10 +193,10 @@ bool LCodeGen::GeneratePrologue() {
#endif
__ push(rax);
__ Set(rax, slots);
- __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
+ __ movq(kScratchRegister, kSlotsZapValue);
Label loop;
__ bind(&loop);
- __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
+ __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
@@ -214,17 +218,18 @@ bool LCodeGen::GeneratePrologue() {
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
+ __ push(rdi);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ // Context is returned in rax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in rsi.
+ __ movp(rsi, rax);
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
@@ -234,10 +239,10 @@ bool LCodeGen::GeneratePrologue() {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
+ __ movp(rax, Operand(rbp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
+ __ movp(Operand(rsi, context_offset), rax);
// Update the write barrier. This clobbers rax and rbx.
__ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
}
@@ -285,13 +290,14 @@ bool LCodeGen::GenerateJumpTable() {
}
if (jump_table_[i].needs_frame) {
ASSERT(!info()->saves_caller_doubles());
- __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
+ __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
if (needs_frame.is_bound()) {
__ jmp(&needs_frame);
} else {
__ bind(&needs_frame);
+ __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
__ push(rbp);
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
__ push(rsi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
@@ -299,7 +305,7 @@ bool LCodeGen::GenerateJumpTable() {
ASSERT(info()->IsStub());
__ Move(rsi, Smi::FromInt(StackFrame::STUB));
__ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ movp(rsi, MemOperand(rsp, kPointerSize));
__ call(kScratchRegister);
}
} else {
@@ -322,7 +328,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -348,7 +355,7 @@ bool LCodeGen::GenerateDeferredCode() {
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
frame_is_built_ = false;
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
__ pop(rbp);
}
__ jmp(code->exit());
@@ -442,11 +449,23 @@ Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
}
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
Operand LCodeGen::ToOperand(LOperand* op) const {
// Does not handle registers. In X64 assembler, plain registers are not
// representable as an Operand.
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return Operand(rbp, StackSlotOffset(op->index()));
+ if (NeedsEagerFrame()) {
+ return Operand(rbp, StackSlotOffset(op->index()));
+ } else {
+ // Retrieve the parameter without an eager stack frame, addressing it
+ // relative to the stack pointer.
+ return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
}
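ArgumentsOffsetWithoutFrame turns a (negative) parameter index into an rsp-relative byte offset when no frame has been built. A minimal sketch of the arithmetic, assuming the usual x64 values kPointerSize == 8 and kPCOnStackSize == 8 (only the return address sits between rsp and the arguments):

#include <cstdio>

constexpr int kPointerSize = 8;    // assumed x64 value
constexpr int kPCOnStackSize = 8;  // assumed x64 value (return address)

// Same formula as above: negative indices denote parameters.
constexpr int ArgumentsOffsetWithoutFrame(int index) {
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}

static_assert(ArgumentsOffsetWithoutFrame(-1) == 8,
              "index -1 maps to the slot just above the return address");
static_assert(ArgumentsOffsetWithoutFrame(-2) == 16,
              "each further parameter is one pointer higher up the stack");

int main() {
  std::printf("%d %d\n", ArgumentsOffsetWithoutFrame(-1),
              ArgumentsOffsetWithoutFrame(-2));
  return 0;
}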
@@ -583,7 +602,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
int argc) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
ASSERT(instr != NULL);
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
@@ -617,10 +635,30 @@ void LCodeGen::CallRuntime(const Runtime::Function* function,
}
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ if (!ToRegister(context).is(rsi)) {
+ __ movp(rsi, ToRegister(context));
+ }
+ } else if (context->IsStackSlot()) {
+ __ movp(rsi, ToOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
+
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -677,7 +715,27 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
+ if (DeoptEveryNTimes()) {
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+ __ pushfq();
+ __ push(rax);
+ Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
+ __ movl(rax, count_operand);
+ __ subl(rax, Immediate(1));
+ __ j(not_zero, &no_deopt, Label::kNear);
+ if (FLAG_trap_on_deopt) __ int3();
+ __ movl(rax, Immediate(FLAG_deopt_every_n_times));
+ __ movl(count_operand, rax);
+ __ pop(rax);
+ __ popfq();
+ ASSERT(frame_is_built_);
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&no_deopt);
+ __ movl(count_operand, rax);
+ __ pop(rax);
+ __ popfq();
+ }
if (info()->ShouldTrapOnDeopt()) {
Label done;
@@ -724,36 +782,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
@@ -764,6 +792,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -847,10 +876,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (kind & Safepoint::kWithRegisters) {
- // Register rsi always contains a pointer to the context.
- safepoint.DefinePointerRegister(rsi, zone());
- }
}
@@ -926,13 +951,9 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->result()).is(rax));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
RegExpExecStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -948,12 +969,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -969,7 +984,7 @@ void LCodeGen::DoModI(LModI* instr) {
HMod* hmod = instr->hydrogen();
HValue* left = hmod->left();
HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
+ if (hmod->RightIsPowerOf2()) {
// TODO(svenpanne) We should really do the strength reduction on the
// Hydrogen level.
Register left_reg = ToRegister(instr->left());
@@ -994,36 +1009,6 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&left_is_not_negative);
__ andl(left_reg, Immediate(divisor - 1));
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
- Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmpl(right_reg, Immediate(divisor));
- DeoptimizeIf(not_equal, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ testl(left_reg, left_reg);
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ negl(left_reg);
- __ andl(left_reg, Immediate(divisor - 1));
- __ negl(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&left_is_not_negative);
- __ andl(left_reg, Immediate(divisor - 1));
- __ bind(&done);
-
} else {
Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(rax));
@@ -1139,10 +1124,10 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
double multiplier_f =
static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
int64_t multiplier;
- if (multiplier_f - floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(floor(multiplier_f));
+ if (multiplier_f - std::floor(multiplier_f) < 0.5) {
+ multiplier = static_cast<int64_t>(std::floor(multiplier_f));
} else {
- multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
+ multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
}
// The multiplier is a uint32.
ASSERT(multiplier > 0 &&
@@ -1154,7 +1139,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
__ neg(reg1);
DeoptimizeIf(zero, instr->environment());
}
- __ movq(reg2, multiplier, RelocInfo::NONE64);
+ __ Set(reg2, multiplier);
// Result just fit in r64, because it's int32 * uint32.
__ imul(reg2, reg1);
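The multiplier computed here implements division by a constant with a fixed-point reciprocal: multiply by round(2^shift / |divisor|), then shift right by shift. A minimal sketch for positive dividends only (the code around this hunk handles signs, the minus-zero bailout, and the final shift):

#include <cassert>
#include <cmath>
#include <cstdint>

// Fixed-point reciprocal division, mirroring the multiplier computation above.
int32_t DivByConstant(int32_t dividend, int32_t divisor_abs, int shift) {
  double multiplier_f =
      static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
  int64_t multiplier;
  if (multiplier_f - std::floor(multiplier_f) < 0.5) {
    multiplier = static_cast<int64_t>(std::floor(multiplier_f));
  } else {
    multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
  }
  // int32 * uint32 fits in 64 bits, as the comment above notes.
  return static_cast<int32_t>((dividend * multiplier) >> shift);
}

int main() {
  assert(DivByConstant(100, 3, 32) == 33);  // 100 / 3
  assert(DivByConstant(7, 7, 32) == 1);     // exact division
  return 0;
}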
@@ -1165,57 +1150,40 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
+ if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
-
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmpl(dividend, Immediate(0));
- __ j(less, &negative, Label::kNear);
- __ sarl(dividend, Immediate(power));
- if (divisor < 0) __ negl(dividend);
- __ jmp(&done, Label::kNear);
+ HDiv* hdiv = instr->hydrogen();
+ int32_t divisor = hdiv->right()->GetInteger32Constant();
+ Register result = ToRegister(instr->result());
+ ASSERT(!result.is(dividend));
- __ bind(&negative);
- __ negl(dividend);
- __ sarl(dividend, Immediate(power));
- if (divisor > 0) __ negl(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
- }
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
+ hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
}
-
- if (divisor < 0) __ negl(dividend);
-
+ // Check for (kMinInt / -1).
+ if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
+ hdiv->CheckFlag(HValue::kCanOverflow)) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ testl(dividend, Immediate(Abs(divisor) - 1));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2(Abs(divisor));
+ if (shift > 0) {
+ // The arithmetic shift is always OK, the 'if' is an optimization only.
+ if (shift > 1) __ sarl(result, Immediate(31));
+ __ shrl(result, Immediate(32 - shift));
+ __ addl(result, dividend);
+ __ sarl(result, Immediate(shift));
+ }
+ if (divisor < 0) __ negl(result);
return;
}
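The sarl/shrl/addl/sarl sequence above is the standard branch-free, round-toward-zero division by 2^shift: negative dividends are biased by 2^shift - 1 before the arithmetic shift. A small sketch, assuming two's-complement arithmetic right shifts (which is what sarl provides):

#include <cassert>
#include <cstdint>

// Divide by 2^shift with round-toward-zero semantics, mirroring the emitted
// sequence (the shift == 1 case above simply skips the first sarl, because
// shifting by 31 twice would be redundant).
int32_t DivByPowerOf2(int32_t dividend, int shift) {
  uint32_t bias = static_cast<uint32_t>(dividend >> 31);  // 0 or 0xFFFFFFFF
  bias >>= (32 - shift);                                  // 0 or 2^shift - 1
  uint32_t biased = static_cast<uint32_t>(dividend) + bias;
  return static_cast<int32_t>(biased) >> shift;           // arithmetic shift
}

int main() {
  assert(DivByPowerOf2(7, 2) == 1);    //  7 / 4
  assert(DivByPowerOf2(-7, 2) == -1);  // -7 / 4 rounds toward zero, not to -2
  assert(DivByPowerOf2(-8, 2) == -2);  // exact division is unaffected
  return 0;
}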
@@ -1281,7 +1249,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ movq(kScratchRegister, left);
+ __ movp(kScratchRegister, left);
} else {
__ movl(kScratchRegister, left);
}
@@ -1591,40 +1559,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte.
- __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(result, Immediate(Map::kElementsKindMask));
- __ shr(result, Immediate(Map::kElementsKindShift));
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
- }
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
- __ j(not_equal, &done, Label::kNear);
- __ movq(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -1639,67 +1573,108 @@ void LCodeGen::DoDateField(LDateField* instr) {
DeoptimizeIf(not_equal, instr->environment());
if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
- __ movq(kScratchRegister, stamp_operand);
+ __ movp(kScratchRegister, stamp_operand);
__ cmpq(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, object);
- __ movq(arg_reg_2, index, RelocInfo::NONE64);
+ __ movp(arg_reg_1, object);
+ __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&done);
}
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldOperand(string, SeqString::kHeaderSize + offset);
+ }
+ return FieldOperand(
+ string, ToRegister(index),
+ encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+ SeqString::kHeaderSize);
+}
+
+
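For a constant index the operand built above reduces to plain offset arithmetic into the flat string payload. A sketch with an assumed header size (the real value is SeqString::kHeaderSize; kCharSize == 1 is asserted above and kUC16Size is 2):

#include <cassert>

constexpr int kAssumedHeaderSize = 16;  // placeholder for SeqString::kHeaderSize
constexpr int kCharSize = 1;            // one-byte strings
constexpr int kUC16Size = 2;            // two-byte strings

// Byte offset of character `index` inside a sequential string payload.
constexpr int SeqStringByteOffset(int index, bool two_byte) {
  return kAssumedHeaderSize + index * (two_byte ? kUC16Size : kCharSize);
}

static_assert(SeqStringByteOffset(3, false) == 19, "one byte per character");
static_assert(SeqStringByteOffset(3, true) == 22, "two bytes per character");

int main() { return 0; }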
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register result = ToRegister(instr->result());
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register value = ToRegister(instr->value());
- String::Encoding encoding = instr->encoding();
if (FLAG_debug_code) {
- __ push(value);
- __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ __ push(string);
+ __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
- __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
+ __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(value);
+ __ pop(string);
}
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
+ __ movzxbl(result, operand);
} else {
- __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
- value);
+ __ movzxwl(result, operand);
}
}
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->value()));
- CallRuntime(Runtime::kThrow, 1, instr);
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
+ Register value = ToRegister(instr->value());
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (instr->value()->IsConstantOperand()) {
+ int value = ToInteger32(LConstantOperand::cast(instr->value()));
+ ASSERT_LE(0, value);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ ASSERT_LE(value, String::kMaxOneByteCharCode);
+ __ movb(operand, Immediate(value));
+ } else {
+ ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+ __ movw(operand, Immediate(value));
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ movb(operand, value);
+ } else {
+ __ movw(operand, value);
+ }
}
}
@@ -1708,14 +1683,22 @@ void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
+ Representation target_rep = instr->hydrogen()->representation();
+ bool is_q = target_rep.IsSmi() || target_rep.IsExternal();
+
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
int32_t offset = ToInteger32(LConstantOperand::cast(right));
- __ leal(ToRegister(instr->result()),
- MemOperand(ToRegister(left), offset));
+ if (is_q) {
+ __ lea(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
+ } else {
+ __ leal(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
+ }
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- if (instr->hydrogen()->representation().IsSmi()) {
+ if (is_q) {
__ lea(ToRegister(instr->result()), address);
} else {
__ leal(ToRegister(instr->result()), address);
@@ -1723,16 +1706,21 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
} else {
if (right->IsConstantOperand()) {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
+ if (is_q) {
+ __ addq(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ } else {
+ __ addl(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ }
} else if (right->IsRegister()) {
- if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_q) {
__ addq(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToRegister(right));
}
} else {
- if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_q) {
__ addq(ToRegister(left), ToOperand(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
@@ -1762,7 +1750,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
ASSERT(!instr->hydrogen_value()->representation().IsSmi());
__ cmpl(left_reg, right_imm);
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_imm);
+ __ movp(left_reg, right_imm);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1771,7 +1759,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ cmpl(left_reg, right_reg);
}
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_reg);
+ __ movp(left_reg, right_reg);
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1780,7 +1768,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ cmpl(left_reg, right_op);
}
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_op);
+ __ movp(left_reg, right_op);
}
__ bind(&return_left);
} else {
@@ -1802,7 +1790,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
- __ orpd(left_reg, right_reg);
+ __ orps(left_reg, right_reg);
} else {
// Since we operate on +0 and/or -0, addsd and andsd have the same effect.
__ addsd(left_reg, right_reg);
@@ -1813,7 +1801,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear);
__ bind(&return_right);
- __ movsd(left_reg, right_reg);
+ __ movaps(left_reg, right_reg);
__ bind(&return_left);
}
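The instruction choice in the ±0 case above is deliberate: OR-ing the operand bit patterns keeps a set sign bit, which gives the -0 that Math.min(+0, -0) must return, while adding the operands gives +0, which is what Math.max needs. A short sketch of the IEEE-754 behaviour being relied on:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Reinterpret a double as its raw bit pattern.
uint64_t Bits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}

int main() {
  double plus_zero = 0.0, minus_zero = -0.0;

  // min: OR-ing the bit patterns preserves a set sign bit, yielding -0.
  uint64_t min_bits = Bits(plus_zero) | Bits(minus_zero);
  double min_result;
  std::memcpy(&min_result, &min_bits, sizeof(min_result));
  assert(std::signbit(min_result));

  // max: IEEE-754 addition of +0 and -0 is +0.
  assert(!std::signbit(plus_zero + minus_zero));
  return 0;
}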
@@ -1848,8 +1836,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ movaps(xmm_scratch, left);
ASSERT(right.is(xmm1));
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ ExternalReference::mod_two_doubles_operation(isolate()), 2);
__ movaps(result, xmm_scratch);
break;
}
@@ -1861,11 +1848,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->left()).is(rdx));
ASSERT(ToRegister(instr->right()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@@ -1986,7 +1974,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
const Register map = kScratchRegister;
if (expected.NeedsMap()) {
- __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
@@ -2183,6 +2171,33 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+
+ if (rep.IsDouble()) {
+ XMMRegister value = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, value);
+ EmitFalseBranch(instr, not_equal);
+ __ movmskpd(kScratchRegister, value);
+ __ testl(kScratchRegister, Immediate(1));
+ EmitBranch(instr, not_zero);
+ } else {
+ Register value = ToRegister(instr->value());
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
+ Immediate(0x80000000));
+ EmitFalseBranch(instr, not_equal);
+ __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
+ Immediate(0x00000000));
+ EmitBranch(instr, equal);
+ }
+}
+
+
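Both branches rely on the bit pattern of -0.0: only the sign bit is set, so a heap number holding -0 has 0x80000000 in its upper (exponent) word and zero in its lower (mantissa) word, and movmskpd exposes the same sign bit for the unboxed double path. A quick stand-alone check of that representation:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double minus_zero = -0.0;
  uint64_t bits;
  std::memcpy(&bits, &minus_zero, sizeof(bits));

  assert(bits == 0x8000000000000000ull);                     // only the sign bit
  assert(static_cast<uint32_t>(bits >> 32) == 0x80000000u);  // upper (exponent) word
  assert(static_cast<uint32_t>(bits) == 0u);                 // lower (mantissa) word
  return 0;
}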
Condition LCodeGen::EmitIsObject(Register input,
Label* is_not_object,
Label* is_object) {
@@ -2193,7 +2208,7 @@ Condition LCodeGen::EmitIsObject(Register input,
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, is_object);
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -2267,7 +2282,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
if (!instr->hydrogen()->value()->IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
EmitBranch(instr, not_zero);
@@ -2275,6 +2290,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2372,7 +2388,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
} else {
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
__ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
@@ -2382,7 +2398,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
+ __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
@@ -2394,8 +2410,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// temp now contains the constructor function. Grab the
// instance class name from there.
- __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ movq(temp, FieldOperand(temp,
+ __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
// The class name we are testing against is internalized since it's a literal.
// The name in the constructor is internalized because of the way the context
@@ -2431,6 +2447,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
InstanceofStub stub(InstanceofStub::kNoFlags);
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
@@ -2462,7 +2479,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Label map_check_;
};
-
+ ASSERT(ToRegister(instr->context()).is(rsi));
DeferredInstanceOfKnownGlobal* deferred;
deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
@@ -2470,7 +2487,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register object = ToRegister(instr->value());
// A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
+ __ JumpIfSmi(object, &false_result, Label::kNear);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
@@ -2478,10 +2495,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Label cache_miss;
// Use a temp register to avoid memory operands with variable lengths.
Register map = ToRegister(instr->temp());
- __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
+ __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
- __ movq(kScratchRegister, cache_cell, RelocInfo::CELL);
+ __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
__ cmpq(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
@@ -2492,7 +2509,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ bind(&end_of_patched_code);
ASSERT(true);
#endif
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
@@ -2542,14 +2559,14 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Move result to a register that survives the end of the
// PushSafepointRegisterScope.
- __ movq(kScratchRegister, rax);
+ __ movp(kScratchRegister, rax);
}
__ testq(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
- __ j(not_zero, &load_false);
+ __ j(not_zero, &load_false, Label::kNear);
__ LoadRoot(rax, Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_false);
__ LoadRoot(rax, Heap::kFalseValueRootIndex);
__ bind(&done);
@@ -2557,6 +2574,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2576,9 +2594,12 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
- // Preserve the return value on the stack and rely on the runtime
- // call to return the value in the same register.
+ // Preserve the return value on the stack and rely on the runtime call
+ // to return the value in the same register. We're leaving the code
+ // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
__ push(rax);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
@@ -2586,7 +2607,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
__ pop(rbp);
no_frame_start = masm_->pc_offset();
}
@@ -2620,14 +2641,14 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->global_object()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rcx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2643,36 +2664,24 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// We have a temp because CompareRoot might clobber kScratchRegister.
Register cell = ToRegister(instr->temp());
ASSERT(!value.is(cell));
- __ movq(cell, cell_handle, RelocInfo::CELL);
+ __ Move(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
// Store the value.
- __ movq(Operand(cell, 0), value);
+ __ movp(Operand(cell, 0), value);
} else {
// Store the value.
- __ movq(kScratchRegister, cell_handle, RelocInfo::CELL);
- __ movq(Operand(kScratchRegister, 0), value);
+ __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
+ __ movp(Operand(kScratchRegister, 0), value);
}
// Cells are always rescanned, so no write barrier here.
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(rdx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- __ Move(rcx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ movq(result, ContextOperand(context, instr->slot_index()));
+ __ movp(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
@@ -2702,7 +2711,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ j(not_equal, &skip_assignment);
}
}
- __ movq(target, value);
+ __ movp(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
@@ -2740,8 +2749,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register object = ToRegister(instr->object());
- if (FLAG_track_double_fields &&
- instr->hydrogen()->representation().IsDouble()) {
+ if (instr->hydrogen()->representation().IsDouble()) {
XMMRegister result = ToDoubleRegister(instr->result());
__ movsd(result, FieldOperand(object, offset));
return;
@@ -2749,19 +2757,36 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register result = ToRegister(instr->result());
if (!access.IsInobject()) {
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
object = result;
}
- __ Load(result, FieldOperand(object, offset), access.representation());
+
+ Representation representation = access.representation();
+ if (representation.IsSmi() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(object, offset), representation);
+ __ AssertSmi(scratch);
+#endif
+
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ representation = Representation::Integer32();
+ }
+ __ Load(result, FieldOperand(object, offset), representation);
}
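The shortcut above leans on the x64 smi layout spelled out by the STATIC_ASSERTs: the 32-bit payload sits in the upper half of the 64-bit word, so on little-endian x64 it can be read directly at offset + kPointerSize / 2 without untagging. A stand-alone sketch of that layout:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  int32_t value = -42;
  // x64 smi encoding: payload in the upper 32 bits, tag bits (all zero) below.
  uint64_t smi = static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;

  // Reading only the upper four bytes (offset + 4 on little-endian) recovers
  // the original int32 without any shift.
  int32_t upper_half;
  std::memcpy(&upper_half, reinterpret_cast<const char*>(&smi) + 4,
              sizeof(upper_half));
  assert(upper_half == value);
  return 0;
}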
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->object()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rcx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2781,7 +2806,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
__ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
- __ movq(result,
+ __ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
@@ -2794,13 +2819,13 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
__ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
- __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
__ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in the function's map.
__ bind(&non_instance);
- __ movq(result, FieldOperand(result, Map::kConstructorOffset));
+ __ movp(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
__ bind(&done);
@@ -2813,15 +2838,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input,
- ExternalPixelArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
@@ -2832,7 +2848,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
StackArgumentsAccessor args(arguments, const_length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(result, args.GetArgumentOperand(const_index));
+ __ movp(result, args.GetArgumentOperand(const_index));
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
@@ -2844,7 +2860,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
StackArgumentsAccessor args(arguments, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(result, args.GetArgumentOperand(0));
+ __ movp(result, args.GetArgumentOperand(0));
}
}
@@ -2865,47 +2881,61 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movsxlq(key_reg, key_reg);
}
}
+ int base_offset = instr->is_fixed_typed_array()
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
elements_kind,
- 0,
+ base_offset,
instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
__ movsxbq(result, operand);
break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ movzxbq(result, operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ movsxwq(result, operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ movzxwq(result, operand);
break;
- case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ movsxlq(result, operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
DeoptimizeIf(negative, instr->environment());
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -2961,6 +2991,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ HLoadKeyed* hinstr = instr->hydrogen();
Register result = ToRegister(instr->result());
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
@@ -2970,24 +3001,48 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that
// case must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
+ if (hinstr->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
}
- // Load the result.
- __ movq(result,
+ bool requires_hole_check = hinstr->RequiresHoleCheck();
+ int offset = FixedArray::kHeaderSize - kHeapObjectTag;
+ Representation representation = hinstr->representation();
+
+ if (representation.IsInteger32() &&
+ hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+ ASSERT(!requires_hole_check);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+#endif
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ }
+
+ __ Load(result,
BuildFastArrayOperand(instr->elements(),
key,
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
+ offset,
+ instr->additional_index()),
+ representation);
// Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ if (requires_hole_check) {
+ if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
DeoptimizeIf(NegateCondition(smi), instr->environment());
} else {
@@ -2999,7 +3054,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3036,6 +3091,7 @@ Operand LCodeGen::BuildFastArrayOperand(
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rax));
@@ -3052,18 +3108,18 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
} else {
// Check for arguments adapter frame.
Label done, adapted;
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
- __ movq(result, rbp);
+ __ movp(result, rbp);
__ jmp(&done, Label::kNear);
// Arguments adaptor frame present.
__ bind(&adapted);
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
@@ -3087,7 +3143,7 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
__ j(equal, &done, Label::kNear);
// Arguments adaptor frame present. Get argument length from there.
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiToInteger32(result,
Operand(result,
ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -3105,21 +3161,24 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- // Do not transform the receiver to object for strict mode
- // functions.
- __ movq(kScratchRegister,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &receiver_ok, Label::kNear);
-
- // Do not transform the receiver to object for builtins.
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &receiver_ok, Label::kNear);
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ movp(kScratchRegister,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
+
+ // Do not transform the receiver to object for builtins.
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
+ }
// Normal function. Replace undefined or null with global receiver.
__ CompareRoot(receiver, Heap::kNullValueRootIndex);
@@ -3132,15 +3191,16 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
DeoptimizeIf(is_smi, instr->environment());
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok, Label::kNear);
+ __ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
+ __ movp(receiver,
+ Operand(receiver,
+ Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(receiver,
+ FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+
__ bind(&receiver_ok);
}
@@ -3161,7 +3221,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
DeoptimizeIf(above, instr->environment());
__ push(receiver);
- __ movq(receiver, length);
+ __ movp(receiver, length);
// Loop through the arguments pushing them onto the execution
// stack.
@@ -3183,9 +3243,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3202,25 +3260,23 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, rsi);
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result,
- Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ if (info()->IsOptimizing()) {
+ __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in rsi.
+ ASSERT(result.is(rsi));
+ }
}
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
__ push(rsi); // The context is the first argument.
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
@@ -3228,24 +3284,10 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
}
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register result = ToRegister(instr->result());
- __ movq(result, GlobalObjectOperand());
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
RDIState rdi_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3260,7 +3302,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Change context.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Set rax to arguments count if adaption is not needed. Assumes that rax
// is available to write to at this point.
@@ -3269,11 +3311,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ SetCallKind(rcx, call_kind);
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
// Set up deoptimization.
@@ -3284,23 +3325,63 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ ASSERT(ToRegister(instr->result()).is(rax));
- // Restore context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
+ }
+ generator.AfterCall();
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- RDI_UNINITIALIZED);
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ Set(rax, instr->arity());
+ }
+
+ // Change context.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ bool is_self_call = false;
+ if (instr->hydrogen()->function()->IsConstant()) {
+ Handle<JSFunction> jsfun = Handle<JSFunction>::null();
+ HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+ jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
+ is_self_call = jsfun.is_identical_to(info()->closure());
+ }
+
+ if (is_self_call) {
+ __ CallSelf();
+ } else {
+ Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
+ generator.BeforeCall(__ CallSize(target));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
@@ -3330,17 +3411,18 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateHeapNumber, 0, instr, instr->context());
// Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) __ movq(tmp, rax);
+ if (!tmp.is(rax)) __ movp(tmp, rax);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
- __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ shl(tmp2, Immediate(1));
__ shr(tmp2, Immediate(1));
- __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
@@ -3441,7 +3523,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ testq(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
__ Set(output_reg, 0);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&positive_sign);
}
@@ -3475,10 +3557,11 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
Label done, round_to_zero, below_one_half, do_not_compensate, restore;
- __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ __ movq(kScratchRegister, one_half);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half);
+ __ j(above, &below_one_half, Label::kNear);
// CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
__ addsd(xmm_scratch, input_reg);
@@ -3487,13 +3570,13 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cmpl(output_reg, Immediate(0x80000000));
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&below_one_half);
- __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
+ __ movq(kScratchRegister, minus_one_half);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero);
+ __ j(below_equal, &round_to_zero, Label::kNear);
// CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
// compare and compensate.
@@ -3512,7 +3595,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// No overflow because we already ruled out minint.
__ bind(&restore);
__ movq(input_reg, kScratchRegister); // Restore input_reg.
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&round_to_zero);
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
@@ -3546,7 +3629,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
Label done, sqrt;
// Check base for -Infinity. According to IEEE-754, double-precision
// -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
- __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+ __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3585,7 +3668,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(exponent, &no_deopt);
+ __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&no_deopt);
@@ -3602,67 +3685,6 @@ void LCodeGen::DoPower(LPower* instr) {
}
-void LCodeGen::DoRandom(LRandom* instr) {
- // Assert that register size is twice the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
-
- // Load native context
- Register global_object = ToRegister(instr->global_object());
- Register native_context = global_object;
- __ movq(native_context, FieldOperand(
- global_object, GlobalObject::kNativeContextOffset));
-
- // Load state (FixedArray of the native context's random seeds)
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- Register state = native_context;
- __ movq(state, FieldOperand(native_context, kRandomSeedOffset));
-
- // Load state[0].
- Register state0 = ToRegister(instr->scratch());
- __ movl(state0, FieldOperand(state, ByteArray::kHeaderSize));
- // Load state[1].
- Register state1 = ToRegister(instr->scratch2());
- __ movl(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- Register scratch3 = ToRegister(instr->scratch3());
- __ movzxwl(scratch3, state0);
- __ imull(scratch3, scratch3, Immediate(18273));
- __ shrl(state0, Immediate(16));
- __ addl(state0, scratch3);
- // Save state[0].
- __ movl(FieldOperand(state, ByteArray::kHeaderSize), state0);
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzxwl(scratch3, state1);
- __ imull(scratch3, scratch3, Immediate(36969));
- __ shrl(state1, Immediate(16));
- __ addl(state1, scratch3);
- // Save state[1].
- __ movl(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- Register random = state0;
- __ shll(random, Immediate(14));
- __ andl(state1, Immediate(0x3FFFF));
- __ addl(random, state1);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- XMMRegister result = ToDoubleRegister(instr->result());
- XMMRegister scratch4 = double_scratch0();
- __ movq(scratch3, V8_INT64_C(0x4130000000000000),
- RelocInfo::NONE64); // 1.0 x 2^20 as double
- __ movq(scratch4, scratch3);
- __ movd(result, random);
- __ xorps(result, scratch4);
- __ subsd(result, scratch4);
-}
-
-
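(Editorial note, not part of the diff.) The removed DoRandom inlined Math.random as two 16-bit multiply-with-carry streams whose seeds live in a per-native-context ByteArray, combined into 32 random bits and mapped to [0, 1) with an exponent trick. A C++ sketch of what the deleted assembly computed, with plain globals standing in for the context seeds:

#include <cstdint>
#include <cstring>

static uint32_t state0, state1;  // stand-ins for the context's two random seeds

double RandomSketch() {
  state0 = 18273 * (state0 & 0xFFFF) + (state0 >> 16);
  state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
  uint32_t random = (state0 << 14) + (state1 & 0x3FFFF);
  // Plant the 32 random bits in the low mantissa of 1.0 x 2^20, then subtract 2^20.
  uint64_t bits = UINT64_C(0x4130000000000000) | random;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 1048576.0;            // leaves random / 2^32, i.e. a value in [0, 1)
}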
void LCodeGen::DoMathExp(LMathExp* instr) {
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
@@ -3682,7 +3704,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
- __ j(equal, &zero, Label::kNear);
+ __ j(not_carry, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
Operand nan_operand = masm()->ExternalOperand(nan);
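(Editorial note, not part of the diff.) The `equal` -> `not_carry` switch above reads as a NaN fix. After `ucomisd(input_reg, +0.0)` the flags are ZF=CF=0 for input > 0, CF=1 for input < 0, ZF=1/CF=0 for input == 0, and ZF=CF=1 for an unordered (NaN) compare. The old `equal` condition (ZF=1) therefore also sent NaN down the &zero path, whereas `not_carry` (CF=0) is taken only for a true zero, letting NaN fall through with negative inputs to the canonical-NaN load below.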
@@ -3707,31 +3729,8 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
}
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(instr->HasPointerMap());
@@ -3740,79 +3739,30 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
RDI_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->key()).is(rcx));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+ CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- RDI_UNINITIALIZED);
}
void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
@@ -3826,20 +3776,20 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
- __ Move(rbx, instr->hydrogen()->property_cell());
+ __ Move(rbx, factory()->undefined_value());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
+ ArrayNoArgumentConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
@@ -3847,29 +3797,29 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
Label packed_case;
// We might need a change here
// look at the first argument
- __ movq(rcx, Operand(rsp, 0));
+ __ movp(rcx, Operand(rsp, 0));
__ testq(rcx, rcx);
- __ j(zero, &packed_case);
+ __ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
- override_mode);
+ ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
+ ArraySingleArgumentConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
+ ArrayNArgumentsConstructorStub stub(kind, override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
@@ -3878,29 +3828,35 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
__ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
- __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+ __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
- __ lea(result, Operand(base, instr->offset()));
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ lea(result, Operand(base, ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ lea(result, Operand(base, offset, times_1, 0));
+ }
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ HStoreNamedField* hinstr = instr->hydrogen();
Representation representation = instr->representation();
- HObjectAccess access = instr->hydrogen()->access();
+ HObjectAccess access = hinstr->access();
int offset = access.offset();
if (access.IsExternalMemory()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ ASSERT(!hinstr->NeedsWriteBarrier());
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
ASSERT(value.is(rax));
- ASSERT(!access.representation().IsSpecialization());
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
@@ -3912,11 +3868,14 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed = hinstr->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (FLAG_track_fields && representation.IsSmi()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsSmiConstant(operand_value)) {
+ if (!IsInteger32Constant(operand_value) &&
+ !IsSmiConstant(operand_value)) {
DeoptimizeIf(no_condition, instr->environment());
}
}
@@ -3927,28 +3886,31 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ if (!hinstr->value()->type().IsHeapObject()) {
Register value = ToRegister(instr->value());
Condition cc = masm()->CheckSmi(value);
DeoptimizeIf(cc, instr->environment());
+
+ // We know that value is a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
}
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
ASSERT(transition.is_null());
ASSERT(access.IsInobject());
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ ASSERT(!hinstr->NeedsWriteBarrier());
XMMRegister value = ToDoubleRegister(instr->value());
__ movsd(FieldOperand(object, offset), value);
return;
}
if (!transition.is_null()) {
- if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ if (!hinstr->NeedsWriteBarrierForMap()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
__ Move(kScratchRegister, transition);
- __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
__ RecordWriteField(object,
HeapObject::kMapOffset,
@@ -3961,32 +3923,52 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
- __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
- if (instr->value()->IsConstantOperand()) {
+ if (representation.IsSmi() &&
+ hinstr->value()->representation().IsInteger32()) {
+ ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(write_register, offset), representation);
+ __ AssertSmi(scratch);
+#endif
+ // Store int value directly to upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ representation = Representation::Integer32();
+ }
+
+ Operand operand = FieldOperand(write_register, offset);
+
+ if (instr->value()->IsRegister()) {
+ Register value = ToRegister(instr->value());
+ __ Store(operand, value, representation);
+ } else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (operand_value->IsRegister()) {
- Register value = ToRegister(operand_value);
- __ Store(FieldOperand(write_register, offset), value, representation);
+ if (IsInteger32Constant(operand_value)) {
+ ASSERT(!hinstr->NeedsWriteBarrier());
+ int32_t value = ToInteger32(operand_value);
+ if (representation.IsSmi()) {
+ __ Move(operand, Smi::FromInt(value));
+
+ } else {
+ __ movl(operand, Immediate(value));
+ }
+
} else {
Handle<Object> handle_value = ToHandle(operand_value);
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- __ Move(FieldOperand(write_register, offset), handle_value);
+ ASSERT(!hinstr->NeedsWriteBarrier());
+ __ Move(operand, handle_value);
}
- } else {
- Register value = ToRegister(instr->value());
- __ Store(FieldOperand(write_register, offset), value, representation);
}
- if (instr->hydrogen()->NeedsWriteBarrier()) {
+ if (hinstr->NeedsWriteBarrier()) {
Register value = ToRegister(instr->value());
Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
// Update the write barrier for the object for in-object properties.
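(Editorial note, not part of the diff.) The new fast path stores an untagged int32 directly into the upper half of an already-initialized smi field. On x64 a smi is the 32-bit integer held in the upper half of a 64-bit word with the low (tag) half zero, which is exactly what the two STATIC_ASSERTs above pin down; a sketch of the layout being exploited:

#include <cstdint>

uint64_t SmiBits(int32_t value) {
  // kSmiTag == 0 and kSmiTagSize + kSmiShiftSize == 32: the tag half stays zero.
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
}

For a little-endian field that already holds a smi, a 32-bit write of `value` at byte offset + kPointerSize/2 produces the same bits as a full 64-bit store of SmiBits(value), saving the tagging shift. The same trick is applied to keyed stores in DoStoreKeyedFixedArray further down.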
@@ -4002,13 +3984,13 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(),
+ instr->strict_mode_flag());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4084,37 +4066,51 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ movsxlq(key_reg, key_reg);
}
}
+ int base_offset = instr->is_fixed_typed_array()
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
elements_kind,
- 0,
+ base_offset,
instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
__ cvtsd2ss(value, value);
__ movss(operand, value);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ movb(operand, value);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ movw(operand, value);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ movl(operand, value);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -4151,7 +4147,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Label have_value;
__ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
+ __ j(parity_odd, &have_value, Label::kNear); // NaN.
__ Set(kScratchRegister, BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
@@ -4172,7 +4168,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register elements = ToRegister(instr->elements());
+ HStoreKeyed* hinstr = instr->hydrogen();
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
@@ -4181,38 +4177,67 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
// input gets replaced during bound check elimination with the index
// argument to the bounds check, which can be tagged, so that case
// must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
+ if (hinstr->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
}
+ int offset = FixedArray::kHeaderSize - kHeapObjectTag;
+ Representation representation = hinstr->value()->representation();
+
+ if (representation.IsInteger32()) {
+ ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+#endif
+ // Store int value directly to upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ }
+
Operand operand =
BuildFastArrayOperand(instr->elements(),
key,
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
+ offset,
instr->additional_index());
+
if (instr->value()->IsRegister()) {
- __ movq(operand, ToRegister(instr->value()));
+ __ Store(operand, ToRegister(instr->value()), representation);
} else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (IsInteger32Constant(operand_value)) {
- Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
- __ Move(operand, smi_value);
+ int32_t value = ToInteger32(operand_value);
+ if (representation.IsSmi()) {
+ __ Move(operand, Smi::FromInt(value));
+
+ } else {
+ __ movl(operand, Immediate(value));
+ }
} else {
Handle<Object> handle_value = ToHandle(operand_value);
__ Move(operand, handle_value);
}
}
- if (instr->hydrogen()->NeedsWriteBarrier()) {
+ if (hinstr->NeedsWriteBarrier()) {
+ Register elements = ToRegister(instr->elements());
ASSERT(instr->value()->IsRegister());
Register value = ToRegister(instr->value());
- ASSERT(!instr->key()->IsConstantOperand());
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ ASSERT(!key->IsConstantOperand());
+ SmiCheck check_needed = hinstr->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key));
@@ -4228,7 +4253,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4239,6 +4264,7 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
@@ -4263,16 +4289,17 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ j(not_equal, &not_applicable);
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
+ __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp()), kDontSaveFPRegs);
} else {
+ ASSERT(ToRegister(instr->context()).is(rsi));
PushSafepointRegistersScope scope(this);
if (!object_reg.is(rax)) {
- __ movq(rax, object_reg);
+ __ movp(rax, object_reg);
}
__ Move(rbx, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
@@ -4295,9 +4322,11 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(instr->hydrogen()->flags());
+ ASSERT(ToRegister(instr->context()).is(rsi));
+ ASSERT(ToRegister(instr->left()).is(rdx));
+ ASSERT(ToRegister(instr->right()).is(rax));
+ StringAddStub stub(instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4349,7 +4378,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ Integer32ToSmi(index, index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(
+ Runtime::kStringCharCodeAt, 2, instr, instr->context());
__ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
@@ -4381,7 +4411,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
__ j(above, deferred->entry());
__ movsxlq(char_code, char_code);
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ movq(result, FieldOperand(result,
+ __ movp(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
__ CompareRoot(result, Heap::kUndefinedValueRootIndex);
@@ -4402,7 +4432,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4522,8 +4552,17 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
// integer value.
__ StoreToSafepointRegisterSlot(reg, Immediate(0));
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- if (!reg.is(rax)) __ movq(reg, rax);
+ // NumberTagU uses the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+
+ if (!reg.is(rax)) __ movp(reg, rax);
// Done. Put the value in temp_xmm into the value of the allocated heap
// number.
@@ -4570,11 +4609,18 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
{
PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- // Ensure that value in rax survives popping registers.
- __ movq(kScratchRegister, rax);
+ // NumberTagD uses the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ movp(kScratchRegister, rax);
}
- __ movq(reg, kScratchRegister);
+ __ movp(reg, kScratchRegister);
}
@@ -4620,7 +4666,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (can_convert_undefined_to_nan) {
- __ j(not_equal, &convert);
+ __ j(not_equal, &convert, Label::kNear);
} else {
DeoptimizeIf(not_equal, env);
}
@@ -4823,7 +4869,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->value());
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
if (instr->hydrogen()->is_interval_check()) {
InstanceType first;
@@ -4877,7 +4923,11 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this);
__ push(object);
- CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ Set(rsi, 0);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+
__ testq(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr->environment());
@@ -4918,12 +4968,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Label success;
for (int i = 0; i < map_set.size() - 1; i++) {
Handle<Map> map = map_set.at(i).handle();
- __ CompareMap(reg, map, &success);
- __ j(equal, &success);
+ __ CompareMap(reg, map);
+ __ j(equal, &success, Label::kNear);
}
Handle<Map> map = map_set.at(map_set.size() - 1).handle();
- __ CompareMap(reg, map, &success);
+ __ CompareMap(reg, map);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
} else {
@@ -4955,8 +5005,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
-
- __ JumpIfSmi(input_reg, &is_smi);
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ __ JumpIfSmi(input_reg, &is_smi, dist);
// Check for heap number
__ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -4967,7 +5017,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ movq(input_reg, Immediate(0));
+ __ movp(input_reg, Immediate(0));
__ jmp(&done, Label::kNear);
// Heap number
@@ -5066,16 +5116,21 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(size));
}
+ int flags = 0;
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -5088,6 +5143,7 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
Label materialized;
// Registers will be used as follows:
// rcx = literals array.
@@ -5096,7 +5152,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
__ Move(rcx, instr->hydrogen()->literals());
- __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
@@ -5107,13 +5163,13 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ Push(instr->hydrogen()->pattern());
__ Push(instr->hydrogen()->flags());
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
__ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
__ push(rbx);
@@ -5125,19 +5181,20 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ __ movp(rdx, FieldOperand(rbx, i));
+ __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movp(FieldOperand(rax, i), rdx);
+ __ movp(FieldOperand(rax, i + kPointerSize), rcx);
}
if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movp(FieldOperand(rax, size - kPointerSize), rdx);
}
}
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
@@ -5157,6 +5214,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
@@ -5177,44 +5235,49 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
-
- Condition final_branch_condition =
- EmitTypeofIs(instr->TrueLabel(chunk_),
- instr->FalseLabel(chunk_), input, instr->type_literal());
+ Condition final_branch_condition = EmitTypeofIs(instr, input);
if (final_branch_condition != no_condition) {
EmitBranch(instr, final_branch_condition);
}
}
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Handle<String> type_name = instr->type_literal();
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
+
+ Label::Distance true_distance = left_block == next_block ? Label::kNear
+ : Label::kFar;
+ Label::Distance false_distance = right_block == next_block ? Label::kNear
+ : Label::kFar;
Condition final_branch_condition = no_condition;
if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
+ __ JumpIfSmi(input, true_label, true_distance);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
+ __ j(above_equal, false_label, false_distance);
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
} else if (type_name->Equals(heap()->symbol_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
@@ -5224,39 +5287,39 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (type_name->Equals(heap()->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
+ __ j(equal, true_label, true_distance);
+ __ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
}
__ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
- __ j(below, false_label);
+ __ j(below, false_label, false_distance);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label);
+ __ j(above, false_label, false_distance);
// Check for undetectable objects => false.
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
} else {
- __ jmp(false_label);
+ __ jmp(false_label, false_distance);
}
return final_branch_condition;
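(Editorial note, not part of the diff.) Threading the branch targets into EmitTypeofIs lets every jump pick a Label::Distance: kNear is used only when the target is the block emitted immediately next, where the short branch range comfortably holds, and everything else stays kFar. The payoff is purely code size; as a reminder of the x86-64 encodings involved:

  jmp  rel8   EB xx              (2 bytes, Label::kNear)
  jmp  rel32  E9 xx xx xx xx     (5 bytes, Label::kFar)
  jcc  rel8   7x xx              (2 bytes)
  jcc  rel32  0F 8x xx xx xx xx  (6 bytes)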
@@ -5273,14 +5336,14 @@ void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
void LCodeGen::EmitIsConstructCall(Register temp) {
// Get the frame pointer for the calling frame.
- __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker, Label::kNear);
- __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
@@ -5290,20 +5353,21 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
}
+ last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
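(Editorial note, not part of the diff.) The rewrite keeps the padding rule but moves the `last_lazy_deopt_pc_` update into EnsureSpaceForLazyDeopt itself, so callers (DoLazyBailout above, DoStackCheck below) no longer repeat it. With illustrative numbers rather than the real patch size: if the previous lazy-deopt site ended at pc 100, space_needed is 5 and the current pc is 102, three Nop bytes are emitted so that later patching of the earlier site cannot overwrite the instructions that follow it.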
@@ -5338,7 +5402,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
ASSERT(instr->HasEnvironment());
@@ -5369,11 +5433,13 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done, Label::kNear);
+
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(rsi));
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5385,7 +5451,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(below, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5412,6 +5477,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(equal, instr->environment());
@@ -5430,7 +5496,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
@@ -5451,14 +5517,14 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Label load_cache, done;
__ EnumLength(result, map);
__ Cmp(result, Smi::FromInt(0));
- __ j(not_equal, &load_cache);
+ __ j(not_equal, &load_cache, Label::kNear);
__ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ movq(result,
+ __ movp(result,
FieldOperand(result, DescriptorArray::kEnumCacheOffset));
- __ movq(result,
+ __ movp(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
@@ -5481,18 +5547,18 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Label out_of_object, done;
__ SmiToInteger32(index, index);
__ cmpl(index, Immediate(0));
- __ j(less, &out_of_object);
- __ movq(object, FieldOperand(object,
+ __ j(less, &out_of_object, Label::kNear);
+ __ movp(object, FieldOperand(object,
index,
times_pointer_size,
JSObject::kHeaderSize));
__ jmp(&done, Label::kNear);
__ bind(&out_of_object);
- __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
__ negl(index);
// Index is now equal to out of object property index plus 1.
- __ movq(object, FieldOperand(object,
+ __ movp(object, FieldOperand(object,
index,
times_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index c387d81ac4..0f1a9cdb70 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -197,7 +197,10 @@ class LCodeGen: public LCodeGenBase {
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
+
+ void LoadContextFromDeferred(LOperand* context);
enum RDIState {
RDI_UNINITIALIZED,
@@ -210,7 +213,6 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
RDIState rdi_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -224,6 +226,10 @@ class LCodeGen: public LCodeGenBase {
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+ bool DeoptEveryNTimes() {
+ return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+ }
+
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
@@ -231,7 +237,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -246,6 +251,10 @@ class LCodeGen: public LCodeGenBase {
uint32_t offset,
uint32_t additional_index = 0);
+ Operand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
void EmitIntegerMathAbs(LMathAbs* instr);
void EmitSmiMathAbs(LMathAbs* instr);
@@ -263,6 +272,8 @@ class LCodeGen: public LCodeGenBase {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
@@ -278,10 +289,7 @@ class LCodeGen: public LCodeGenBase {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
+ Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index 8d1c2a2835..5b4e32d2c4 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -172,23 +172,23 @@ void LGapResolver::EmitMove(int index) {
Register src = cgen_->ToRegister(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
}
} else if (source->IsStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movq(kScratchRegister, src);
- __ movq(dst, kScratchRegister);
+ __ movp(kScratchRegister, src);
+ __ movp(dst, kScratchRegister);
}
} else if (source->IsConstantOperand()) {
@@ -198,7 +198,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ __ Set(dst, cgen_->ToInteger32(constant_source));
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
@@ -209,7 +209,7 @@ void LGapResolver::EmitMove(int index) {
if (int_val == 0) {
__ xorps(dst, dst);
} else {
- __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
+ __ Set(kScratchRegister, int_val);
__ movq(dst, kScratchRegister);
}
} else {
@@ -220,10 +220,10 @@ void LGapResolver::EmitMove(int index) {
} else if (cgen_->IsInteger32Constant(constant_source)) {
// Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
// value.
- __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(kScratchRegister, cgen_->ToHandle(constant_source));
- __ movq(dst, kScratchRegister);
+ __ movp(dst, kScratchRegister);
}
}
@@ -271,9 +271,9 @@ void LGapResolver::EmitSwap(int index) {
cgen_->ToRegister(source->IsRegister() ? source : destination);
Operand mem =
cgen_->ToOperand(source->IsRegister() ? destination : source);
- __ movq(kScratchRegister, mem);
- __ movq(mem, reg);
- __ movq(reg, kScratchRegister);
+ __ movp(kScratchRegister, mem);
+ __ movp(mem, reg);
+ __ movp(reg, kScratchRegister);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
@@ -281,9 +281,9 @@ void LGapResolver::EmitSwap(int index) {
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
__ movsd(xmm0, src);
- __ movq(kScratchRegister, dst);
+ __ movp(kScratchRegister, dst);
__ movsd(dst, xmm0);
- __ movq(src, kScratchRegister);
+ __ movp(src, kScratchRegister);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
@@ -305,7 +305,7 @@ void LGapResolver::EmitSwap(int index) {
Operand other_operand = cgen_->ToOperand(other);
__ movsd(xmm0, other_operand);
__ movsd(other_operand, reg);
- __ movsd(reg, xmm0);
+ __ movaps(reg, xmm0);
} else {
// No other combinations are possible.
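(Editorial note, not part of the diff.) The movsd -> movaps change in the register/memory swap above is a common XMM idiom: `movsd xmm_dst, xmm_src` writes only the low 64 bits and leaves the upper half of the destination intact, so the instruction carries a dependency on the old value of xmm_dst; `movaps` copies the whole register, breaking that false dependency, and has a shorter encoding.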
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 8f26a09e96..e342acbcb8 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -259,7 +259,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
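(Editorial note, not part of the diff.) The `*...->ToCString()` -> `...->ToCString().get()` edits here and in LStoreNamedGeneric::PrintDataTo below appear to track a SmartArrayPointer API change in this V8 roll: the raw char* is now obtained with .get() instead of the dereference operator.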
@@ -275,11 +275,23 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
- stream->Add(" + %d", offset());
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -304,28 +316,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[rcx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -383,7 +373,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -507,6 +497,13 @@ LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
}
+LOperand* LChunkBuilder::UseTempRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseTempRegister(value);
+}
+
+
LOperand* LChunkBuilder::Use(HValue* value) {
return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
}
@@ -568,8 +565,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -577,41 +573,36 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
XMMRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -781,10 +772,11 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left_operand = UseFixed(left, rdx);
LOperand* right_operand = UseFixed(right, rax);
LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -865,10 +857,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
@@ -927,90 +921,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
- }
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
- }
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1042,6 +952,9 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return new(zone()) LCmpMapAndBranch(value);
@@ -1063,7 +976,8 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LOperand* left = UseFixed(instr->left(), rax);
LOperand* right = UseFixed(instr->right(), rdx);
- LInstanceOf* result = new(zone()) LInstanceOf(left, right);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1071,7 +985,8 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
+ new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->context(), rsi),
+ UseFixed(instr->left(), rax),
FixedTemp(rdi));
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1113,11 +1028,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* inner_object) {
- LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
- LInnerAllocatedObject* result =
- new(zone()) LInnerAllocatedObject(base_object);
- return DefineAsRegister(result);
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1129,48 +1044,55 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, rsi);
}
- return NULL;
+ return DefineAsRegister(new(zone()) LContext);
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), rdi);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new(zone()) LGlobalObject);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
-}
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1181,9 +1103,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
@@ -1209,8 +1128,9 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(input);
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
@@ -1219,29 +1139,7 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathLog* result = new(zone()) LMathLog(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
}
@@ -1270,52 +1168,34 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* key = UseFixed(instr->key(), rcx);
- LCallKeyed* result = new(zone()) LCallKeyed(key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
- LCallNew* result = new(zone()) LCallNew(constructor);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
- LCallFunction* result = new(zone()) LCallFunction(function);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, rax), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LCallRuntime* result = new(zone()) LCallRuntime(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1358,12 +1238,12 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
+ if (instr->RightIsPowerOf2()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
+ LOperand* value = UseRegister(instr->left());
LDivI* div =
new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ return AssignEnvironment(DefineAsRegister(div));
}
// The temporary operand is necessary to ensure that right is not allocated
// into rdx.
@@ -1380,25 +1260,6 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
if (!right->IsConstant()) {
@@ -1441,7 +1302,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(left->representation().Equals(instr->representation()));
ASSERT(right->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
+ if (instr->RightIsPowerOf2()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
UseOrConstant(right),
@@ -1451,11 +1312,6 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegisterAtStart(right),
- NULL);
- return AssignEnvironment(DefineSameAsFirst(mod));
} else {
// The temporary operand is necessary to ensure that right is not
// allocated into edx.
@@ -1544,6 +1400,21 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
result = AssignEnvironment(result);
}
return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ HValue* right_candidate = instr->right();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
@@ -1588,25 +1459,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseTempRegister(instr->global_object());
- LOperand* scratch = TempRegister();
- LOperand* scratch2 = TempRegister();
- LOperand* scratch3 = TempRegister();
- LRandom* result = new(zone()) LRandom(
- global_object, scratch, scratch2, scratch3);
- return DefineFixedDouble(result, xmm1);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left = UseFixed(instr->left(), rdx);
LOperand* right = UseFixed(instr->right(), rax);
- LCmpT* result = new(zone()) LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1655,6 +1514,15 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LCompareMinusZeroAndBranch(value);
+}
+
+
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
@@ -1689,10 +1557,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left = UseFixed(instr->left(), rdx);
LOperand* right = UseFixed(instr->right(), rax);
LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
+ new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
@@ -1738,19 +1607,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object);
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), rax);
LDateField* result = new(zone()) LDateField(object, instr->index());
@@ -1758,14 +1614,28 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(rcx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), rcx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = FLAG_debug_code
+ ? UseRegisterAtStart(instr->value())
+ : UseRegisterOrConstantAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), rsi) : NULL;
+ LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+ index, value);
+ if (FLAG_debug_code) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -1790,12 +1660,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), rax);
- return MarkAsCall(new(zone()) LThrow(value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -1976,9 +1840,10 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), rax),
- parameter_count);
+ return new(zone()) LReturn(
+ UseFixed(instr->value(), rax), context, parameter_count);
}
@@ -2011,8 +1876,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* global_object = UseFixed(instr->global_object(), rax);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2027,15 +1894,6 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), rdx);
- LOperand* value = UseFixed(instr->value(), rax);
- LStoreGlobalGeneric* result = new(zone()) LStoreGlobalGeneric(global_object,
- value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2048,12 +1906,11 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LOperand* context;
LOperand* value;
LOperand* temp;
+ context = UseRegister(instr->context());
if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
temp = TempRegister();
} else {
- context = UseRegister(instr->context());
value = UseRegister(instr->value());
temp = NULL;
}
@@ -2080,8 +1937,9 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->object(), rax);
- LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
+ LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2098,37 +1956,29 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
result = new(zone()) LLoadKeyed(obj, key);
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(backing_store, key);
}
DefineAsRegister(result);
bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ (elements_kind == EXTERNAL_UINT32_ELEMENTS) ||
+ (elements_kind == UINT32_ELEMENTS);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
return can_deoptimize ? AssignEnvironment(result) : result;
@@ -2136,10 +1986,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* key = UseFixed(instr->key(), rax);
- LLoadKeyedGeneric* result = new(zone()) LLoadKeyedGeneric(object, key);
+ LLoadKeyedGeneric* result =
+ new(zone()) LLoadKeyedGeneric(context, object, key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2147,24 +1999,27 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
LOperand* key = NULL;
LOperand* val = NULL;
- if (instr->value()->representation().IsDouble()) {
+ Representation value_representation = instr->value()->representation();
+ if (value_representation.IsDouble()) {
object = UseRegisterAtStart(instr->elements());
val = UseTempRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
- ASSERT(instr->value()->representation().IsSmiOrTagged());
- object = UseTempRegister(instr->elements());
+ ASSERT(value_representation.IsSmiOrTagged() ||
+ value_representation.IsInteger32());
if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
+ object = UseRegisterAtStart(instr->elements());
val = UseRegisterOrConstantAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
}
@@ -2174,25 +2029,28 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ (instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS;
LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* key = UseFixed(instr->key(), rcx);
LOperand* value = UseFixed(instr->value(), rax);
@@ -2202,7 +2060,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->value()->representation().IsTagged());
LStoreKeyedGeneric* result =
- new(zone()) LStoreKeyedGeneric(object, key, value);
+ new(zone()) LStoreKeyedGeneric(context, object, key, value);
return MarkAsCall(result, instr);
}
@@ -2214,12 +2072,13 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
+ LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(
+ object, NULL, new_map_reg, temp_reg);
return result;
} else {
+ LOperand* context = UseFixed(instr->context(), rsi);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, NULL);
+ new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
return AssignPointerMap(result);
}
}
@@ -2271,7 +2130,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
} else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
- val = UseTempRegister(instr->value());
+ val = UseRegister(instr->value());
} else if (FLAG_track_double_fields &&
instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
@@ -2296,55 +2155,67 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* value = UseFixed(instr->value(), rax);
- LStoreNamedGeneric* result = new(zone()) LStoreNamedGeneric(object, value);
+ LStoreNamedGeneric* result =
+ new(zone()) LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), rax),
- instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), rax), instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp);
+ LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LRegExpLiteral* result = new(zone()) LRegExpLiteral(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LFunctionLiteral* result = new(zone()) LFunctionLiteral(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2366,7 +2237,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
- Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2391,7 +2262,9 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LCallStub* result = new(zone()) LCallStub(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2436,12 +2309,17 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseAtStart(instr->value()));
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LOperand* value = UseAtStart(instr->value());
+ LTypeof* result = new(zone()) LTypeof(context, value);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2476,10 +2354,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
@@ -2491,8 +2372,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2524,8 +2404,9 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->enumerable(), rax);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 1a889b3964..cfaed15077 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -52,12 +52,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -72,6 +69,7 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
+ V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -93,13 +91,10 @@ class LCodeGen;
V(Drop) \
V(DummyUse) \
V(Dummy) \
- V(ElementsKind) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -118,7 +113,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
@@ -130,7 +124,6 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
V(MathExp) \
V(MathFloor) \
V(MathFloorOfDiv) \
@@ -138,9 +131,7 @@ class LCodeGen;
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -148,13 +139,12 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
- V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -163,7 +153,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -175,7 +164,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
@@ -184,7 +172,6 @@ class LCodeGen;
V(Uint32ToDouble) \
V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
@@ -302,10 +289,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -317,6 +302,15 @@ class LTemplateInstruction : public LInstruction {
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
@@ -481,14 +475,16 @@ class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -556,6 +552,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
};
@@ -736,12 +733,14 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LMathAbs(LOperand* value) {
+ explicit LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
inputs_[0] = value;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
@@ -761,42 +760,6 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathTan(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
-};
-
-
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
@@ -865,6 +828,21 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCompareMinusZeroAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+
class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
@@ -930,15 +908,19 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
- explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ explicit LStringCompareAndBranch(LOperand* context,
+ LOperand* left,
+ LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
@@ -1016,15 +998,17 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -1033,28 +1017,32 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
temps_[0] = temp;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -1246,32 +1234,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LValueOf(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
@@ -1289,39 +1251,39 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
- inputs_[2] = value;
}
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
- private:
- String::Encoding encoding_;
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
};
@@ -1375,28 +1337,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
- LRandom(LOperand* global_object,
- LOperand* scratch,
- LOperand* scratch2,
- LOperand* scratch3) {
- inputs_[0] = global_object;
- temps_[0] = scratch;
- temps_[1] = scratch2;
- temps_[2] = scratch3;
- }
-
- LOperand* global_object() { return inputs_[0]; }
- LOperand* scratch() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
- LOperand* scratch3() const { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1420,17 +1360,22 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
virtual Opcode opcode() const V8_OVERRIDE {
return LInstruction::kArithmeticT;
@@ -1443,14 +1388,18 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value, LOperand* parameter_count) {
+ explicit LReturn(LOperand* value,
+ LOperand* context,
+ LOperand* parameter_count) {
inputs_[0] = value;
- inputs_[1] = parameter_count;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[1]; }
bool has_constant_parameter_count() {
return parameter_count()->IsConstantOperand();
@@ -1459,7 +1408,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
ASSERT(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
- LOperand* parameter_count() { return inputs_[1]; }
+ LOperand* parameter_count() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
DECLARE_HYDROGEN_ACCESSOR(Return)
@@ -1479,16 +1428,18 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
+ explicit LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
@@ -1515,20 +1466,6 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
@@ -1542,6 +1479,12 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -1552,17 +1495,19 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
+ LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
};
@@ -1573,16 +1518,18 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
};
@@ -1603,25 +1550,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
- }
-
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1702,19 +1630,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInnerAllocatedObject(LOperand* base_object) {
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
+ inputs_[1] = offset;
}
- LOperand* base_object() { return inputs_[0]; }
- int offset() { return hydrogen()->offset(); }
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
- DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -1732,65 +1660,29 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LOuterContext(LOperand* context) {
+ explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
-
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInvokeFunction(LOperand* function) {
+ explicit LCallJSFunction(LOperand* function) {
inputs_[0] = function;
}
LOperand* function() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -1798,79 +1690,81 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+ LOperand* target() const { return inputs_[0]; }
- LOperand* key() { return inputs_[0]; }
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ ZoneList<LOperand*> inputs_;
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- LOperand* function() { return inputs_[0]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const {return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- int arity() const { return hydrogen()->argument_count() - 1; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1881,13 +1775,15 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
@@ -1898,8 +1794,14 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -2119,15 +2021,17 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2148,6 +2052,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2162,17 +2072,22 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* object,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ inputs_[3] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2183,17 +2098,20 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
+ LOperand* context,
LOperand* new_map_temp,
LOperand* temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
temps_[1] = temp;
}
LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[1]; }
LOperand* new_map_temp() { return temps_[0]; }
LOperand* temp() { return temps_[1]; }
@@ -2228,43 +2146,49 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
@@ -2374,14 +2298,16 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LAllocate(LOperand* size, LOperand* temp) {
- inputs_[0] = size;
+ LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = size;
temps_[0] = temp;
}
- LOperand* size() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
@@ -2389,15 +2315,27 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
@@ -2416,13 +2354,15 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2470,8 +2410,14 @@ class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2482,13 +2428,15 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
@@ -2549,18 +2497,17 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
allocator_(allocator),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
@@ -2575,15 +2522,10 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
@@ -2599,7 +2541,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2632,6 +2573,9 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in a register that may be trashed.
MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+ // An input operand in a register that may be trashed or a constant operand.
+ MUST_USE_RESULT LOperand* UseTempRegisterOrConstant(HValue* value);
+
// An input operand in a register or stack slot.
MUST_USE_RESULT LOperand* Use(HValue* value);
MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
@@ -2649,7 +2593,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2658,22 +2602,16 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ XMMRegister reg);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);
@@ -2691,10 +2629,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
@@ -2707,12 +2641,10 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index a18ff0d274..c0ae4e8d71 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -45,7 +45,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false),
root_array_available_(true) {
if (isolate() != NULL) {
@@ -80,7 +79,7 @@ Operand MacroAssembler::ExternalOperand(ExternalReference target,
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
}
- movq(scratch, target);
+ Move(scratch, target);
return Operand(scratch, 0);
}
@@ -90,7 +89,7 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
intptr_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -98,8 +97,8 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
if (destination.is(rax)) {
load_rax(source);
} else {
- movq(kScratchRegister, source);
- movq(destination, Operand(kScratchRegister, 0));
+ Move(kScratchRegister, source);
+ movp(destination, Operand(kScratchRegister, 0));
}
}
@@ -109,7 +108,7 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
intptr_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
+ movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
}
}
@@ -117,8 +116,8 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
if (source.is(rax)) {
store_rax(destination);
} else {
- movq(kScratchRegister, destination);
- movq(Operand(kScratchRegister, 0), source);
+ Move(kScratchRegister, destination);
+ movp(Operand(kScratchRegister, 0), source);
}
}
@@ -134,7 +133,7 @@ void MacroAssembler::LoadAddress(Register destination,
}
}
// Safe code.
- movq(destination, source);
+ Move(destination, source);
}
@@ -155,7 +154,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
return size;
}
}
- // Size of movq(destination, src);
+ // Size of movp(destination, src);
return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}
@@ -164,7 +163,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
if (is_int32(address) && !Serializer::enabled()) {
if (emit_debug_code()) {
- movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
push(Immediate(static_cast<int32_t>(address)));
return;
@@ -176,7 +175,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- movq(destination, Operand(kRootRegister,
+ movp(destination, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -185,7 +184,7 @@ void MacroAssembler::LoadRootIndexed(Register destination,
Register variable_offset,
int fixed_offset) {
ASSERT(root_array_available_);
- movq(destination,
+ movp(destination,
Operand(kRootRegister,
variable_offset, times_pointer_size,
(fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
@@ -194,7 +193,7 @@ void MacroAssembler::LoadRootIndexed(Register destination,
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
+ movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
source);
}
@@ -235,7 +234,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Load store buffer top.
LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Store pointer to buffer.
- movq(Operand(scratch, 0), addr);
+ movp(Operand(scratch, 0), addr);
// Increment buffer top.
addq(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
@@ -276,20 +275,21 @@ void MacroAssembler::InNewSpace(Register object,
// case the size of the new space is different between the snapshot maker
// and the running system.
if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
+ Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
and_(scratch, kScratchRegister);
} else {
- movq(scratch, ExternalReference::new_space_mask(isolate()));
+ Move(scratch, ExternalReference::new_space_mask(isolate()));
and_(scratch, object);
}
- movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
+ Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpq(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
intptr_t new_space_start =
reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
+ Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
+ Assembler::RelocInfoNone());
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
@@ -310,11 +310,6 @@ void MacroAssembler::RecordWriteField(
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !dst.is(rsi));
-
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -345,8 +340,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(dst, kZapValue, Assembler::RelocInfoNone());
}
}
@@ -379,8 +374,8 @@ void MacroAssembler::RecordWriteArray(Register object,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(index, kZapValue, Assembler::RelocInfoNone());
}
}
@@ -391,11 +386,6 @@ void MacroAssembler::RecordWrite(Register object,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !address.is(rsi));
-
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
@@ -414,6 +404,10 @@ void MacroAssembler::RecordWrite(Register object,
bind(&ok);
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
@@ -445,8 +439,8 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ Move(address, kZapValue, Assembler::RelocInfoNone());
+ Move(value, kZapValue, Assembler::RelocInfoNone());
}
}
@@ -511,17 +505,8 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -534,20 +519,17 @@ void MacroAssembler::Abort(BailoutReason reason) {
#endif
push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE64);
- push(kScratchRegister);
- movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
- RelocInfo::NONE64);
+ Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
+ Assembler::RelocInfoNone());
push(kScratchRegister);
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Control will not return here.
int3();
@@ -561,8 +543,6 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -574,8 +554,7 @@ void MacroAssembler::StubReturn(int argc) {
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -680,7 +659,7 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
void MacroAssembler::CallApiFunctionAndReturn(
- Address function_address,
+ Register function_address,
Address thunk_address,
Register thunk_last_arg,
int stack_space,
@@ -706,13 +685,14 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate());
+ ASSERT(rdx.is(function_address) || r8.is(function_address));
// Allocate HandleScope in callee-save registers.
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
Register base_reg = r15;
- movq(base_reg, next_address);
- movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
- movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ Move(base_reg, next_address);
+ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
+ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
if (FLAG_log_timer_events) {
@@ -730,19 +710,18 @@ void MacroAssembler::CallApiFunctionAndReturn(
bool* is_profiling_flag =
isolate()->cpu_profiler()->is_profiling_address();
STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- movq(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
cmpb(Operand(rax, 0), Immediate(0));
j(zero, &profiler_disabled);
// Third parameter is the address of the actual getter function.
- movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE);
- movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
+ Move(thunk_last_arg, function_address);
+ Move(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
jmp(&end_profiler_check);
bind(&profiler_disabled);
// Call the api function!
- movq(rax, reinterpret_cast<Address>(function_address),
- RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, function_address);
bind(&end_profiler_check);
@@ -759,19 +738,19 @@ void MacroAssembler::CallApiFunctionAndReturn(
}
// Load the value from ReturnValue
- movq(rax, return_value_operand);
+ movp(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
- movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
+ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
// Check if the function scheduled an exception.
- movq(rsi, scheduled_exception_address);
+ Move(rsi, scheduled_exception_address);
Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
bind(&exception_handled);
@@ -783,7 +762,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
Register map = rcx;
JumpIfSmi(return_value, &ok, Label::kNear);
- movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
+ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
CmpInstanceType(map, FIRST_NONSTRING_TYPE);
j(below, &ok, Label::kNear);
@@ -813,7 +792,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
- movq(rsi, *context_restore_operand);
+ movp(rsi, *context_restore_operand);
}
LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
@@ -827,13 +806,13 @@ void MacroAssembler::CallApiFunctionAndReturn(
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
- movq(prev_limit_reg, rax);
+ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
+ movp(prev_limit_reg, rax);
LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
LoadAddress(rax,
ExternalReference::delete_handle_scope_extensions(isolate()));
call(rax);
- movq(rax, prev_limit_reg);
+ movp(rax, prev_limit_reg);
jmp(&leave_exit_frame);
}
@@ -858,16 +837,16 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
+ InvokeCode(rdx, expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- movq(target, FieldOperand(target,
+ movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ movp(target, FieldOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
@@ -876,7 +855,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
ASSERT(!target.is(rdi));
// Load the JavaScript builtin function from the builtins object.
GetBuiltinFunction(rdi, id);
- movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
@@ -950,24 +929,32 @@ void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
ASSERT(!r.IsDouble());
- if (r.IsByte()) {
+ if (r.IsInteger8()) {
+ movsxbq(dst, src);
+ } else if (r.IsUInteger8()) {
movzxbl(dst, src);
+ } else if (r.IsInteger16()) {
+ movsxwq(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzxwl(dst, src);
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
- movq(dst, src);
+ movp(dst, src);
}
}
void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
ASSERT(!r.IsDouble());
- if (r.IsByte()) {
+ if (r.IsInteger8() || r.IsUInteger8()) {
movb(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ movw(dst, src);
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
- movq(dst, src);
+ movp(dst, src);
}
}
@@ -980,17 +967,22 @@ void MacroAssembler::Set(Register dst, int64_t x) {
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movq(dst, x, RelocInfo::NONE64);
+ movq(dst, x);
}
}
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
+void MacroAssembler::Set(const Operand& dst, intptr_t x) {
+ if (kPointerSize == kInt64Size) {
+ if (is_int32(x)) {
+ movp(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Set(kScratchRegister, x);
+ movp(dst, kScratchRegister);
+ }
} else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
+ ASSERT(kPointerSize == kInt32Size);
+ movp(dst, Immediate(static_cast<int32_t>(x)));
}
}
@@ -1045,18 +1037,10 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
- movq(dst,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE64);
+ Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
+ Assembler::RelocInfoNone());
cmpq(dst, kSmiConstantRegister);
- if (allow_stub_calls()) {
- Assert(equal, kUninitializedKSmiConstantRegister);
- } else {
- Label ok;
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
+ Assert(equal, kUninitializedKSmiConstantRegister);
}
int value = source->value();
if (value == 0) {
@@ -1088,13 +1072,13 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
- movq(dst, kSmiConstantRegister);
+ movp(dst, kSmiConstantRegister);
break;
case 0:
UNREACHABLE();
return;
default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
+ Move(dst, source, Assembler::RelocInfoNone());
return;
}
if (negative) {
@@ -1117,11 +1101,7 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
testb(dst, Immediate(0x01));
Label ok;
j(zero, &ok, Label::kNear);
- if (allow_stub_calls()) {
- Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
- } else {
- int3();
- }
+ Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
bind(&ok);
}
ASSERT(kSmiShift % kBitsPerByte == 0);
@@ -1144,7 +1124,7 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
shr(dst, Immediate(kSmiShift));
}
@@ -1158,7 +1138,7 @@ void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
sar(dst, Immediate(kSmiShift));
}
@@ -1242,7 +1222,7 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
return;
}
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
if (power < kSmiShift) {
sar(dst, Immediate(kSmiShift - power));
@@ -1270,12 +1250,12 @@ void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
if (dst.is(src1) || dst.is(src2)) {
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
or_(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
} else {
- movq(dst, src1);
+ movp(dst, src1);
or_(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
@@ -1299,7 +1279,7 @@ Condition MacroAssembler::CheckSmi(const Operand& src) {
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
// Test that both bits of the mask 0x8000000000000001 are zero.
- movq(kScratchRegister, src);
+ movp(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
testb(kScratchRegister, Immediate(3));
return zero;
@@ -1322,7 +1302,7 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
if (first.is(second)) {
return CheckNonNegativeSmi(first);
}
- movq(kScratchRegister, first);
+ movp(kScratchRegister, first);
or_(kScratchRegister, second);
rol(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
@@ -1462,7 +1442,7 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
return;
} else if (dst.is(src)) {
@@ -1518,27 +1498,41 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
void MacroAssembler::SmiAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
- Label done;
LoadSmiConstant(kScratchRegister, constant);
addq(dst, kScratchRegister);
- j(no_overflow, &done, Label::kNear);
- // Restore src.
- subq(dst, kScratchRegister);
- jmp(on_not_smi_result, near_jump);
- bind(&done);
+ if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ j(no_overflow, bailout_label, near_jump);
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ subq(dst, kScratchRegister);
+ } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+ if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ Label done;
+ j(no_overflow, &done, Label::kNear);
+ subq(dst, kScratchRegister);
+ jmp(bailout_label, near_jump);
+ bind(&done);
+ } else {
+        // Bailout if overflow without preserving src.
+ j(overflow, bailout_label, near_jump);
+ }
+ } else {
+ CHECK(mode.IsEmpty());
+ }
} else {
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
+ j(overflow, bailout_label, near_jump);
}
}
@@ -1546,7 +1540,7 @@ void MacroAssembler::SmiAddConstant(Register dst,
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
@@ -1570,43 +1564,49 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiSubConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ j(no_overflow, bailout_label, near_jump);
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ addq(dst, kScratchRegister);
+ } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+ if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ Label done;
+ j(no_overflow, &done, Label::kNear);
+ addq(dst, kScratchRegister);
+ jmp(bailout_label, near_jump);
+ bind(&done);
+ } else {
+        // Bailout if overflow without preserving src.
+ j(overflow, bailout_label, near_jump);
+ }
} else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
+ CHECK(mode.IsEmpty());
}
} else {
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ ASSERT(!dst.is(kScratchRegister));
+ movp(dst, src);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
+ j(overflow, bailout_label, near_jump);
}
}
}
@@ -1618,14 +1618,14 @@ void MacroAssembler::SmiNeg(Register dst,
Label::Distance near_jump) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
+ movp(kScratchRegister, src);
neg(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
cmpq(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
- movq(src, kScratchRegister);
+ movp(src, kScratchRegister);
} else {
- movq(dst, src);
+ movp(dst, src);
neg(dst);
cmpq(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
@@ -1650,7 +1650,7 @@ static void SmiAddHelper(MacroAssembler* masm,
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
- masm->movq(dst, src1);
+ masm->movp(dst, src1);
masm->addq(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
@@ -1686,7 +1686,7 @@ void MacroAssembler::SmiAdd(Register dst,
// overflowing is impossible.
if (!dst.is(src1)) {
if (emit_debug_code()) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
addq(kScratchRegister, src2);
Check(no_overflow, kSmiAdditionOverflow);
}
@@ -1714,7 +1714,7 @@ static void SmiSubHelper(MacroAssembler* masm,
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
- masm->movq(dst, src1);
+ masm->movp(dst, src1);
masm->subq(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
@@ -1751,7 +1751,7 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm,
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
if (!dst.is(src1)) {
- masm->movq(dst, src1);
+ masm->movp(dst, src1);
}
masm->subq(dst, src2);
masm->Assert(no_overflow, kSmiSubtractionOverflow);
@@ -1783,7 +1783,7 @@ void MacroAssembler::SmiMul(Register dst,
if (dst.is(src1)) {
Label failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
+ movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
imul(dst, src2);
j(overflow, &failure, Label::kNear);
@@ -1794,13 +1794,13 @@ void MacroAssembler::SmiMul(Register dst,
testq(dst, dst);
j(not_zero, &correct_result, Label::kNear);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
xor_(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&zero_correct_result);
@@ -1818,7 +1818,7 @@ void MacroAssembler::SmiMul(Register dst,
j(not_zero, &correct_result, Label::kNear);
  // One of src1 and src2 is zero, then check whether the other is
// negative.
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
xor_(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
@@ -1843,7 +1843,7 @@ void MacroAssembler::SmiDiv(Register dst,
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
// We need to rule out dividing Smi::kMinValue by -1, since that would
@@ -1859,7 +1859,7 @@ void MacroAssembler::SmiDiv(Register dst,
testq(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
} else {
j(negative, on_not_smi_result, near_jump);
@@ -1876,14 +1876,14 @@ void MacroAssembler::SmiDiv(Register dst,
if (src1.is(rax)) {
Label smi_result;
j(zero, &smi_result, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&smi_result);
} else {
j(not_zero, on_not_smi_result, near_jump);
}
if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
Integer32ToSmi(dst, rax);
}
@@ -1906,7 +1906,7 @@ void MacroAssembler::SmiMod(Register dst,
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
SmiToInteger32(src2, src2);
@@ -1920,7 +1920,7 @@ void MacroAssembler::SmiMod(Register dst,
// Retag inputs and go slow case.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
jmp(on_not_smi_result, near_jump);
bind(&safe_div);
@@ -1931,7 +1931,7 @@ void MacroAssembler::SmiMod(Register dst,
// Restore smi tags on inputs.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
// Check for a negative zero result. If the result is zero, and the
// dividend is negative, go slow to return a floating point negative zero.
@@ -1962,7 +1962,7 @@ void MacroAssembler::SmiNot(Register dst, Register src) {
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
ASSERT(!dst.is(src2));
if (!dst.is(src1)) {
- movq(dst, src1);
+ movp(dst, src1);
}
and_(dst, src2);
}
@@ -1985,7 +1985,7 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
ASSERT(!src1.is(src2));
- movq(dst, src1);
+ movp(dst, src1);
}
or_(dst, src2);
}
@@ -2006,7 +2006,7 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
ASSERT(!src1.is(src2));
- movq(dst, src1);
+ movp(dst, src1);
}
xor_(dst, src2);
}
@@ -2043,7 +2043,7 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
int shift_value) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
if (shift_value > 0) {
shl(dst, Immediate(shift_value));
@@ -2058,7 +2058,7 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
- movq(dst, src);
+ movp(dst, src);
if (shift_value == 0) {
testq(dst, dst);
j(negative, on_not_smi_result, near_jump);
@@ -2131,21 +2131,21 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
if (src1.is(rcx)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
} else if (src2.is(rcx)) {
- movq(kScratchRegister, src2);
+ movp(kScratchRegister, src2);
}
if (!dst.is(src1)) {
- movq(dst, src1);
+ movp(dst, src1);
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
sar_cl(dst); // Shift 32 + original rcx & 0x1f.
shl(dst, Immediate(kSmiShift));
if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
} else if (src2.is(rcx)) {
- movq(src2, kScratchRegister);
+ movp(src2, kScratchRegister);
}
}
@@ -2162,10 +2162,8 @@ void MacroAssembler::SelectNonSmi(Register dst,
ASSERT(!dst.is(src2));
// Both operands must not be smis.
#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
- }
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
@@ -2180,7 +2178,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
// kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
subq(kScratchRegister, Immediate(1));
  // If src1 is a smi, then scratch register is all 1s, else it is all 0s.
- movq(dst, src1);
+ movp(dst, src1);
xor_(dst, src2);
and_(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
@@ -2242,7 +2240,7 @@ void MacroAssembler::Push(Smi* source) {
void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
- movq(scratch, src);
+ movp(scratch, src);
// High bits.
shr(src, Immediate(64 - kSmiShift));
shl(src, Immediate(kSmiShift));
@@ -2317,7 +2315,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
Register index = scratch;
Register probe = mask;
- movq(probe,
+ movp(probe,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2348,7 +2346,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
// Get the result from the cache.
bind(&load_result_from_cache);
- movq(result,
+ movp(result,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2380,8 +2378,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
j(either_smi, on_fail, near_jump);
// Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
@@ -2429,8 +2427,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Label* on_fail,
Label::Distance near_jump) {
// Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
+ movp(scratch1, first_object_instance_type);
+ movp(scratch2, second_object_instance_type);
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
@@ -2483,7 +2481,7 @@ void MacroAssembler::JumpIfNotUniqueName(Register reg,
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
}
@@ -2504,7 +2502,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
Move(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
}
}
@@ -2548,10 +2546,10 @@ void MacroAssembler::MoveHeapObject(Register result,
ASSERT(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
- movq(result, cell, RelocInfo::CELL);
- movq(result, Operand(result, 0));
+ Move(result, cell, RelocInfo::CELL);
+ movp(result, Operand(result, 0));
} else {
- movq(result, object, RelocInfo::EMBEDDED_OBJECT);
+ Move(result, object, RelocInfo::EMBEDDED_OBJECT);
}
}
@@ -2561,8 +2559,8 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
AllowDeferredHandleDereference embedding_raw_address;
load_rax(cell.location(), RelocInfo::CELL);
} else {
- movq(dst, cell, RelocInfo::CELL);
- movq(dst, Operand(dst, 0));
+ Move(dst, cell, RelocInfo::CELL);
+ movp(dst, Operand(dst, 0));
}
}
@@ -2587,8 +2585,19 @@ void MacroAssembler::Jump(ExternalReference ext) {
}
+void MacroAssembler::Jump(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ jmp(op);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, op);
+ jmp(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
- movq(kScratchRegister, destination, rmode);
+ Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
}
@@ -2618,11 +2627,22 @@ void MacroAssembler::Call(ExternalReference ext) {
}
+void MacroAssembler::Call(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ call(op);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, op);
+ call(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
- int end_position = pc_offset() + CallSize(destination, rmode);
+ int end_position = pc_offset() + CallSize(destination);
#endif
- movq(kScratchRegister, destination, rmode);
+ Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
#ifdef DEBUG
CHECK_EQ(pc_offset(), end_position);
@@ -2718,17 +2738,17 @@ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
const Immediate& imm) {
- movq(SafepointRegisterSlot(dst), imm);
+ movp(SafepointRegisterSlot(dst), imm);
}
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- movq(SafepointRegisterSlot(dst), src);
+ movp(SafepointRegisterSlot(dst), src);
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- movq(dst, SafepointRegisterSlot(src));
+ movp(dst, SafepointRegisterSlot(src));
}
@@ -2772,7 +2792,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
push(ExternalOperand(handler_address));
// Set this new handler as the current one.
- movq(ExternalOperand(handler_address), rsp);
+ movp(ExternalOperand(handler_address), rsp);
}
@@ -2788,9 +2808,9 @@ void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// rax = exception, rdi = code object, rdx = state.
- movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
+ movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
shr(rdx, Immediate(StackHandler::kKindWidth));
- movq(rdx,
+ movp(rdx,
FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
@@ -2810,11 +2830,11 @@ void MacroAssembler::Throw(Register value) {
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- movq(rsp, ExternalOperand(handler_address));
+ movp(rsp, ExternalOperand(handler_address));
// Restore the next handler.
pop(ExternalOperand(handler_address));
@@ -2832,7 +2852,7 @@ void MacroAssembler::Throw(Register value) {
Label skip;
testq(rsi, rsi);
j(zero, &skip, Label::kNear);
- movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
JumpToHandlerEntry();
@@ -2851,7 +2871,7 @@ void MacroAssembler::ThrowUncatchable(Register value) {
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top stack handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
@@ -2861,7 +2881,7 @@ void MacroAssembler::ThrowUncatchable(Register value) {
Label fetch_next, check_kind;
jmp(&check_kind, Label::kNear);
bind(&fetch_next);
- movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
+ movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
@@ -2910,7 +2930,7 @@ void MacroAssembler::FCmp() {
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
@@ -3017,9 +3037,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
}
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -3032,10 +3050,8 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail);
}
- Label success;
- CompareMap(obj, map, &success);
+ CompareMap(obj, map);
j(not_equal, fail);
- bind(&success);
}
@@ -3120,9 +3136,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister,
- V8_INT64_C(0x8000000000000000),
- RelocInfo::NONE64);
+ movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
cmpq(result_reg, kScratchRegister);
j(not_equal, &done, Label::kNear);
@@ -3196,21 +3210,54 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
+void MacroAssembler::Throw(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ push(rax);
+ Push(Smi::FromInt(reason));
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // Control will not return here.
+ int3();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ j(NegateCondition(cc), &L);
+ Throw(reason);
+ // will not return here
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+ movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ movp(dst, FieldOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ movp(dst, FieldOperand(map, Map::kBitField3Offset));
Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
and_(dst, kScratchRegister);
}
@@ -3272,7 +3319,7 @@ void MacroAssembler::AssertSmi(const Operand& object) {
void MacroAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
+ movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
cmpq(kScratchRegister, int32_register);
Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
}
@@ -3284,7 +3331,7 @@ void MacroAssembler::AssertString(Register object) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAString);
push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
pop(object);
Check(below, kOperandIsNotAString);
@@ -3297,7 +3344,7 @@ void MacroAssembler::AssertName(Register object) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAName);
push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
pop(object);
Check(below_equal, kOperandIsNotAName);
@@ -3321,7 +3368,7 @@ void MacroAssembler::AssertRootValue(Register src,
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
testb(instance_type, Immediate(kIsNotStringMask));
@@ -3332,7 +3379,7 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
Condition MacroAssembler::IsObjectNameType(Register heap_object,
Register map,
Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
return below_equal;
@@ -3352,7 +3399,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, miss);
if (miss_on_bound_function) {
- movq(kScratchRegister,
+ movp(kScratchRegister,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
// It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
// field).
@@ -3369,7 +3416,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
- movq(result,
+ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// If the prototype or initial map is the hole, don't return it and
@@ -3384,13 +3431,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
- movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ movp(result, FieldOperand(result, Map::kPrototypeOffset));
jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- movq(result, FieldOperand(result, Map::kConstructorOffset));
+ movp(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
bind(&done);
@@ -3442,26 +3489,11 @@ void MacroAssembler::DebugBreak() {
#endif // ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be rcx to
- // follow the calling convention which requires the call type to be
- // in rcx.
- ASSERT(dst.is(rcx));
- if (call_kind == CALL_AS_FUNCTION) {
- LoadSmiConstant(dst, Smi::FromInt(1));
- } else {
- LoadSmiConstant(dst, Smi::FromInt(0));
- }
-}
-
-
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3475,17 +3507,14 @@ void MacroAssembler::InvokeCode(Register code,
&definitely_mismatches,
flag,
Label::kNear,
- call_wrapper,
- call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
jmp(code);
}
bind(&done);
@@ -3493,64 +3522,42 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
- Label done;
- bool definitely_mismatches = false;
- Register dummy = rax;
- InvokePrologue(expected,
- actual,
- code,
- dummy,
- &done,
- &definitely_mismatches,
- flag,
- Label::kNear,
- call_wrapper,
- call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- Call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- Jump(code, rmode);
- }
- bind(&done);
- }
+ ASSERT(function.is(rdi));
+ movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ movsxlq(rbx,
+ FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ // Advances rdx to the end of the Code object header, to the start of
+ // the executable code.
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+
+ ParameterCount expected(rbx);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper);
}
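
Editor's sketch: with the CallKind parameter gone, this single-register InvokeFunction overload reads the expected argument count out of the SharedFunctionInfo itself, so a typical call site now reduces to (minimal sketch, assuming the actual count is already in rax):

    ParameterCount actual(rax);
    __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
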
void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function.is(rdi));
- movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper);
}
@@ -3558,20 +3565,9 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Get the function and setup the context.
+ const CallWrapper& call_wrapper) {
Move(rdi, function);
- movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+ InvokeFunction(rdi, expected, actual, flag, call_wrapper);
}
@@ -3583,8 +3579,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label invoke;
@@ -3628,22 +3623,20 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
+ movp(rdx, code_register);
}
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(rcx, call_kind);
Call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
jmp(done, near_jump);
}
} else {
- SetCallKind(rcx, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&invoke);
@@ -3654,7 +3647,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
push(rbp); // Caller's frame pointer.
- movq(rbp, rsp);
+ movp(rbp, rsp);
push(rsi); // Callee's context.
Push(Smi::FromInt(StackFrame::STUB));
} else {
@@ -3667,7 +3660,7 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
push(rbp); // Caller's frame pointer.
- movq(rbp, rsp);
+ movp(rbp, rsp);
push(rsi); // Callee's context.
push(rdi); // Callee's JS function.
}
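
Editor's note: for orientation, the frame laid down by the non-stub path above has this shape (8-byte slots on x64):

    // After "push rbp; movp rbp, rsp; push rsi; push rdi":
    //   [rbp + 8]   return address
    //   [rbp + 0]   caller's rbp
    //   [rbp - 8]   context (rsi)
    //   [rbp - 16]  JS function (rdi)
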
@@ -3677,13 +3670,13 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(rbp);
- movq(rbp, rsp);
+ movp(rbp, rsp);
push(rsi); // Context.
Push(Smi::FromInt(type));
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
push(kScratchRegister);
if (emit_debug_code()) {
- movq(kScratchRegister,
+ Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpq(Operand(rsp, 0), kScratchRegister);
@@ -3698,7 +3691,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
- movq(rsp, rbp);
+ movp(rsp, rbp);
pop(rbp);
}
@@ -3711,17 +3704,17 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(rbp);
- movq(rbp, rsp);
+ movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // Saved entry sp, patched before call.
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
push(kScratchRegister); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
if (save_rax) {
- movq(r14, rax); // Backup rax in callee-save register.
+ movp(r14, rax); // Backup rax in callee-save register.
}
Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
@@ -3738,7 +3731,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
// Optionally save all XMM registers.
if (save_doubles) {
int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
- arg_stack_space * kPointerSize;
+ arg_stack_space * kRegisterSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
@@ -3746,7 +3739,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kPointerSize));
+ subq(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
@@ -3758,7 +3751,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
}
// Patch the saved entry sp.
- movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
+ movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
@@ -3791,8 +3784,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
}
}
// Get the return address from the stack and restore the frame pointer.
- movq(rcx, Operand(rbp, 1 * kPointerSize));
- movq(rbp, Operand(rbp, 0 * kPointerSize));
+ movp(rcx, Operand(rbp, kFPOnStackSize));
+ movp(rbp, Operand(rbp, 0 * kPointerSize));
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
@@ -3805,7 +3798,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
- movq(rsp, rbp);
+ movp(rsp, rbp);
pop(rbp);
LeaveExitFrameEpilogue(restore_context);
@@ -3817,17 +3810,17 @@ void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
if (restore_context) {
- movq(rsi, context_operand);
+ movp(rsi, context_operand);
}
#ifdef DEBUG
- movq(context_operand, Immediate(0));
+ movp(context_operand, Immediate(0));
#endif
// Clear the top frame.
ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
isolate());
Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
- movq(c_entry_fp_operand, Immediate(0));
+ movp(c_entry_fp_operand, Immediate(0));
}
@@ -3839,7 +3832,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
ASSERT(!holder_reg.is(scratch));
ASSERT(!scratch.is(kScratchRegister));
// Load current lexical context from the stack frame.
- movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
+ movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
@@ -3849,8 +3842,8 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, offset));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -3872,23 +3865,23 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
if (emit_debug_code()) {
// Preserve original value of holder_reg.
push(holder_reg);
- movq(holder_reg,
+ movp(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
// Read the first word and compare to native_context_map().
- movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
+ movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
pop(holder_reg);
}
- movq(kScratchRegister,
+ movp(kScratchRegister,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, token_offset));
+ movp(scratch, FieldOperand(scratch, token_offset));
cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
@@ -3896,6 +3889,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and with KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
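
Editor's sketch: the ComputeIntegerHash referenced above is, from memory of the utils.h of this era (treat as an assumption and verify against the tree), the following mixing function, which the assembler sequence has to reproduce bit for bit:

    uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);   // hash = (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;            // hash = hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }
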
@@ -3970,10 +3966,9 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
decl(r1);
// Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
- movq(r2, r0);
+ movp(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
@@ -3989,7 +3984,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
+ if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
@@ -4008,7 +4003,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Get the value at the masked, scaled index.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@@ -4035,7 +4030,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
// and keep address in scratch until call to UpdateAllocationTopHelper.
if (scratch.is_valid()) {
LoadAddress(scratch, allocation_top);
- movq(result, Operand(scratch, 0));
+ movp(result, Operand(scratch, 0));
} else {
Load(result, allocation_top);
}
@@ -4056,7 +4051,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
// Update new top.
if (scratch.is_valid()) {
// Scratch already contains address of allocation top.
- movq(Operand(scratch, 0), result_end);
+ movp(Operand(scratch, 0), result_end);
} else {
Store(allocation_top, result_end);
}
@@ -4070,7 +4065,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4090,10 +4085,6 @@ void MacroAssembler::Allocate(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- if (isolate()->heap_profiler()->is_tracking_allocations()) {
- RecordObjectAllocation(isolate(), result, object_size);
- }
-
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
@@ -4108,7 +4099,7 @@ void MacroAssembler::Allocate(int object_size,
Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
- movq(top_reg, result);
+ movp(top_reg, result);
}
addq(top_reg, Immediate(object_size));
j(carry, gc_required);
@@ -4173,10 +4164,6 @@ void MacroAssembler::Allocate(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- if (isolate()->heap_profiler()->is_tracking_allocations()) {
- RecordObjectAllocation(isolate(), result, object_size);
- }
-
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
@@ -4188,7 +4175,7 @@ void MacroAssembler::Allocate(Register object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
if (!object_size.is(result_end)) {
- movq(result_end, object_size);
+ movp(result_end, object_size);
}
addq(result_end, result);
j(carry, gc_required);
@@ -4217,7 +4204,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
cmpq(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
- movq(top_operand, object);
+ movp(top_operand, object);
}
@@ -4229,7 +4216,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
// Set the map.
LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4264,10 +4251,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -4302,10 +4289,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -4320,7 +4307,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4358,7 +4345,7 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4372,7 +4359,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4386,7 +4373,7 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4407,50 +4394,72 @@ void MacroAssembler::CopyBytes(Register destination,
cmpl(length, Immediate(min_length));
Assert(greater_equal, kInvalidMinLength);
}
- Label loop, done, short_string, short_loop;
+ Label short_loop, len8, len16, len24, done, short_string;
- const int kLongStringLimit = 20;
+ const int kLongStringLimit = 4 * kPointerSize;
if (min_length <= kLongStringLimit) {
- cmpl(length, Immediate(kLongStringLimit));
- j(less_equal, &short_string);
+ cmpl(length, Immediate(kPointerSize));
+ j(below, &short_string, Label::kNear);
}
ASSERT(source.is(rsi));
ASSERT(destination.is(rdi));
ASSERT(length.is(rcx));
+ if (min_length <= kLongStringLimit) {
+ cmpl(length, Immediate(2 * kPointerSize));
+ j(below_equal, &len8, Label::kNear);
+ cmpl(length, Immediate(3 * kPointerSize));
+ j(below_equal, &len16, Label::kNear);
+ cmpl(length, Immediate(4 * kPointerSize));
+ j(below_equal, &len24, Label::kNear);
+ }
+
// Because source is 8-byte aligned in our uses of this function,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
- movq(scratch, length);
+ movp(scratch, length);
shrl(length, Immediate(kPointerSizeLog2));
repmovsq();
// Move remaining bytes of length.
andl(scratch, Immediate(kPointerSize - 1));
- movq(length, Operand(source, scratch, times_1, -kPointerSize));
- movq(Operand(destination, scratch, times_1, -kPointerSize), length);
+ movp(length, Operand(source, scratch, times_1, -kPointerSize));
+ movp(Operand(destination, scratch, times_1, -kPointerSize), length);
addq(destination, scratch);
if (min_length <= kLongStringLimit) {
- jmp(&done);
+ jmp(&done, Label::kNear);
+ bind(&len24);
+ movp(scratch, Operand(source, 2 * kPointerSize));
+ movp(Operand(destination, 2 * kPointerSize), scratch);
+ bind(&len16);
+ movp(scratch, Operand(source, kPointerSize));
+ movp(Operand(destination, kPointerSize), scratch);
+ bind(&len8);
+ movp(scratch, Operand(source, 0));
+ movp(Operand(destination, 0), scratch);
+ // Move remaining bytes of length.
+ movp(scratch, Operand(source, length, times_1, -kPointerSize));
+ movp(Operand(destination, length, times_1, -kPointerSize), scratch);
+ addq(destination, length);
+ jmp(&done, Label::kNear);
bind(&short_string);
if (min_length == 0) {
testl(length, length);
- j(zero, &done);
+ j(zero, &done, Label::kNear);
}
- lea(scratch, Operand(destination, length, times_1, 0));
bind(&short_loop);
- movb(length, Operand(source, 0));
- movb(Operand(destination, 0), length);
+ movb(scratch, Operand(source, 0));
+ movb(Operand(destination, 0), scratch);
incq(source);
incq(destination);
- cmpq(destination, scratch);
- j(not_equal, &short_loop);
-
- bind(&done);
+ decl(length);
+ j(not_zero, &short_loop);
}
+
+ bind(&done);
}
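
Editor's sketch: the rewritten CopyBytes dispatches on length. Anything shorter than a word falls through to the byte loop, lengths up to four words are handled by the len8/len16/len24 ladder plus one possibly overlapping word ending exactly at the tail, and longer runs use rep movsq with the same overlapping-tail trick for the remainder. A C++ outline of the strategy (names illustrative, 8-byte words assumed):

    #include <cstdint>
    #include <cstring>

    void CopyBytesSketch(uint8_t* dst, const uint8_t* src, size_t len) {
      if (len < 8) {
        for (size_t i = 0; i < len; i++) dst[i] = src[i];    // short byte loop
      } else if (len <= 32) {
        for (size_t i = 0; i + 8 <= len; i += 8)              // len8/len16/len24
          memcpy(dst + i, src + i, 8);
        memcpy(dst + len - 8, src + len - 8, 8);              // overlapping tail word
      } else {
        memcpy(dst, src, (len / 8) * 8);                      // rep movsq equivalent
        memcpy(dst + len - 8, src + len - 8, 8);              // overlapping tail word
      }
    }
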
@@ -4460,7 +4469,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
Label loop, entry;
jmp(&entry);
bind(&loop);
- movq(Operand(start_offset, 0), filler);
+ movp(Operand(start_offset, 0), filler);
addq(start_offset, Immediate(kPointerSize));
bind(&entry);
cmpq(start_offset, end_offset);
@@ -4471,15 +4480,15 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
// destination register in case we store into it (the write barrier
// cannot be allowed to destroy the context in rsi).
- movq(dst, rsi);
+ movp(dst, rsi);
}
// We should not have found a with context by walking the context
@@ -4501,12 +4510,12 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- movq(scratch,
+ movp(scratch,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- movq(scratch, Operand(scratch,
+ movp(scratch, Operand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
int offset = expected_kind * kPointerSize +
@@ -4517,7 +4526,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- movq(map_in_out, FieldOperand(scratch, offset));
+ movp(map_in_out, FieldOperand(scratch, offset));
}
@@ -4526,7 +4535,7 @@ void MacroAssembler::LoadInitialArrayMap(
Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
- movq(map_out, FieldOperand(function_in,
+ movp(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
@@ -4553,20 +4562,20 @@ static const int kRegisterPassedArguments = 6;
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- movq(function,
+ movp(function,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
- movq(function, Operand(function, Context::SlotOffset(index)));
+ movp(function, Operand(function, Context::SlotOffset(index)));
}
void MacroAssembler::LoadArrayFunction(Register function) {
- movq(function,
+ movp(function,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movq(function,
+ movp(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+ movp(function,
Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}
@@ -4574,7 +4583,7 @@ void MacroAssembler::LoadArrayFunction(Register function) {
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
- movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
@@ -4605,19 +4614,52 @@ int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object);
+ Abort(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ movp(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmpq(value, Immediate(encoding_mask));
+ pop(value);
+ Check(equal, kUnexpectedStringType);
+
+ // The index is assumed to be untagged coming in. Tag it to compare with the
+ // string length without using a temp register; it is restored at the end of
+ // this function.
+ Integer32ToSmi(index, index);
+ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
+ Check(less, kIndexIsTooLarge);
+
+ SmiCompare(index, Smi::FromInt(0));
+ Check(greater_equal, kIndexIsNegative);
+
+ // Restore the index.
+ SmiToInteger32(index, index);
+}
+
+
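
Editor's sketch: EmitSeqStringSetCharCheck is intended for --debug-code assertions around in-place writes into sequential strings. A caller would look roughly like this (the register assignment and the one-byte encoding constant mirror typical full-codegen usage but are not part of this hunk):

    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    __ EmitSeqStringSetCharCheck(rbx /* string */, rcx /* index */,
                                 rax /* value */, one_byte_seq_type);
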
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);
ASSERT(num_arguments >= 0);
// Make stack end at alignment and allocate space for arguments and old rsp.
- movq(kScratchRegister, rsp);
+ movp(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
+ subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
and_(rsp, Immediate(-frame_alignment));
- movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
+ movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
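
Editor's note: argument slots are now reserved in kRegisterSize units because C stack slots on x64 are always 8 bytes, even where kPointerSize may be smaller (the configuration the surrounding movq-to-movp conversion appears to be preparing for). A typical sequence around these helpers looks roughly like this (sketch; assumes the C function address is already in rbx and its first argument is the isolate):

    __ PrepareCallCFunction(2);
    __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    __ movp(arg_reg_2, rax);
    __ CallCFunction(rbx, 2);  // register-target overload, see the next hunk
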
@@ -4640,7 +4682,7 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
ASSERT(num_arguments >= 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
+ movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
@@ -4687,7 +4729,7 @@ void MacroAssembler::CheckPageFlag(
if (scratch.is(object)) {
and_(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
- movq(scratch, Immediate(~Page::kPageAlignmentMask));
+ movp(scratch, Immediate(~Page::kPageAlignmentMask));
and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
@@ -4705,7 +4747,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
Move(scratch, map);
- movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
SmiToInteger32(scratch, scratch);
and_(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
@@ -4724,7 +4766,7 @@ void MacroAssembler::JumpIfBlack(Register object,
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
// and a 0 at all other positions, including the position of the second bit.
- movq(rcx, mask_scratch);
+ movp(rcx, mask_scratch);
// Make rcx into a mask that covers both marking bits using the operation
// rcx = mask | (mask << 1).
lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
@@ -4744,7 +4786,7 @@ void MacroAssembler::JumpIfDataObject(
Label* not_data_object,
Label::Distance not_data_object_distance) {
Label is_data_object;
- movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+ movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
j(equal, &is_data_object, Label::kNear);
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
@@ -4762,10 +4804,10 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
- movq(bitmap_reg, addr_reg);
+ movp(bitmap_reg, addr_reg);
// Sign extended 32 bit immediate.
and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- movq(rcx, addr_reg);
+ movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
@@ -4774,7 +4816,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
~(Bitmap::kBytesPerCell - 1)));
addq(bitmap_reg, rcx);
- movq(rcx, addr_reg);
+ movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
@@ -4825,10 +4867,10 @@ void MacroAssembler::EnsureNotWhite(
Label is_data_object;
// Check for heap-number
- movq(map, FieldOperand(value, HeapObject::kMapOffset));
+ movp(map, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(map, Heap::kHeapNumberMapRootIndex);
j(not_equal, &not_heap_number, Label::kNear);
- movq(length, Immediate(HeapNumber::kSize));
+ movp(length, Immediate(HeapNumber::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_heap_number);
@@ -4851,7 +4893,7 @@ void MacroAssembler::EnsureNotWhite(
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
testb(instance_type, Immediate(kExternalStringTag));
j(zero, &not_external, Label::kNear);
- movq(length, Immediate(ExternalString::kSize));
+ movp(length, Immediate(ExternalString::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_external);
@@ -4882,21 +4924,21 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Label next, start;
Register empty_fixed_array_value = r8;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- movq(rcx, rax);
+ movp(rcx, rax);
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
EnumLength(rdx, rbx);
- Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
+ Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
j(equal, call_runtime);
jmp(&start);
bind(&next);
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
EnumLength(rdx, rbx);
@@ -4907,11 +4949,18 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
+ Label no_elements;
cmpq(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
+ j(equal, &no_elements);
+
+ // Second chance: the object may be using the empty slow element dictionary.
+ LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
+ cmpq(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
- movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ bind(&no_elements);
+ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
cmpq(rcx, null_value);
j(not_equal, &next);
}
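
Editor's sketch: the extra branch accepts objects whose elements pointer is the canonical empty slow-element dictionary as well as the empty fixed array. A runtime-side sketch of the generated condition (illustrative, assuming the usual Heap root accessors):

    bool has_no_elements =
        object->elements() == heap->empty_fixed_array() ||
        object->elements() == heap->empty_slow_element_dictionary();
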
@@ -4927,7 +4976,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- movq(kScratchRegister, new_space_start);
+ Move(kScratchRegister, new_space_start);
cmpq(scratch_reg, kScratchRegister);
j(less, no_memento_found);
cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
@@ -4937,35 +4986,29 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
}
-void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
- Register object,
- Register object_size) {
- FrameScope frame(this, StackFrame::EXIT);
- PushSafepointRegisters();
- PrepareCallCFunction(3);
- // In case object is rdx
- movq(kScratchRegister, object);
- movq(arg_reg_3, object_size);
- movq(arg_reg_2, kScratchRegister);
- movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
- CallCFunction(
- ExternalReference::record_object_allocation_function(isolate), 3);
- PopSafepointRegisters();
-}
-
-
-void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
- Register object,
- int object_size) {
- FrameScope frame(this, StackFrame::EXIT);
- PushSafepointRegisters();
- PrepareCallCFunction(3);
- movq(arg_reg_2, object);
- movq(arg_reg_3, Immediate(object_size));
- movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
- CallCFunction(
- ExternalReference::record_object_allocation_function(isolate), 3);
- PopSafepointRegisters();
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
+ ASSERT(!scratch1.is(scratch0));
+ Register current = scratch0;
+ Label loop_again;
+
+ movp(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ movp(current, FieldOperand(current, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ and_(scratch1, Immediate(Map::kElementsKindMask));
+ shr(scratch1, Immediate(Map::kElementsKindShift));
+ cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ movp(current, FieldOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ j(not_equal, &loop_again);
}
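
Editor's sketch: JumpIfDictionaryInPrototypeChain walks map by map, starting at the object itself, and jumps to the found label as soon as any map on the chain has DICTIONARY_ELEMENTS. The C++ equivalent is roughly (illustrative, not from this patch):

    bool HasDictionaryElementsInChain(Object* object) {
      for (Object* current = object; !current->IsNull();
           current = HeapObject::cast(current)->map()->prototype()) {
        if (HeapObject::cast(current)->map()->elements_kind() ==
            DICTIONARY_ELEMENTS) {
          return true;
        }
      }
      return false;
    }
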
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 67ae7a2a69..092acc0278 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -53,6 +53,22 @@ typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum SmiOperationConstraint {
+ PRESERVE_SOURCE_REGISTER,
+ BAILOUT_ON_NO_OVERFLOW,
+ BAILOUT_ON_OVERFLOW,
+ NUMBER_OF_CONSTRAINTS
+};
+
+STATIC_ASSERT(NUMBER_OF_CONSTRAINTS <= 8);
+
+class SmiOperationExecutionMode : public EnumSet<SmiOperationConstraint, byte> {
+ public:
+ SmiOperationExecutionMode() : EnumSet<SmiOperationConstraint, byte>(0) { }
+ explicit SmiOperationExecutionMode(byte bits)
+ : EnumSet<SmiOperationConstraint, byte>(bits) { }
+};
+
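
Editor's sketch: SmiOperationExecutionMode is an EnumSet, so callers of the reworked SmiAddConstant/SmiSubConstant build a mode by combining constraints, roughly as follows (illustrative, not lifted from this patch):

    SmiOperationExecutionMode mode;
    mode.Add(PRESERVE_SOURCE_REGISTER);
    mode.Add(BAILOUT_ON_OVERFLOW);
    masm->SmiAddConstant(rdx, rax, Smi::FromInt(1), mode, &bailout);
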
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
// Forward declaration.
@@ -319,48 +335,38 @@ class MacroAssembler: public Assembler {
void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
- movq(kRootRegister, roots_array_start);
+ Move(kRootRegister, roots_array_start);
addq(kRootRegister, Immediate(kRootRegisterBias));
}
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Set up call kind marking in rcx. The method takes rcx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -384,9 +390,8 @@ class MacroAssembler: public Assembler {
void SafePush(Smi* src);
void InitializeSmiConstantRegister() {
- movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE64);
+ Move(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
+ Assembler::RelocInfoNone());
}
// Conversions between tagged smi values and non-tagged integer values.
@@ -548,7 +553,8 @@ class MacroAssembler: public Assembler {
void SmiAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump = Label::kFar);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
@@ -561,7 +567,8 @@ class MacroAssembler: public Assembler {
void SmiSubConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump = Label::kFar);
// Negating a smi can give a negative zero or too large positive value.
@@ -714,7 +721,7 @@ class MacroAssembler: public Assembler {
void Move(const Operand& dst, Smi* source) {
Register constant = GetSmiConstant(source);
- movq(dst, constant);
+ movp(dst, constant);
}
void Push(Smi* smi);
@@ -775,6 +782,11 @@ class MacroAssembler: public Assembler {
Label* on_fail,
Label::Distance near_jump = Label::kFar);
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
// Checks if the given register or operand is a unique name
void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
Label::Distance distance = Label::kFar);
@@ -790,7 +802,7 @@ class MacroAssembler: public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(const Operand& dst, int64_t x);
+ void Set(const Operand& dst, intptr_t x);
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -829,22 +841,42 @@ class MacroAssembler: public Assembler {
void Pop(Register dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
- void MoveDouble(Register dst, const Operand& src) { movq(dst, src); }
- void MoveDouble(const Operand& dst, Register src) { movq(dst, src); }
+ void Move(Register dst, ExternalReference ext) {
+ movp(dst, reinterpret_cast<Address>(ext.address()),
+ RelocInfo::EXTERNAL_REFERENCE);
+ }
+
+ // Loads a pointer into a register with a relocation mode.
+ void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
+ // This method must not be used with heap object references. The stored
+ // address is not GC safe. Use the handle version instead.
+ ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
+ movp(dst, ptr, rmode);
+ }
+
+ void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
+ AllowDeferredHandleDereference using_raw_address;
+ ASSERT(!RelocInfo::IsNone(rmode));
+ ASSERT(value->IsHeapObject());
+ ASSERT(!isolate()->heap()->InNewSpace(*value));
+ movp(dst, value.location(), rmode);
+ }
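
Editor's note: these Move overloads are what the call sites below switch to; for example, the regexp-macro-assembler changes later in this same patch replace movq with

    __ Move(rbx, ExternalReference::re_word_character_map());

so the EXTERNAL_REFERENCE relocation mode is supplied by the overload instead of being spelled out at each call site.
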
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
+ void Jump(const Operand& op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
+ void Call(const Operand& op);
void Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
// The size of the code generated for different call instructions.
- int CallSize(Address destination, RelocInfo::Mode rmode) {
+ int CallSize(Address destination) {
return kCallSequenceLength;
}
int CallSize(ExternalReference ext);
@@ -916,13 +948,8 @@ class MacroAssembler: public Assembler {
Label* fail,
int elements_offset = 0);
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
- // result of map compare. If multiple map compares are required, the compare
- // sequences branches to early_success.
- void CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success);
+ // Compare an object's map with the specified map.
+ void CompareMap(Register obj, Handle<Map> map);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -1042,6 +1069,12 @@ class MacroAssembler: public Assembler {
// Propagate an uncatchable exception out of the current JS stack.
void ThrowUncatchable(Register value);
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -1100,15 +1133,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Record a JS object allocation if allocations tracking mode is on.
- void RecordObjectAllocation(Isolate* isolate,
- Register object,
- Register object_size);
-
- void RecordObjectAllocation(Isolate* isolate,
- Register object,
- int object_size);
-
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
// object(s) no longer allocated as they would be invalid when allocation is
@@ -1284,7 +1308,7 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Clobbers r14, r15, rbx and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address,
+ void CallApiFunctionAndReturn(Register function_address,
Address thunk_address,
Register thunk_last_arg,
int stack_space,
@@ -1375,8 +1399,6 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
@@ -1414,6 +1436,10 @@ class MacroAssembler: public Assembler {
bind(&no_memento_found);
}
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1422,7 +1448,6 @@ class MacroAssembler: public Assembler {
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
bool root_array_available_;
@@ -1447,8 +1472,7 @@ class MacroAssembler: public Assembler {
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump = Label::kFar,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
+ const CallWrapper& call_wrapper = NullCallWrapper());
void EnterExitFramePrologue(bool save_rax);
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index ca834e2771..75e70c5975 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -315,7 +315,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(below, &loop);
// Compute new value of character position after the matched part.
- __ movq(rdi, r11);
+ __ movp(rdi, r11);
__ subq(rdi, rsi);
} else {
ASSERT(mode_ == UC16);
@@ -341,7 +341,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Set byte_offset2.
__ lea(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length.
- __ movq(r8, rbx);
+ __ movp(r8, rbx);
// Isolate.
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention
@@ -350,9 +350,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Compute and set byte_offset1 (start of capture).
__ lea(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ movq(rsi, rax);
+ __ movp(rsi, rax);
// Set byte_length.
- __ movq(rdx, rbx);
+ __ movp(rdx, rbx);
// Isolate.
__ LoadAddress(rcx, ExternalReference::isolate_address(isolate()));
#endif
@@ -441,7 +441,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Success.
// Set current character position to position after match.
- __ movq(rdi, rbx);
+ __ movp(rdi, rbx);
__ subq(rdi, rsi);
__ bind(&fallthrough);
@@ -522,7 +522,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
__ Move(rax, table);
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ movq(rbx, current_character());
+ __ movp(rbx, current_character());
__ and_(rbx, Immediate(kTableMask));
index = rbx;
}
@@ -618,7 +618,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate('z'));
BranchOrBacktrack(above, on_no_match);
}
- __ movq(rbx, ExternalReference::re_word_character_map());
+ __ Move(rbx, ExternalReference::re_word_character_map());
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
__ testb(Operand(rbx, current_character(), times_1, 0),
current_character());
@@ -632,7 +632,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate('z'));
__ j(above, &done);
}
- __ movq(rbx, ExternalReference::re_word_character_map());
+ __ Move(rbx, ExternalReference::re_word_character_map());
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
__ testb(Operand(rbx, current_character(), times_1, 0),
current_character());
@@ -675,7 +675,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Actually emit code to start a new stack frame.
__ push(rbp);
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
#ifdef _WIN64
@@ -717,8 +717,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
- __ movq(rcx, rsp);
- __ movq(kScratchRegister, stack_limit);
+ __ movp(rcx, rsp);
+ __ Move(kScratchRegister, stack_limit);
__ subq(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
@@ -743,14 +743,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Allocate space on stack for registers.
__ subq(rsp, Immediate(num_registers_ * kPointerSize));
// Load string length.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
- __ movq(rdi, Operand(rbp, kInputStart));
+ __ movp(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
__ subq(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
- __ movq(rbx, Operand(rbp, kStartIndex));
+ __ movp(rbx, Operand(rbp, kStartIndex));
__ neg(rbx);
if (mode_ == UC16) {
__ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
@@ -759,7 +759,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ movq(Operand(rbp, kInputStartMinusOne), rax);
+ __ movp(Operand(rbp, kInputStartMinusOne), rax);
#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
@@ -769,7 +769,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
- __ movq(register_location(i), rax); // One write every page.
+ __ movp(register_location(i), rax); // One write every page.
}
#endif // V8_OS_WIN
@@ -798,20 +798,20 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ Set(rcx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
+ __ movp(Operand(rbp, rcx, times_1, 0), rax);
__ subq(rcx, Immediate(kPointerSize));
__ cmpq(rcx,
Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(register_location(i), rax);
+ __ movp(register_location(i), rax);
}
}
}
// Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ movp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
__ jmp(&start_label_);
@@ -821,9 +821,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ movq(rdx, Operand(rbp, kStartIndex));
- __ movq(rbx, Operand(rbp, kRegisterOutput));
- __ movq(rcx, Operand(rbp, kInputEnd));
+ __ movp(rdx, Operand(rbp, kStartIndex));
+ __ movp(rbx, Operand(rbp, kRegisterOutput));
+ __ movp(rcx, Operand(rbp, kInputEnd));
__ subq(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
__ lea(rcx, Operand(rcx, rdx, times_2, 0));
@@ -834,7 +834,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(rax, register_location(i));
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in rdx for the zero-length check later.
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
}
__ addq(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
@@ -856,13 +856,13 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ cmpq(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
- __ movq(Operand(rbp, kNumOutputRegisters), rcx);
+ __ movp(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
__ addq(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kInputStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -883,14 +883,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ jmp(&load_char_start_regexp);
} else {
- __ movq(rax, Immediate(SUCCESS));
+ __ movp(rax, Immediate(SUCCESS));
}
}
__ bind(&exit_label_);
if (global()) {
// Return the number of successful captures.
- __ movq(rax, Operand(rbp, kSuccessfulCaptures));
+ __ movp(rax, Operand(rbp, kSuccessfulCaptures));
}
__ bind(&return_rax);
@@ -903,9 +903,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Stack now at rbp.
#else
// Restore callee save register.
- __ movq(rbx, Operand(rbp, kBackup_rbx));
+ __ movp(rbx, Operand(rbp, kBackup_rbx));
// Skip rsp to rbp.
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
#endif
// Exit function frame, restore previous one.
__ pop(rbp);
@@ -937,7 +937,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ pop(rdi);
__ pop(backtrack_stackpointer());
    // String might have moved: Reload rsi from frame.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movp(rsi, Operand(rbp, kInputEnd));
SafeReturn();
}
@@ -964,7 +964,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movq(rdi, backtrack_stackpointer()); // First argument.
+ __ movp(rdi, backtrack_stackpointer()); // First argument.
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
@@ -976,7 +976,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ testq(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
- __ movq(backtrack_stackpointer(), rax);
+ __ movp(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
@@ -1061,7 +1061,7 @@ void RegExpMacroAssemblerX64::PopCurrentPosition() {
void RegExpMacroAssemblerX64::PopRegister(int register_index) {
Pop(rax);
- __ movq(register_location(register_index), rax);
+ __ movp(register_location(register_index), rax);
}
@@ -1078,7 +1078,7 @@ void RegExpMacroAssemblerX64::PushCurrentPosition() {
void RegExpMacroAssemblerX64::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
- __ movq(rax, register_location(register_index));
+ __ movp(rax, register_location(register_index));
Push(rax);
if (check_stack_limit) CheckStackLimit();
}
@@ -1110,7 +1110,7 @@ void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ movq(register_location(register_index), Immediate(to));
+ __ movp(register_location(register_index), Immediate(to));
}
@@ -1123,27 +1123,27 @@ bool RegExpMacroAssemblerX64::Succeed() {
void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
if (cp_offset == 0) {
- __ movq(register_location(reg), rdi);
+ __ movp(register_location(reg), rdi);
} else {
__ lea(rax, Operand(rdi, cp_offset * char_size()));
- __ movq(register_location(reg), rax);
+ __ movp(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
ASSERT(reg_from <= reg_to);
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kInputStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
- __ movq(register_location(reg), rax);
+ __ movp(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movq(rax, backtrack_stackpointer());
+ __ movp(rax, backtrack_stackpointer());
__ subq(rax, Operand(rbp, kStackHighEnd));
- __ movq(register_location(reg), rax);
+ __ movp(register_location(reg), rax);
}
@@ -1156,17 +1156,17 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
- __ movq(rdx, code_object_pointer());
+ __ movp(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
- __ movq(r8, rbp);
+ __ movp(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
__ lea(rcx, Operand(rsp, -kPointerSize));
#else
// Third argument: RegExp code frame pointer.
- __ movq(rdx, rbp);
+ __ movp(rdx, rbp);
// Second argument: Code* of self.
- __ movq(rsi, code_object_pointer());
+ __ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
__ lea(rdi, Operand(rsp, -kPointerSize));
diff --git a/deps/v8/src/x64/simulator-x64.cc b/deps/v8/src/x64/simulator-x64.cc
index 209aa2d307..448b025a6b 100644
--- a/deps/v8/src/x64/simulator-x64.cc
+++ b/deps/v8/src/x64/simulator-x64.cc
@@ -24,4 +24,3 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 28e2a89627..346d5e805d 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -75,14 +75,14 @@ static void ProbeTable(Isolate* isolate,
// Get the map entry from the cache.
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
__ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
__ LoadAddress(kScratchRegister, value_offset);
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, 0));
// Check that the flags match what we're looking for.
@@ -119,7 +119,7 @@ void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
- __ movq(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
@@ -135,7 +135,7 @@ void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
// Load properties array.
Register properties = scratch0;
- __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
@@ -224,18 +224,18 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
- __ movq(prototype,
+ __ movp(prototype,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- __ movq(prototype,
+ __ movp(prototype,
FieldOperand(prototype, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
- __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
+ __ movp(prototype, Operand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
- __ movq(prototype,
+ __ movp(prototype,
FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -245,18 +245,22 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ Move(prototype, isolate->global_object());
- __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- prototype);
- __ j(not_equal, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ movp(scratch, Operand(rsi, offset));
+ __ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ Cmp(Operand(scratch, Context::SlotOffset(index)), function);
+ __ j(not_equal, miss);
+
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -272,7 +276,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
__ j(not_equal, miss_label);
// Load length directly from the JS array.
- __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
+ __ movp(rax, FieldOperand(receiver, JSArray::kLengthOffset));
__ ret(0);
}
@@ -288,7 +292,7 @@ static void GenerateStringCheck(MacroAssembler* masm,
__ JumpIfSmi(receiver, smi);
// Check that the object is a string.
- __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
__ testl(scratch, Immediate(kNotStringTag));
@@ -308,7 +312,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length directly from the string.
- __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
+ __ movp(rax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
// Check if the object is a JSValue wrapper.
@@ -318,9 +322,9 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// Check if the wrapped value is a string and load the length
// directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ __ movp(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ movp(rax, FieldOperand(scratch2, String::kLengthOffset));
__ ret(0);
}
@@ -331,7 +335,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, result, miss_label);
- if (!result.is(rax)) __ movq(rax, result);
+ if (!result.is(rax)) __ movp(rax, result);
__ ret(0);
}
@@ -347,10 +351,10 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
if (!inobject) {
// Calculate the offset into the properties array.
offset = offset + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ movp(dst, FieldOperand(src, JSObject::kPropertiesOffset));
src = dst;
}
- __ movq(dst, FieldOperand(src, offset));
+ __ movp(dst, FieldOperand(src, offset));
}
@@ -379,390 +383,94 @@ static void CompileCallLoadPropertyWithInterceptor(
Register receiver,
Register holder,
Register name,
- Handle<JSObject> holder_obj) {
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ Set(rax, StubCache::kInterceptorArgsLength);
- __ LoadAddress(rbx, ref);
-
- CEntryStub stub(1);
- __ CallStub(&stub);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
}
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
+// Generate call to API function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : last argument in the internal frame of the caller
- // -----------------------------------
- __ movq(scratch, StackOperandForReturnAddress(0));
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- __ movq(StackOperandForReturnAddress(0), scratch);
- __ Move(scratch, Smi::FromInt(0));
- StackArgumentsAccessor args(rsp, kFastApiCallArguments,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ movq(args.GetArgumentOperand(i), scratch);
+ __ PopReturnAddressTo(scratch_in);
+ // receiver
+ __ push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ push(arg);
+ }
+ __ PushReturnAddressFrom(scratch_in);
+  // Stack now matches JSFunction ABI.
+
+  // ABI for CallApiFunctionStub.
+ Register callee = rax;
+ Register call_data = rbx;
+ Register holder = rcx;
+ Register api_function_address = rdx;
+ Register scratch = rdi; // scratch_in is no longer valid.
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ Move(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
}
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address.
- // -- rsp[8] : last fast api call extra argument.
- // -- ...
- // -- rsp[kFastApiCallArguments * 8] : first fast api call extra
- // argument.
- // -- rsp[kFastApiCallArguments * 8 + 8] : last argument in the internal
- // frame.
- // -----------------------------------
- __ movq(scratch, StackOperandForReturnAddress(0));
- __ movq(StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize),
- scratch);
- __ addq(rsp, Immediate(kPointerSize * kFastApiCallArguments));
-}
-
-
-// Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] - rsp[56] : FunctionCallbackInfo, incl.
- // : object passing the type check
- // (set by CheckPrototypes)
- // -- rsp[64] : last argument
- // -- ...
- // -- rsp[(argc + 7) * 8] : first argument
- // -- rsp[(argc + 8) * 8] : receiver
- // -----------------------------------
- typedef FunctionCallbackArguments FCA;
- StackArgumentsAccessor args(rsp, argc + kFastApiCallArguments);
-
- // Save calling context.
- int offset = argc + kFastApiCallArguments;
- __ movq(args.GetArgumentOperand(offset - FCA::kContextSaveIndex), rsi);
- // Get the function and setup the context.
+ Isolate* isolate = masm->isolate();
Handle<JSFunction> function = optimization.constant_function();
- __ Move(rdi, function);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Construct the FunctionCallbackInfo on the stack.
- __ movq(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(rcx, api_call_info);
- __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(args.GetArgumentOperand(offset - FCA::kDataIndex), rbx);
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ Move(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ Move(scratch, api_call_info);
+ __ movp(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
} else {
- __ Move(args.GetArgumentOperand(offset - FCA::kDataIndex), call_data);
+ __ Move(call_data, call_data_obj);
}
- __ movq(kScratchRegister,
- ExternalReference::isolate_address(masm->isolate()));
- __ movq(args.GetArgumentOperand(offset - FCA::kIsolateIndex),
- kScratchRegister);
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueDefaultValueIndex),
- kScratchRegister);
- __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueOffset),
- kScratchRegister);
- // Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 7);
- __ lea(rbx, Operand(rsp, 1 * kPointerSize));
-
- // Function address is a foreign pointer outside V8's heap.
+ // Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ Move(
+ api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- __ PrepareCallApiFunction(kApiStackSpace);
-
- __ movq(StackSpaceOperand(0), rbx); // FunctionCallbackInfo::implicit_args_.
- __ addq(rbx, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
- __ movq(StackSpaceOperand(1), rbx); // FunctionCallbackInfo::values_.
- __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
- // FunctionCallbackInfo::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
-
-#if defined(__MINGW64__) || defined(_WIN64)
- Register arguments_arg = rcx;
- Register callback_arg = rdx;
-#else
- Register arguments_arg = rdi;
- Register callback_arg = rsi;
-#endif
-
- // v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
-
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
-
- StackArgumentsAccessor args_from_rbp(rbp, kFastApiCallArguments,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
- kFastApiCallArguments - 1 - FCA::kContextSaveIndex);
- Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- kFastApiCallArguments - 1 - FCA::kReturnValueOffset);
- __ CallApiFunctionAndReturn(
- function_address,
- thunk_address,
- callback_arg,
- argc + kFastApiCallArguments + 1,
- return_value_operand,
- restore_context ? &context_restore_operand : NULL);
-}
-
-
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- const int fast_api_call_argc = argc + kFastApiCallArguments;
- StackArgumentsAccessor args(rsp, fast_api_call_argc);
-  // argc + 1 is the number of arguments before the FastApiCall arguments; the extra 1 is the receiver.
- const int kHolderIndex = argc + 1 +
- kFastApiCallArguments - 1 - FunctionCallbackArguments::kHolderIndex;
- __ movq(scratch, StackOperandForReturnAddress(0));
- // Assign stack space for the call arguments and receiver.
- __ subq(rsp, Immediate((fast_api_call_argc + 1) * kPointerSize));
- __ movq(StackOperandForReturnAddress(0), scratch);
- // Write holder to stack frame.
- __ movq(args.GetArgumentOperand(kHolderIndex), receiver);
- __ movq(args.GetReceiverOperand(), receiver);
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ movq(args.GetArgumentOperand(i + 1), values[i]);
- }
-
- GenerateFastApiCall(masm, optimization, argc, true);
+ // Jump to stub.
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
}
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- Handle<JSFunction> fun = optimization.constant_function();
- ParameterCount expected(fun);
- __ InvokeFunction(fun, expected, arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- StubCache::kInterceptorArgsLength);
-
- // Restore the name_ register.
- __ pop(name_);
-
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- // Leave the internal frame.
- }
-
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(not_equal, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_ic_state_;
-};
-
-
void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
Label* label,
Handle<Name> name) {
@@ -875,7 +583,7 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Update the map of the object.
__ Move(scratch1, transition);
- __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+ __ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
@@ -907,15 +615,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
if (FLAG_track_double_fields && representation.IsDouble()) {
- __ movq(FieldOperand(receiver_reg, offset), storage_reg);
+ __ movp(FieldOperand(receiver_reg, offset), storage_reg);
} else {
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ __ movp(FieldOperand(receiver_reg, offset), value_reg);
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(storage_reg, value_reg);
+ __ movp(storage_reg, value_reg);
}
__ RecordWriteField(
receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs,
@@ -925,17 +633,17 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
if (FLAG_track_double_fields && representation.IsDouble()) {
- __ movq(FieldOperand(scratch1, offset), storage_reg);
+ __ movp(FieldOperand(scratch1, offset), storage_reg);
} else {
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ __ movp(FieldOperand(scratch1, offset), value_reg);
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(storage_reg, value_reg);
+ __ movp(storage_reg, value_reg);
}
__ RecordWriteField(
scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs,
@@ -981,12 +689,12 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(scratch1, FieldOperand(receiver_reg, offset));
+ __ movp(scratch1, FieldOperand(receiver_reg, offset));
} else {
- __ movq(scratch1,
+ __ movp(scratch1,
FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ movq(scratch1, FieldOperand(scratch1, offset));
+ __ movp(scratch1, FieldOperand(scratch1, offset));
}
// Store the value into the storage.
@@ -1014,12 +722,12 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ __ movp(FieldOperand(receiver_reg, offset), value_reg);
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ __ movp(name_reg, value_reg);
__ RecordWriteField(
receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
@@ -1028,13 +736,13 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movp(FieldOperand(scratch1, offset), value_reg);
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ __ movp(name_reg, value_reg);
__ RecordWriteField(
scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
@@ -1047,26 +755,6 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<JSGlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
@@ -1076,21 +764,20 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM((masm()))
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ Move(scratch1, Handle<Map>(object->map()));
+ __ Move(scratch1, receiver_map);
- Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1102,136 +789,127 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
- StackArgumentsAccessor args(rsp, kFastApiCallArguments,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- const int kHolderIndex = kFastApiCallArguments - 1 -
- FunctionCallbackArguments::kHolderIndex;
-
- if (save_at_depth == depth) {
- __ movq(args.GetArgumentOperand(kHolderIndex), object_reg);
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
- Handle<Map> current_map(current->map());
if (in_new_space) {
// Save the map in scratch1 for later.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
- if (current->IsJSGlobalProxy()) {
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
if (in_new_space) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ Move(reg, prototype);
}
}
- if (save_at_depth == depth) {
- __ movq(args.GetArgumentOperand(kHolderIndex), reg);
- }
-
// Go to the next object in the prototype chain.
current = prototype;
+ current_map = handle(current->map());
}
- ASSERT(current.is_identical_to(holder));
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()), miss, DONT_DO_SMI_CHECK);
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- if (current->IsJSGlobalProxy()) {
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
// Return the register containing the holder.
return reg;
}
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -1240,7 +918,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
// Load the properties dictionary.
Register dictionary = scratch4();
- __ movq(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
+ __ movp(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
// Probe the dictionary.
Label probe_done;
@@ -1260,15 +938,15 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(scratch2(),
+ __ movp(scratch2(),
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
- __ movq(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
+ __ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
__ cmpq(scratch2(), scratch3());
__ j(not_equal, &miss);
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
return reg;
}
@@ -1277,7 +955,7 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex field,
Representation representation) {
- if (!reg.is(receiver())) __ movq(receiver(), reg);
+ if (!reg.is(receiver())) __ movp(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
@@ -1293,13 +971,6 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -1332,51 +1003,15 @@ void LoadStubCompiler::GenerateLoadCallback(
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const PropertyAccessorInfo& to the C++ callback.
- Address getter_address = v8::ToCData<Address>(callback->getter());
-
-#if defined(__MINGW64__) || defined(_WIN64)
- Register getter_arg = r8;
- Register accessor_info_arg = rdx;
- Register name_arg = rcx;
-#else
- Register getter_arg = rdx;
- Register accessor_info_arg = rsi;
- Register name_arg = rdi;
-#endif
-
- ASSERT(!name_arg.is(scratch4()));
- __ movq(name_arg, rsp);
__ PushReturnAddressFrom(scratch4());
- // v8::Arguments::values_ and handler for name.
- const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Allocate v8::AccessorInfo in non-GCed stack space.
- const int kArgStackSpace = 1;
-
- __ PrepareCallApiFunction(kArgStackSpace);
- __ lea(rax, Operand(name_arg, 1 * kPointerSize));
-
- // v8::PropertyAccessorInfo::args_.
- __ movq(StackSpaceOperand(0), rax);
-
- // The context register (rsi) has been saved in PrepareCallApiFunction and
- // could be used to pass arguments.
- __ lea(accessor_info_arg, StackSpaceOperand(0));
-
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+  // ABI for CallApiGetter.
+ Register api_function_address = r8;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
- // The name handler is counted as an argument.
- StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
- Operand return_value_operand = args.GetArgumentOperand(
- PropertyCallbackArguments::kArgsLength - 1 -
- PropertyCallbackArguments::kReturnValueOffset);
- __ CallApiFunctionAndReturn(getter_address,
- thunk_address,
- getter_arg,
- kStackSpace,
- return_value_operand,
- NULL);
+ CallApiGetterStub stub;
+ __ TailCallStub(&stub);
}
@@ -1389,7 +1024,7 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
@@ -1440,11 +1075,9 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -1480,1301 +1113,14 @@ void LoadStubCompiler::GenerateLoadInterceptor(
}
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Cmp(rcx, name);
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
-
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, miss);
- CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ Move(rdi, cell);
- __ movq(rdi, FieldOperand(rdi, Cell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(rdi, miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
- __ j(not_equal, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
- __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
- } else {
- __ Cmp(rdi, function);
- }
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- GenerateFastPropertyLoad(masm(), rdi, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
-
- // Check that the function really is a function.
- __ JumpIfSmi(rdi, &miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
-  // Check that the function is still the Array function.
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- __ movq(rax, Immediate(argc));
- __ Move(rbx, site_feedback_cell);
- __ Move(rdi, function);
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- if (argc == 0) {
- // Noop, return the length.
- __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- // Get the elements array of the object.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_array_map());
- __ j(not_equal, &check_double);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ movq(rcx, args.GetArgumentOperand(1));
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ movq(FieldOperand(rdi,
- rax,
- times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- rcx);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&check_double);
-
- // Check that the elements are in double mode.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_double_array_map());
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &call_builtin);
-
- __ movq(rcx, args.GetArgumentOperand(1));
- __ StoreNumberToDoubleElements(
- rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(rbx, &call_builtin);
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &call_builtin);
- // rdx: receiver
- // rbx: map
-
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
-      // Restore rdi.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- rbx,
- rdi,
- &call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(rbx, &call_builtin);
- }
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ movq(Operand(rdx, 0), rcx);
-
- __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- __ movq(rbx, args.GetArgumentOperand(1));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(rbx, &no_fast_elements_check);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top.
- __ Load(rcx, new_space_allocation_top);
-
- // Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
- __ j(not_equal, &call_builtin);
- __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
- Operand limit_operand =
- masm()->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ Store(new_space_allocation_top, rcx);
-
- // Push the argument...
- __ movq(Operand(rdx, 0), rbx);
- // ... and fill the rest with holes.
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
- }
-
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
- // Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, args.GetReceiverOperand());
-
- // Increment element's and array's sizes.
- __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(kAllocationDelta));
-
- // Make new length a smi before returning it.
- __ Integer32ToSmi(rax, rax);
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- isolate()),
- argc + 1,
- 1);
- }
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss, return_undefined, call_builtin;
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- // Get the elements array of the object.
- __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rcx and calculate new length.
- __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ subl(rcx, Immediate(1));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
- __ movq(rax, FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- // Check if element is already the hole.
- __ cmpq(rax, r9);
-  // If so, call the slow case to also check the prototypes for the value.
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
-
- // Fill with the hole and return original value.
- __ movq(FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
-
- Register receiver = rbx;
- Register index = rdi;
- Register result = rax;
- __ movq(receiver, args.GetReceiverOperand());
- if (argc > 0) {
- __ movq(index, args.GetArgumentOperand(1));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
-
- Register receiver = rax;
- Register index = rdi;
- Register scratch = rdx;
- Register result = rax;
- __ movq(receiver, args.GetReceiverOperand());
- if (argc > 0) {
- __ movq(index, args.GetArgumentOperand(1));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kempty_stringRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, args.GetReceiverOperand());
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = rbx;
- __ movq(code, args.GetArgumentOperand(1));
-
- // Check the code is a smi.
- Label slow;
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
-
- StringCharFromCodeGenerator generator(code, rax);
- generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
-
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
-  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
-  //  -- ...
-  //  -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, args.GetReceiverOperand());
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into rax.
- __ movq(rax, args.GetArgumentOperand(1));
-
- // Check if the argument is a smi.
- Label smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &smi);
-
- // Check if the argument is a heap number and load its value into xmm0.
- Label slow;
- __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Check if the argument is strictly positive. Note this also discards NaN.
- __ xorpd(xmm1, xmm1);
- __ ucomisd(xmm0, xmm1);
- __ j(below_equal, &slow);
-
- // Do a truncating conversion.
- __ cvttsd2si(rax, xmm0);
-
-  // Check for 0x80000000, which signals a failed conversion.
- Label conversion_failure;
- __ cmpl(rax, Immediate(0x80000000));
- __ j(equal, &conversion_failure);
-
- // Smi tag and return.
- __ Integer32ToSmi(rax, rax);
- __ bind(&smi);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is < 2^kMantissaBits.
- Label already_round;
- __ bind(&conversion_failure);
-  int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
- __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
- __ movq(xmm1, rbx);
- __ ucomisd(xmm0, xmm1);
- __ j(above_equal, &already_round);
-
- // Save a copy of the argument.
- __ movaps(xmm2, xmm0);
-
- // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
- __ addsd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
-
- // Compare the argument and the tentative result to get the right mask:
- // if xmm2 < xmm0:
- // xmm2 = 1...1
- // else:
- // xmm2 = 0...0
- __ cmpltsd(xmm2, xmm0);
-
- // Subtract 1 if the argument was less than the tentative result.
- int64_t kOne = V8_INT64_C(0x3ff0000000000000);
- __ movq(rbx, kOne, RelocInfo::NONE64);
- __ movq(xmm1, rbx);
- __ andpd(xmm1, xmm2);
- __ subsd(xmm0, xmm1);
-
- // Return a new heap number.
- __ AllocateHeapNumber(rax, rbx, &slow);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(2 * kPointerSize);
-
- // Return the argument (when it's an already round heap number).
- __ bind(&already_round);
- __ movq(rax, args.GetArgumentOperand(1));
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
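
When the truncating cvttsd2si conversion fails (result 0x80000000), the stub above falls back to the add-and-subtract-2^52 trick: biasing a positive double below 2^52 by 2^52 forces it onto an integral value, and one is subtracted if that rounding went up. A hedged, portable C++ sketch of the same computation (the helper is ours, not V8's):

// Mirrors the stub's slow floor path for strictly positive doubles.
static double FloorViaMantissaBias(double x) {
  const double kTwoPow52 = 4503599627370496.0;   // 0x4330000000000000, as used above.
  if (x >= kTwoPow52) return x;                   // Already integral at this magnitude.
  double rounded = (x + kTwoPow52) - kTwoPow52;   // Rounds to the nearest integer.
  if (x < rounded) rounded -= 1.0;                // Undo an upward rounding.
  return rounded;
}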
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, args.GetReceiverOperand());
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
- // Load the (only) argument into rax.
- __ movq(rax, args.GetArgumentOperand(1));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(rax, &not_smi);
-
-  // Branchless abs implementation; see:
-  // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
-  // Set rbx to 1...1 (== -1) if the argument is negative, or to 0...0
-  // otherwise.
- __ movq(rbx, rax);
- __ sar(rbx, Immediate(kBitsPerPointer - 1));
-
-  // Do bitwise not or do nothing depending on rbx.
- __ xor_(rax, rbx);
-
-  // Add 1 or do nothing depending on rbx.
- __ subq(rax, rbx);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its value.
- __ bind(&not_smi);
- __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- const int sign_mask_shift =
- (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
- __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
- RelocInfo::NONE64);
- __ testq(rbx, rdi);
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number. We still have the sign mask in rdi.
- __ bind(&negative_sign);
- __ xor_(rbx, rdi);
- __ AllocateHeapNumber(rax, rdx, &slow);
- __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
-
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(type, name);
-}
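
The smi fast path above uses the branchless absolute-value idiom from the bithacks page: an arithmetic shift turns the sign into an all-ones or all-zero mask, and xor-plus-subtract applies the two's-complement negation only when the mask is set. A self-contained sketch (assumes arithmetic right shift on signed values, as on x64):

#include <cstdint>

static int64_t BranchlessAbs(int64_t value) {
  int64_t mask = value >> 63;      // -1 if negative, 0 otherwise.
  return (value ^ mask) - mask;    // Negates only negative inputs; overflows
                                   // for INT64_MIN, which the stub routes to
                                   // the slow case.
}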
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- ASSERT(optimization.is_simple_api_call());
-  // Bail out if the object is a global object, as we don't want to
-  // repatch it to the global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ movq(rax,
- StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize));
- __ movq(StackOperandForReturnAddress(0), rax);
-
- GenerateFastApiCall(masm(), optimization, argc, false);
-
- __ bind(&miss);
- __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(rdx, &miss);
- }
-
-  // Make sure that it's okay not to patch the on-stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- Counters* counters = isolate()->counters();
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(counters->call_const(), 1);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
- rdi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ CmpObjectType(rdx, SYMBOL_TYPE, rax);
- __ j(not_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &fast);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
- }
- }
-
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<PropertyCell>::null(),
- function, Handle<String>::cast(name),
- Code::CONSTANT);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
+ // Check that the object is a boolean.
+ __ Cmp(object, factory()->true_value());
+ __ j(equal, &success);
+ __ Cmp(object, factory()->false_value());
+ __ j(not_equal, miss);
__ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
-
- CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
- &miss);
-
- // Restore receiver.
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the function really is a function.
- __ JumpIfSmi(rax, &miss);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
-
- // Invoke the function.
- __ movq(rdi, rax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle load cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- StackArgumentsAccessor args(rsp, arguments());
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
-
- // Set up the context (function already in rdi).
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
}
@@ -2783,12 +1129,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
__ PopReturnAddressTo(scratch1());
__ push(receiver());
+ __ push(holder_reg);
__ Push(callback); // callback info
__ Push(name);
__ push(value());
@@ -2797,28 +1143,10 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2828,6 +1156,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- rax : value
@@ -2837,18 +1166,25 @@ void StoreStubCompiler::GenerateStoreViaSetter(
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ Register receiver = rdx;
+ Register value = rax;
// Save value register, so we can restore it later.
- __ push(rax);
+ __ push(value);
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ push(rdx);
- __ push(rax);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ push(receiver);
+ __ push(value);
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2859,7 +1195,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
__ pop(rax);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
}
@@ -2876,16 +1212,15 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ push(receiver());
__ push(this->name());
__ push(value());
- __ Push(Smi::FromInt(strict_mode()));
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2896,7 +1231,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
Label miss;
__ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ movq(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
for (int i = 0; i < receiver_count; ++i) {
// Check map and tail call if there's a match
@@ -2906,7 +1241,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
} else {
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
- __ movq(transition_map(),
+ __ Move(transition_map(),
transitioned_maps->at(i),
RelocInfo::EMBEDDED_OBJECT);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
@@ -2924,23 +1259,18 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
- __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ ret(0);
// Return the generated code.
- return GetCode(kind(), Code::NONEXISTENT, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2972,27 +1302,12 @@ Register* KeyedStoreStubCompiler::registers() {
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Cmp(name_reg, name);
- __ j(not_equal, miss);
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Cmp(name_reg, name);
- __ j(not_equal, miss);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
@@ -3005,11 +1320,16 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -3017,7 +1337,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
}
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
}
@@ -3028,23 +1348,20 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
- Label success, miss;
+ Label miss;
// TODO(verwaest): Directly store to rax. Currently we cannot do this, since
// rax is used as receiver(), which we would otherwise clobber before a
// potential miss.
-
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
__ Move(rbx, cell);
- __ movq(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
+ __ movp(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
@@ -3055,42 +1372,51 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
- HandlerFrontendFooter(name, &success, &miss);
- __ bind(&success);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
__ ret(0);
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::NORMAL, name);
}
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
+ TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ Cmp(this->name(), name);
+ __ j(not_equal, &miss);
}
- __ JumpIfSmi(receiver(), &miss);
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
Register map_reg = scratch1();
- __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
+ __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = receiver_maps->at(current);
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
// Check map and tail call if there's a match
- __ Cmp(map_reg, receiver_maps->at(current));
+ __ Cmp(map_reg, map);
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
__ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
}
}
@@ -3117,14 +1443,14 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, miss_force_generic;
+ Label slow, miss;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- __ JumpIfNotSmi(rax, &miss_force_generic);
+ __ JumpIfNotSmi(rax, &miss);
__ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rdx: receiver
@@ -3142,13 +1468,13 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
- __ bind(&miss_force_generic);
+ __ bind(&miss);
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
diff --git a/deps/v8/src/zone-allocator.h b/deps/v8/src/zone-allocator.h
new file mode 100644
index 0000000000..5245c6b1bf
--- /dev/null
+++ b/deps/v8/src/zone-allocator.h
@@ -0,0 +1,80 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ZONE_ALLOCATOR_H_
+#define V8_ZONE_ALLOCATOR_H_
+
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+template<typename T>
+class zone_allocator {
+ public:
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ template<class O> struct rebind {
+ typedef zone_allocator<O> other;
+ };
+
+ explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
+ explicit zone_allocator(const zone_allocator& other) throw()
+ : zone_(other.zone_) {}
+  template<typename U> zone_allocator(const zone_allocator<U>& other) throw()
+      : zone_(other.zone_) {}
+  // Needed so the converting constructor above can read other.zone_.
+  template<typename U> friend class zone_allocator;
+
+ pointer address(reference x) const {return &x;}
+ const_pointer address(const_reference x) const {return &x;}
+
+ pointer allocate(size_type count, const void* hint = 0) {
+ size_t size = count * sizeof(value_type);
+ size = RoundUp(size, kPointerSize);
+ return static_cast<pointer>(zone_->New(size));
+ }
+ void deallocate(pointer p, size_type) { /* noop for Zones */ }
+
+ size_type max_size() const throw() {
+ size_type max = static_cast<size_type>(-1) / sizeof(T);
+ return (max > 0 ? max : 1);
+ }
+ void construct(pointer p, const T& val) {
+ new(static_cast<void*>(p)) T(val);
+ }
+ void destroy(pointer p) { (static_cast<T*>(p))->~T(); }
+
+ private:
+ Zone* zone_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ZONE_ALLOCATOR_H_
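
A short usage sketch for the new allocator (assumes a live v8::internal::Zone; see the ZoneIntAllocator/IntVector typedefs added to zone-containers.h below):

#include <vector>
#include "zone.h"
#include "zone-allocator.h"

void Example(v8::internal::Zone* zone) {
  typedef v8::internal::zone_allocator<int> IntAlloc;
  IntAlloc allocator(zone);
  std::vector<int, IntAlloc> ints(allocator);  // Backing store lives in the zone.
  ints.push_back(1);
  ints.push_back(2);
  // No explicit cleanup: deallocate() is a no-op and the memory is released
  // when the zone itself goes away.
}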
diff --git a/deps/v8/src/allocation-inl.h b/deps/v8/src/zone-containers.h
index d32db4b17f..31672b62db 100644
--- a/deps/v8/src/allocation-inl.h
+++ b/deps/v8/src/zone-containers.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,25 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_ALLOCATION_INL_H_
-#define V8_ALLOCATION_INL_H_
+#ifndef V8_ZONE_CONTAINERS_H_
+#define V8_ZONE_CONTAINERS_H_
-#include "allocation.h"
+#include <vector>
+#include <set>
+
+#include "zone.h"
namespace v8 {
namespace internal {
-
-void* PreallocatedStorageAllocationPolicy::New(size_t size) {
- return Isolate::Current()->PreallocatedStorageNew(size);
-}
-
-
-void PreallocatedStorageAllocationPolicy::Delete(void* p) {
- return Isolate::Current()->PreallocatedStorageDelete(p);
-}
-
+typedef zone_allocator<int> ZoneIntAllocator;
+typedef std::vector<int, ZoneIntAllocator> IntVector;
+typedef IntVector::iterator IntVectorIter;
+typedef IntVector::reverse_iterator IntVectorRIter;
} } // namespace v8::internal
-#endif // V8_ALLOCATION_INL_H_
+#endif // V8_ZONE_CONTAINERS_H_
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index 9ee00edcba..417f895e5a 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -185,25 +185,31 @@ Address Zone::NewExpand(int size) {
// except that we employ a maximum segment size when we delete. This
// is to avoid excessive malloc() and free() overhead.
Segment* head = segment_head_;
- int old_size = (head == NULL) ? 0 : head->size();
- static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
- int new_size_no_overhead = size + (old_size << 1);
- int new_size = kSegmentOverhead + new_size_no_overhead;
+ const size_t old_size = (head == NULL) ? 0 : head->size();
+ static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
+ const size_t new_size_no_overhead = size + (old_size << 1);
+ size_t new_size = kSegmentOverhead + new_size_no_overhead;
+ const size_t min_new_size = kSegmentOverhead + static_cast<size_t>(size);
// Guard against integer overflow.
- if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
+ if (new_size_no_overhead < static_cast<size_t>(size) ||
+ new_size < static_cast<size_t>(kSegmentOverhead)) {
V8::FatalProcessOutOfMemory("Zone");
return NULL;
}
- if (new_size < kMinimumSegmentSize) {
+ if (new_size < static_cast<size_t>(kMinimumSegmentSize)) {
new_size = kMinimumSegmentSize;
- } else if (new_size > kMaximumSegmentSize) {
+ } else if (new_size > static_cast<size_t>(kMaximumSegmentSize)) {
// Limit the size of new segments to avoid growing the segment size
// exponentially, thus putting pressure on contiguous virtual address space.
// All the while making sure to allocate a segment large enough to hold the
// requested size.
- new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
+ new_size = Max(min_new_size, static_cast<size_t>(kMaximumSegmentSize));
}
- Segment* segment = NewSegment(new_size);
+ if (new_size > INT_MAX) {
+ V8::FatalProcessOutOfMemory("Zone");
+ return NULL;
+ }
+ Segment* segment = NewSegment(static_cast<int>(new_size));
if (segment == NULL) {
V8::FatalProcessOutOfMemory("Zone");
return NULL;
@@ -213,7 +219,10 @@ Address Zone::NewExpand(int size) {
Address result = RoundUp(segment->start(), kAlignment);
position_ = result + size;
// Check for address overflow.
- if (position_ < result) {
+  // (Should not happen since the segment is guaranteed to accommodate
+  // size bytes + header and alignment padding.)
+ if (reinterpret_cast<uintptr_t>(position_)
+ < reinterpret_cast<uintptr_t>(result)) {
V8::FatalProcessOutOfMemory("Zone");
return NULL;
}
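
The reworked sizing logic keeps the old doubling policy but performs the arithmetic in size_t and re-checks the final value against INT_MAX. A hedged sketch of the policy in isolation (the constants are parameters here, not V8's actual values):

#include <algorithm>
#include <cstddef>

size_t NextSegmentSize(size_t requested, size_t old_size, size_t overhead,
                       size_t min_size, size_t max_size) {
  size_t grown = overhead + requested + 2 * old_size;  // size + (old_size << 1) + overhead.
  size_t needed = overhead + requested;                // Smallest segment that fits.
  if (grown < min_size) return min_size;
  if (grown > max_size) return std::max(needed, max_size);
  return grown;  // Zone::NewExpand still rejects anything above INT_MAX.
}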
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index 103eaeb126..d651b3c0f0 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -25,9 +25,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Too slow in Debug mode.
[
-['mode == debug', {
- # Too slow in Debug mode.
- 'octane/mandreel': [SKIP],
-}], # 'mode == debug'
+[ALWAYS, {
+ 'octane/mandreel': [PASS, ['mode == debug', SKIP]],
+}], # ALWAYS
]
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index b15553a861..c94a35ffd9 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -64,7 +64,7 @@ class BenchmarksTestSuite(testsuite.TestSuite):
"octane/crypto",
"octane/deltablue",
"octane/earley-boyer",
- "octane/gbemu",
+ "octane/gbemu-part1",
"octane/mandreel",
"octane/navier-stokes",
"octane/pdfjs",
@@ -72,6 +72,8 @@ class BenchmarksTestSuite(testsuite.TestSuite):
"octane/regexp",
"octane/richards",
"octane/splay",
+ "octane/typescript",
+ "octane/zlib",
"sunspider/3d-cube",
"sunspider/3d-morph",
@@ -111,6 +113,14 @@ class BenchmarksTestSuite(testsuite.TestSuite):
elif testcase.path.startswith("octane"):
result.append(os.path.join(self.testroot, "octane/base.js"))
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
+ if testcase.path.startswith("octane/gbemu"):
+ result.append(os.path.join(self.testroot, "octane/gbemu-part2.js"))
+ elif testcase.path.startswith("octane/typescript"):
+ result.append(os.path.join(self.testroot,
+ "octane/typescript-compiler.js"))
+ result.append(os.path.join(self.testroot, "octane/typescript-input.js"))
+ elif testcase.path.startswith("octane/zlib"):
+ result.append(os.path.join(self.testroot, "octane/zlib-data.js"))
result += ["-e", "BenchmarkSuite.RunSuites({});"]
elif testcase.path.startswith("sunspider"):
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
@@ -158,8 +168,8 @@ class BenchmarksTestSuite(testsuite.TestSuite):
self._DownloadIfNecessary(
("http://svn.webkit.org/repository/webkit/trunk/PerformanceTests/"
- "SunSpider/tests/sunspider-1.0/"),
- "153700", "sunspider")
+ "SunSpider/tests/sunspider-1.0.2/"),
+ "159499", "sunspider")
self._DownloadIfNecessary(
("http://kraken-mirror.googlecode.com/svn/trunk/kraken/tests/"
@@ -168,7 +178,7 @@ class BenchmarksTestSuite(testsuite.TestSuite):
self._DownloadIfNecessary(
"http://octane-benchmark.googlecode.com/svn/trunk/",
- "22", "octane")
+ "26", "octane")
os.chdir(old_cwd)
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
new file mode 100644
index 0000000000..6d5f927e73
--- /dev/null
+++ b/deps/v8/test/cctest/OWNERS
@@ -0,0 +1,2 @@
+per-file *-mips.*=plind44@gmail.com
+per-file *-mips.*=gergely@homejinni.com
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 4aa9c7eb71..b1cf5abb4e 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -27,6 +27,10 @@
#include <v8.h>
#include "cctest.h"
+
+#include "print-extension.h"
+#include "profiler-extension.h"
+#include "trace-extension.h"
#include "debug.h"
enum InitializationState {kUnset, kUnintialized, kInitialized};
@@ -141,6 +145,13 @@ int main(int argc, char* argv[]) {
CcTestArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
+ i::PrintExtension print_extension;
+ v8::RegisterExtension(&print_extension);
+ i::ProfilerExtension profiler_extension;
+ v8::RegisterExtension(&profiler_extension);
+ i::TraceExtension trace_extension;
+ v8::RegisterExtension(&trace_extension);
+
int tests_run = 0;
bool print_run_count = true;
for (int i = 1; i < argc; i++) {
@@ -188,6 +199,7 @@ int main(int argc, char* argv[]) {
}
if (print_run_count && tests_run != 1)
printf("Ran %i tests.\n", tests_run);
+ CcTest::TearDown();
if (!disable_automatic_dispose_) v8::V8::Dispose();
return 0;
}
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index fbe38f2709..187bdf3236 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -47,6 +47,8 @@
'gay-fixed.cc',
'gay-precision.cc',
'gay-shortest.cc',
+ 'print-extension.cc',
+ 'profiler-extension.cc',
'test-accessors.cc',
'test-alloc.cc',
'test-api.cc',
@@ -80,21 +82,25 @@
'test-hashmap.cc',
'test-heap.cc',
'test-heap-profiler.cc',
+ 'test-libplatform-task-queue.cc',
+ 'test-libplatform-worker-thread.cc',
'test-list.cc',
'test-liveedit.cc',
'test-lockers.cc',
'test-log.cc',
+ 'test-microtask-delivery.cc',
'test-mark-compact.cc',
+ 'test-mementos.cc',
'test-mutex.cc',
'test-object-observe.cc',
'test-parsing.cc',
'test-platform.cc',
'test-platform-tls.cc',
'test-profile-generator.cc',
- 'test-random.cc',
'test-random-number-generator.cc',
'test-regexp.cc',
'test-reloc-info.cc',
+ 'test-representation.cc',
'test-semaphore.cc',
'test-serialize.cc',
'test-socket.cc',
@@ -112,7 +118,8 @@
'test-version.cc',
'test-weakmaps.cc',
'test-weaksets.cc',
- 'test-weaktypedarrays.cc'
+ 'test-weaktypedarrays.cc',
+ 'trace-extension.cc'
],
'conditions': [
['v8_target_arch=="ia32"', {
@@ -122,6 +129,7 @@
'test-code-stubs-ia32.cc',
'test-cpu-ia32.cc',
'test-disasm-ia32.cc',
+ 'test-macro-assembler-ia32.cc',
'test-log-stack-tracer.cc'
],
}],
@@ -144,14 +152,28 @@
'test-macro-assembler-arm.cc'
],
}],
+ ['v8_target_arch=="a64"', {
+ 'sources': [
+ 'test-utils-a64.cc',
+ 'test-assembler-a64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-a64.cc',
+ 'test-disasm-a64.cc',
+ 'test-fuzz-a64.cc',
+ 'test-javascript-a64.cc',
+ 'test-js-a64-variables.cc'
+ ],
+ }],
['v8_target_arch=="mipsel"', {
'sources': [
'test-assembler-mips.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-mips.cc',
'test-disasm-mips.cc',
'test-macro-assembler-mips.cc'
],
}],
- [ 'OS=="linux"', {
+ [ 'OS=="linux" or OS=="qnx"', {
'sources': [
'test-platform-linux.cc',
],
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 7f84c259f0..d9f76294e1 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -58,10 +58,11 @@
static void Test##Name()
#endif
-#define EXTENSION_LIST(V) \
- V(GC_EXTENSION, "v8/gc") \
- V(PRINT_EXTENSION, "v8/print") \
- V(TRACE_EXTENSION, "v8/trace")
+#define EXTENSION_LIST(V) \
+ V(GC_EXTENSION, "v8/gc") \
+ V(PRINT_EXTENSION, "v8/print") \
+ V(PROFILER_EXTENSION, "v8/profiler") \
+ V(TRACE_EXTENSION, "v8/trace")
#define DEFINE_EXTENSION_ID(Name, Ident) Name##_ID,
enum CcTestExtensionIds {
@@ -129,6 +130,10 @@ class CcTest {
CcTestExtensionFlags extensions,
v8::Isolate* isolate = CcTest::isolate());
+ static void TearDown() {
+ if (isolate_ != NULL) isolate_->Dispose();
+ }
+
private:
friend int main(int argc, char** argv);
TestFunction* callback_;
@@ -255,7 +260,7 @@ class LocalContext {
virtual ~LocalContext() {
v8::HandleScope scope(isolate_);
v8::Local<v8::Context>::New(isolate_, context_)->Exit();
- context_.Dispose();
+ context_.Reset();
}
v8::Context* operator->() {
@@ -289,12 +294,12 @@ class LocalContext {
};
static inline v8::Local<v8::Value> v8_num(double x) {
- return v8::Number::New(x);
+ return v8::Number::New(v8::Isolate::GetCurrent(), x);
}
static inline v8::Local<v8::String> v8_str(const char* x) {
- return v8::String::New(x);
+ return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x);
}
@@ -305,7 +310,8 @@ static inline v8::Local<v8::Script> v8_compile(const char* x) {
// Helper function that compiles and runs the source.
static inline v8::Local<v8::Value> CompileRun(const char* source) {
- return v8::Script::Compile(v8::String::New(source))->Run();
+ return v8::Script::Compile(
+ v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), source))->Run();
}
@@ -314,10 +320,12 @@ static inline v8::Local<v8::Value> CompileRunWithOrigin(const char* source,
const char* origin_url,
int line_number,
int column_number) {
- v8::ScriptOrigin origin(v8::String::New(origin_url),
- v8::Integer::New(line_number),
- v8::Integer::New(column_number));
- return v8::Script::Compile(v8::String::New(source), &origin)->Run();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::ScriptOrigin origin(v8::String::NewFromUtf8(isolate, origin_url),
+ v8::Integer::New(isolate, line_number),
+ v8::Integer::New(isolate, column_number));
+ return v8::Script::Compile(v8::String::NewFromUtf8(isolate, source), &origin)
+ ->Run();
}
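
These helpers track the isolate-explicit handle factories: string and integer constructors now take the v8::Isolate as their first argument instead of relying on an implicit current isolate. A minimal sketch of the new calling convention (assumes an entered isolate and context):

v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, "6 * 7");
v8::Local<v8::Value> result = v8::Script::Compile(source)->Run();
// result->Int32Value() is 42 when run inside a live context.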
@@ -332,6 +340,7 @@ static inline int FlagDependentPortOffset() {
static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
int new_linear_size = static_cast<int>(
*space->allocation_limit_address() - *space->allocation_top_address());
+ if (new_linear_size == 0) return;
v8::internal::MaybeObject* maybe = space->AllocateRaw(new_linear_size);
v8::internal::FreeListNode* node = v8::internal::FreeListNode::cast(maybe);
node->set_size(space->heap(), new_linear_size);
@@ -340,9 +349,7 @@ static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
// Helper function that simulates a full old-space in the heap.
static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
- int old_linear_size = static_cast<int>(space->limit() - space->top());
- space->Free(space->top(), old_linear_size);
- space->SetTop(space->limit(), space->limit());
+ space->EmptyAllocationInfo();
space->ResetFreeList();
space->ClearStats();
}
@@ -356,13 +363,13 @@ class HeapObjectsTracker {
HeapObjectsTracker() {
heap_profiler_ = i::Isolate::Current()->heap_profiler();
CHECK_NE(NULL, heap_profiler_);
- heap_profiler_->StartHeapAllocationsRecording();
+ heap_profiler_->StartHeapObjectsTracking(true);
}
~HeapObjectsTracker() {
i::Isolate::Current()->heap()->CollectAllAvailableGarbage();
- CHECK_EQ(0, heap_profiler_->FindUntrackedObjects());
- heap_profiler_->StopHeapAllocationsRecording();
+ CHECK_EQ(0, heap_profiler_->heap_object_map()->FindUntrackedObjects());
+ heap_profiler_->StopHeapObjectsTracking();
}
private:
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 59bf8268e9..f149ebe297 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -60,9 +60,84 @@
# running several variants. Note that this still takes ages, because there
# are actually 13 * 38 * 5 * 128 = 316160 individual tests hidden here.
'test-parsing/ParserSync': [PASS, NO_VARIANTS],
+
+ ############################################################################
+ # Slow tests.
+ 'test-api/Threading1': [PASS, ['mode == debug', SLOW]],
+ 'test-api/Threading2': [PASS, ['mode == debug', SLOW]],
+ 'test-api/Threading3': [PASS, ['mode == debug', SLOW]],
+ 'test-api/Threading4': [PASS, ['mode == debug', SLOW]],
}], # ALWAYS
##############################################################################
+['arch == a64', {
+
+ 'test-api/Bug618': [PASS],
+
+ # BUG(v8:2999).
+ 'test-cpu-profiler/CollectCpuProfile': [PASS, FAIL],
+
+ # Runs out of memory in debug mode.
+ 'test-api/OutOfMemory': [PASS, ['mode == debug', FAIL]],
+ 'test-api/OutOfMemoryNested': [PASS, ['mode == debug', FAIL]],
+
+ # BUG(v8:3154).
+ 'test-heap/ReleaseOverReservedPages': [PASS, ['mode == debug', FAIL]],
+
+ # BUG(v8:3155).
+ 'test-strings/AsciiArrayJoin': [PASS, ['mode == debug', FAIL]],
+}], # 'arch == a64'
+
+['arch == a64 and simulator_run == True', {
+
+ # Pass but take too long with the simulator.
+ 'test-api/ExternalArrays': [PASS, TIMEOUT],
+ 'test-api/Threading1': [SKIP],
+}], # 'arch == a64 and simulator_run == True'
+
+['arch == a64 and mode == debug and simulator_run == True', {
+
+ # Pass but take too long with the simulator in debug mode.
+ 'test-api/ExternalDoubleArray': [SKIP],
+ 'test-api/ExternalFloat32Array': [SKIP],
+ 'test-api/ExternalFloat64Array': [SKIP],
+ 'test-api/ExternalFloatArray': [SKIP],
+ 'test-api/Float32Array': [SKIP],
+ 'test-api/Float64Array': [SKIP],
+ 'test-debug/DebugBreakLoop': [SKIP],
+}], # 'arch == a64 and mode == debug and simulator_run == True'
+
+##############################################################################
+['asan == True', {
+ # Skip tests not suitable for ASAN.
+ 'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
+ 'test-lockers/MultithreadedParallelIsolates': [SKIP],
+}], # 'asan == True'
+
+##############################################################################
+['system == windows', {
+
+ # BUG(2999).
+ 'test-cpu-profiler/CollectCpuProfile': [PASS, FAIL],
+ 'test-cpu-profiler/JsNativeJsSample': [PASS, FLAKY],
+
+ # BUG(3055).
+ 'test-cpu-profiler/JsNative1JsNative2JsSample': [PASS, ['mode == release', FAIL], ['mode == debug', FLAKY]],
+
+ # BUG(3005).
+ 'test-alloc/CodeRange': [PASS, FAIL],
+}], # 'system == windows'
+
+##############################################################################
+['system == macos', {
+
+ # BUG(3125).
+ 'test-debug/DebugGetLoadedScripts': [PASS, FLAKY],
+ 'test-debug/DebugStepLinear': [PASS, FLAKY],
+ 'test-debug/DebuggerClearMessageHandler': [PASS, FLAKY],
+}], # 'system == macos'
+
+##############################################################################
['arch == arm', {
# We cannot assume that we can throw OutOfMemory exceptions in all situations.
@@ -79,7 +154,18 @@
'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
'test-serialize/DeserializeAndRunScript2': [SKIP],
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+
+ # BUG(2999).
+ 'test-cpu-profiler/CollectCpuProfile': [PASS, FLAKY],
+
+ ############################################################################
+ # Slow tests.
+ 'test-api/Threading1': [PASS, SLOW],
+ 'test-api/Threading2': [PASS, SLOW],
+ 'test-api/Threading3': [PASS, SLOW],
+ 'test-api/Threading4': [PASS, SLOW],
}], # 'arch == arm'
+
##############################################################################
['arch == mipsel', {
@@ -132,5 +218,11 @@
# Fails since 16322 (new test).
'test-code-stubs-arm/ConvertDToI': [SKIP],
+
+ # BUG(2998).
+ 'test-macro-assembler-arm/LoadAndStoreWithRepresentation': [SKIP],
+
+ # BUG(3150).
+ 'test-api/PreCompileInvalidPreparseDataError': [SKIP],
}], # 'arch == nacl_ia32 or arch == nacl_x64'
]
diff --git a/deps/v8/test/cctest/print-extension.cc b/deps/v8/test/cctest/print-extension.cc
new file mode 100644
index 0000000000..9f629195bd
--- /dev/null
+++ b/deps/v8/test/cctest/print-extension.cc
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "print-extension.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Handle<v8::FunctionTemplate> PrintExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> str) {
+ return v8::FunctionTemplate::New(isolate, PrintExtension::Print);
+}
+
+
+void PrintExtension::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ for (int i = 0; i < args.Length(); i++) {
+ if (i != 0) printf(" ");
+ v8::HandleScope scope(args.GetIsolate());
+ v8::String::Utf8Value str(args[i]);
+ if (*str == NULL) return;
+ printf("%s", *str);
+ }
+ printf("\n");
+}
+
+} } // namespace v8::internal
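
The print extension above gives cctest scripts a native print() without going through the shell sample. For orientation, a minimal usage sketch — this wiring is an assumption based on the v8::RegisterExtension / v8::ExtensionConfiguration API of this V8 revision and the cctest LocalContext / CompileRun helpers; it is not part of the diff:

// Hypothetical usage sketch, not included in this change.
#include "cctest.h"
#include "print-extension.h"

static void UsePrintExtension() {
  // Register once, then request the extension by the name passed to the
  // PrintExtension constructor ("v8/print") when creating a context.
  v8::RegisterExtension(new v8::internal::PrintExtension());
  const char* extension_names[] = { "v8/print" };
  v8::ExtensionConfiguration config(1, extension_names);
  LocalContext env(&config);
  v8::HandleScope scope(env->GetIsolate());
  CompileRun("print('hello', 42)");  // dispatches to PrintExtension::Print
}
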
diff --git a/deps/v8/src/apiutils.h b/deps/v8/test/cctest/print-extension.h
index 0765585649..7fe9226f7b 100644
--- a/deps/v8/src/apiutils.h
+++ b/deps/v8/test/cctest/print-extension.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,25 +25,23 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_APIUTILS_H_
-#define V8_APIUTILS_H_
+#ifndef V8_TEST_CCTEST_PRINT_EXTENSION_H_
+#define V8_TEST_CCTEST_PRINT_EXTENSION_H_
-namespace v8 {
-class ImplementationUtilities {
- public:
- static int GetNameCount(ExtensionConfiguration* that) {
- return that->name_count_;
- }
+#include "v8.h"
- static const char** GetNames(ExtensionConfiguration* that) {
- return that->names_;
- }
+namespace v8 {
+namespace internal {
- // Introduce an alias for the handle scope data to allow non-friends
- // to access the HandleScope data.
- typedef v8::HandleScope::Data HandleScopeData;
+class PrintExtension : public v8::Extension {
+ public:
+ PrintExtension() : v8::Extension("v8/print", "native function print();") { }
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> name);
+ static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
};
-} // namespace v8
+} } // namespace v8::internal
-#endif // V8_APIUTILS_H_
+#endif
diff --git a/deps/v8/test/cctest/profiler-extension.cc b/deps/v8/test/cctest/profiler-extension.cc
new file mode 100644
index 0000000000..80d9f90412
--- /dev/null
+++ b/deps/v8/test/cctest/profiler-extension.cc
@@ -0,0 +1,75 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Tests of profiles generator and utilities.
+
+#include "profiler-extension.h"
+#include "checks.h"
+
+namespace v8 {
+namespace internal {
+
+
+const v8::CpuProfile* ProfilerExtension::last_profile = NULL;
+const char* ProfilerExtension::kSource =
+ "native function startProfiling();"
+ "native function stopProfiling();";
+
+v8::Handle<v8::FunctionTemplate> ProfilerExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Handle<v8::String> name) {
+ if (name->Equals(v8::String::NewFromUtf8(isolate, "startProfiling"))) {
+ return v8::FunctionTemplate::New(isolate,
+ ProfilerExtension::StartProfiling);
+ } else if (name->Equals(v8::String::NewFromUtf8(isolate, "stopProfiling"))) {
+ return v8::FunctionTemplate::New(isolate,
+ ProfilerExtension::StopProfiling);
+ } else {
+ CHECK(false);
+ return v8::Handle<v8::FunctionTemplate>();
+ }
+}
+
+
+void ProfilerExtension::StartProfiling(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ last_profile = NULL;
+ v8::CpuProfiler* cpu_profiler = args.GetIsolate()->GetCpuProfiler();
+ cpu_profiler->StartCpuProfiling((args.Length() > 0)
+ ? args[0].As<v8::String>()
+ : v8::String::Empty(args.GetIsolate()));
+}
+
+
+void ProfilerExtension::StopProfiling(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::CpuProfiler* cpu_profiler = args.GetIsolate()->GetCpuProfiler();
+ last_profile = cpu_profiler->StopCpuProfiling((args.Length() > 0)
+ ? args[0].As<v8::String>()
+ : v8::String::Empty(args.GetIsolate()));
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/test/cctest/profiler-extension.h b/deps/v8/test/cctest/profiler-extension.h
new file mode 100644
index 0000000000..392a7efbc7
--- /dev/null
+++ b/deps/v8/test/cctest/profiler-extension.h
@@ -0,0 +1,54 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Tests of profiles generator and utilities.
+
+#ifndef V8_TEST_CCTEST_PROFILER_EXTENSION_H_
+#define V8_TEST_CCTEST_PROFILER_EXTENSION_H_
+
+#include "../include/v8-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+class ProfilerExtension : public v8::Extension {
+ public:
+ ProfilerExtension() : v8::Extension("v8/profiler", kSource) { }
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> name);
+ static void StartProfiling(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void StopProfiling(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static const v8::CpuProfile* last_profile;
+ private:
+ static const char* kSource;
+};
+
+
+} } // namespace v8::internal
+
+#endif
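
The profiler extension follows the same pattern for the CPU-profiler tests: scripts call startProfiling()/stopProfiling() and the test inspects ProfilerExtension::last_profile afterwards. A hedged sketch of that flow, again assuming the cctest helpers and the extension registration API rather than quoting code from this diff:

// Hypothetical usage sketch, not included in this change.
#include "cctest.h"
#include "profiler-extension.h"

static void CollectProfileViaExtension() {
  v8::RegisterExtension(new v8::internal::ProfilerExtension());
  const char* extension_names[] = { "v8/profiler" };
  v8::ExtensionConfiguration config(1, extension_names);
  LocalContext env(&config);
  v8::HandleScope scope(env->GetIsolate());
  CompileRun("startProfiling('my_profile');"
             "for (var i = 0; i < 1000; i++) Math.sqrt(i);"
             "stopProfiling('my_profile');");
  // StopProfiling() stashes the collected profile on the extension.
  CHECK_NE(NULL, v8::internal::ProfilerExtension::last_profile);
}
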
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index df4937ee28..daafb244e3 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -65,11 +65,12 @@ static void handle_property(const v8::FunctionCallbackInfo<v8::Value>& info) {
THREADED_TEST(PropertyHandler) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
fun_templ->InstanceTemplate()->SetAccessor(v8_str("foo"), handle_property);
Local<v8::FunctionTemplate> getter_templ =
- v8::FunctionTemplate::New(handle_property);
+ v8::FunctionTemplate::New(isolate, handle_property);
getter_templ->SetLength(0);
fun_templ->
InstanceTemplate()->SetAccessorProperty(v8_str("bar"), getter_templ);
@@ -120,20 +121,18 @@ THREADED_TEST(GlobalVariableAccess) {
foo = 0;
bar = -4;
baz = 10;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
- templ->InstanceTemplate()->SetAccessor(v8_str("foo"),
- GetIntValue,
- SetIntValue,
- v8::External::New(&foo));
- templ->InstanceTemplate()->SetAccessor(v8_str("bar"),
- GetIntValue,
- SetIntValue,
- v8::External::New(&bar));
- templ->InstanceTemplate()->SetAccessor(v8_str("baz"),
- GetIntValue,
- SetIntValue,
- v8::External::New(&baz));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->InstanceTemplate()->SetAccessor(
+ v8_str("foo"), GetIntValue, SetIntValue,
+ v8::External::New(isolate, &foo));
+ templ->InstanceTemplate()->SetAccessor(
+ v8_str("bar"), GetIntValue, SetIntValue,
+ v8::External::New(isolate, &bar));
+ templ->InstanceTemplate()->SetAccessor(
+ v8_str("baz"), GetIntValue, SetIntValue,
+ v8::External::New(isolate, &baz));
LocalContext env(0, templ->InstanceTemplate());
v8_compile("foo = (++bar) + baz")->Run();
CHECK_EQ(bar, -3);
@@ -175,6 +174,7 @@ static void XSetter(Local<Value> value, const Info& info, int offset) {
CHECK_EQ(x_holder, info.This());
CHECK_EQ(x_holder, info.Holder());
x_register[offset] = value->Int32Value();
+ info.GetReturnValue().Set(v8_num(-1));
}
@@ -193,36 +193,38 @@ static void XSetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
THREADED_TEST(AccessorIC) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("x0"), XGetter, XSetter);
obj->SetAccessorProperty(v8_str("x1"),
- v8::FunctionTemplate::New(XGetter),
- v8::FunctionTemplate::New(XSetter));
+ v8::FunctionTemplate::New(isolate, XGetter),
+ v8::FunctionTemplate::New(isolate, XSetter));
x_holder = obj->NewInstance();
context->Global()->Set(v8_str("holder"), x_holder);
- x_receiver = v8::Object::New();
+ x_receiver = v8::Object::New(isolate);
context->Global()->Set(v8_str("obj"), x_receiver);
v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(CompileRun(
"obj.__proto__ = holder;"
"var result = [];"
"var key_0 = 'x0';"
"var key_1 = 'x1';"
- "for (var i = 0; i < 10; i++) {"
- " holder.x0 = i;"
+ "for (var j = 0; j < 10; j++) {"
+ " var i = 4*j;"
+ " result.push(holder.x0 = i);"
" result.push(obj.x0);"
- " holder.x1 = i;"
+ " result.push(holder.x1 = i + 1);"
" result.push(obj.x1);"
- " holder[key_0] = i;"
+ " result.push(holder[key_0] = i + 2);"
" result.push(obj[key_0]);"
- " holder[key_1] = i;"
+ " result.push(holder[key_1] = i + 3);"
" result.push(obj[key_1]);"
"}"
"result"));
- CHECK_EQ(40, array->Length());
- for (int i = 0; i < 40; i++) {
- v8::Handle<Value> entry = array->Get(v8::Integer::New(i));
- CHECK_EQ(v8::Integer::New(i/4), entry);
+ CHECK_EQ(80, array->Length());
+ for (int i = 0; i < 80; i++) {
+ v8::Handle<Value> entry = array->Get(v8::Integer::New(isolate, i));
+ CHECK_EQ(v8::Integer::New(isolate, i/2), entry);
}
}
@@ -237,8 +239,9 @@ static void AccessorProhibitsOverwritingGetter(
THREADED_TEST(AccessorProhibitsOverwriting) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"),
AccessorProhibitsOverwritingGetter,
0,
@@ -280,30 +283,32 @@ static void HandleAllocatingGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
for (int i = 0; i < C; i++)
- v8::String::New("foo");
- info.GetReturnValue().Set(v8::String::New("foo"));
+ v8::String::NewFromUtf8(info.GetIsolate(), "foo");
+ info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), "foo"));
}
THREADED_TEST(HandleScopePop) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("one"), HandleAllocatingGetter<1>);
obj->SetAccessor(v8_str("many"), HandleAllocatingGetter<1024>);
v8::Handle<v8::Object> inst = obj->NewInstance();
- context->Global()->Set(v8::String::New("obj"), inst);
- i::Isolate* isolate = CcTest::i_isolate();
- int count_before = i::HandleScope::NumberOfHandles(isolate);
+ context->Global()->Set(v8::String::NewFromUtf8(isolate, "obj"), inst);
+ int count_before =
+ i::HandleScope::NumberOfHandles(reinterpret_cast<i::Isolate*>(isolate));
{
- v8::HandleScope scope(context->GetIsolate());
+ v8::HandleScope scope(isolate);
CompileRun(
"for (var i = 0; i < 1000; i++) {"
" obj.one;"
" obj.many;"
"}");
}
- int count_after = i::HandleScope::NumberOfHandles(isolate);
+ int count_after =
+ i::HandleScope::NumberOfHandles(reinterpret_cast<i::Isolate*>(isolate));
CHECK_EQ(count_before, count_after);
}
@@ -312,30 +317,36 @@ static void CheckAccessorArgsCorrect(
const v8::PropertyCallbackInfo<v8::Value>& info) {
CHECK(info.GetIsolate() == CcTest::isolate());
CHECK(info.This() == info.Holder());
- CHECK(info.Data()->Equals(v8::String::New("data")));
+ CHECK(
+ info.Data()->Equals(v8::String::NewFromUtf8(CcTest::isolate(), "data")));
ApiTestFuzzer::Fuzz();
CHECK(info.GetIsolate() == CcTest::isolate());
CHECK(info.This() == info.Holder());
- CHECK(info.Data()->Equals(v8::String::New("data")));
+ CHECK(
+ info.Data()->Equals(v8::String::NewFromUtf8(CcTest::isolate(), "data")));
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK(info.GetIsolate() == CcTest::isolate());
CHECK(info.This() == info.Holder());
- CHECK(info.Data()->Equals(v8::String::New("data")));
+ CHECK(
+ info.Data()->Equals(v8::String::NewFromUtf8(CcTest::isolate(), "data")));
info.GetReturnValue().Set(17);
}
THREADED_TEST(DirectCall) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("xxx"),
CheckAccessorArgsCorrect,
NULL,
- v8::String::New("data"));
+ v8::String::NewFromUtf8(isolate, "data"));
v8::Handle<v8::Object> inst = obj->NewInstance();
- context->Global()->Set(v8::String::New("obj"), inst);
- Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+ context->Global()->Set(v8::String::NewFromUtf8(isolate, "obj"),
+ inst);
+ Local<Script> scr = v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate, "obj.xxx"));
for (int i = 0; i < 10; i++) {
Local<Value> result = scr->Run();
CHECK(!result.IsEmpty());
@@ -356,11 +367,13 @@ THREADED_TEST(EmptyResult) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
- obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8::String::New("data"));
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL,
+ v8::String::NewFromUtf8(isolate, "data"));
v8::Handle<v8::Object> inst = obj->NewInstance();
- context->Global()->Set(v8::String::New("obj"), inst);
- Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+ context->Global()->Set(v8::String::NewFromUtf8(isolate, "obj"), inst);
+ Local<Script> scr =
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, "obj.xxx"));
for (int i = 0; i < 10; i++) {
Local<Value> result = scr->Run();
CHECK(result == v8::Undefined(isolate));
@@ -374,27 +387,30 @@ THREADED_TEST(NoReuseRegress) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
{
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
- obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8::String::New("data"));
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL,
+ v8::String::NewFromUtf8(isolate, "data"));
LocalContext context;
v8::Handle<v8::Object> inst = obj->NewInstance();
- context->Global()->Set(v8::String::New("obj"), inst);
- Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+ context->Global()->Set(v8::String::NewFromUtf8(isolate, "obj"), inst);
+ Local<Script> scr =
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, "obj.xxx"));
for (int i = 0; i < 2; i++) {
Local<Value> result = scr->Run();
CHECK(result == v8::Undefined(isolate));
}
}
{
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("xxx"),
CheckAccessorArgsCorrect,
NULL,
- v8::String::New("data"));
+ v8::String::NewFromUtf8(isolate, "data"));
LocalContext context;
v8::Handle<v8::Object> inst = obj->NewInstance();
- context->Global()->Set(v8::String::New("obj"), inst);
- Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+ context->Global()->Set(v8::String::NewFromUtf8(isolate, "obj"), inst);
+ Local<Script> scr =
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, "obj.xxx"));
for (int i = 0; i < 10; i++) {
Local<Value> result = scr->Run();
CHECK(!result.IsEmpty());
@@ -420,8 +436,9 @@ static void ThrowingSetAccessor(Local<String> name,
THREADED_TEST(Regress1054726) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("x"),
ThrowingGetAccessor,
ThrowingSetAccessor,
@@ -439,7 +456,8 @@ THREADED_TEST(Regress1054726) {
"}; result"))->Run();
CHECK_EQ(v8_str("ggggg"), result);
- result = Script::Compile(String::New(
+ result = Script::Compile(String::NewFromUtf8(
+ isolate,
"var result = '';"
"for (var i = 0; i < 5; i++) {"
" try { obj.x = i; } catch (e) { result += e; }"
@@ -451,17 +469,19 @@ THREADED_TEST(Regress1054726) {
static void AllocGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- info.GetReturnValue().Set(v8::Array::New(1000));
+ info.GetReturnValue().Set(v8::Array::New(info.GetIsolate(), 1000));
}
THREADED_TEST(Gc) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("xxx"), AllocGetter);
env->Global()->Set(v8_str("obj"), obj->NewInstance());
- Script::Compile(String::New(
+ Script::Compile(String::NewFromUtf8(
+ isolate,
"var last = [];"
"for (var i = 0; i < 2048; i++) {"
" var result = obj.xxx;"
@@ -488,13 +508,15 @@ static void StackCheck(Local<String> name,
THREADED_TEST(StackIteration) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
- i::StringStream::ClearMentionedObjectCache(isolate);
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ i::StringStream::ClearMentionedObjectCache(
+ reinterpret_cast<i::Isolate*>(isolate));
obj->SetAccessor(v8_str("xxx"), StackCheck);
env->Global()->Set(v8_str("obj"), obj->NewInstance());
- Script::Compile(String::New(
+ Script::Compile(String::NewFromUtf8(
+ isolate,
"function foo() {"
" return obj.xxx;"
"}"
@@ -509,7 +531,7 @@ static void AllocateHandles(Local<String> name,
for (int i = 0; i < i::kHandleBlockSize + 1; i++) {
v8::Local<v8::Value>::New(info.GetIsolate(), name);
}
- info.GetReturnValue().Set(v8::Integer::New(100));
+ info.GetReturnValue().Set(v8::Integer::New(info.GetIsolate(), 100));
}
@@ -517,11 +539,13 @@ THREADED_TEST(HandleScopeSegment) {
// Check that we can return values past popping of handle scope
// segments.
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("xxx"), AllocateHandles);
env->Global()->Set(v8_str("obj"), obj->NewInstance());
- v8::Handle<v8::Value> result = Script::Compile(String::New(
+ v8::Handle<v8::Value> result = Script::Compile(String::NewFromUtf8(
+ isolate,
"var result;"
"for (var i = 0; i < 4; i++)"
" result = obj.xxx;"
@@ -531,7 +555,7 @@ THREADED_TEST(HandleScopeSegment) {
void JSONStringifyEnumerator(const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> array = v8::Array::New(1);
+ v8::Handle<v8::Array> array = v8::Array::New(info.GetIsolate(), 1);
array->Set(0, v8_str("regress"));
info.GetReturnValue().Set(array);
}
@@ -545,9 +569,10 @@ void JSONStringifyGetter(Local<String> name,
THREADED_TEST(JSONStringifyNamedInterceptorObject) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetNamedPropertyHandler(
JSONStringifyGetter, NULL, NULL, NULL, JSONStringifyEnumerator);
env->Global()->Set(v8_str("obj"), obj->NewInstance());
@@ -556,16 +581,47 @@ THREADED_TEST(JSONStringifyNamedInterceptorObject) {
}
+static v8::Local<v8::Context> expected_current_context;
+static v8::Local<v8::Context> expected_calling_context;
+
+
+static void check_contexts(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(expected_current_context == info.GetIsolate()->GetCurrentContext());
+ CHECK(expected_calling_context == info.GetIsolate()->GetCallingContext());
+}
+
+
THREADED_TEST(AccessorPropertyCrossContext) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Function> fun = v8::Function::New(isolate, handle_property);
+ v8::Handle<v8::Function> fun = v8::Function::New(isolate, check_contexts);
LocalContext switch_context;
switch_context->Global()->Set(v8_str("fun"), fun);
v8::TryCatch try_catch;
+ expected_current_context = env.local();
+ expected_calling_context = switch_context.local();
CompileRun(
"var o = Object.create(null, { n: { get:fun } });"
"for (var i = 0; i < 10; i++) o.n;");
CHECK(!try_catch.HasCaught());
}
+
+
+THREADED_TEST(GlobalObjectAccessor) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ CompileRun(
+ "var set_value = 1;"
+ "Object.defineProperty(this.__proto__, 'x', {"
+ " get : function() { return this; },"
+ " set : function() { set_value = this; }"
+ "});"
+ "function getter() { return x; }"
+ "function setter() { x = 1; }"
+ "for (var i = 0; i < 4; i++) { getter(); setter(); }");
+ CHECK(v8::Utils::OpenHandle(*CompileRun("getter()"))->IsJSGlobalProxy());
+ CHECK(v8::Utils::OpenHandle(*CompileRun("set_value"))->IsJSGlobalProxy());
+}
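
Most of the churn in test-accessors.cc (and in test-api.cc further below) is one mechanical migration: the factory functions now take the v8::Isolate* explicitly instead of deducing it from the current context — String::New(...) becomes String::NewFromUtf8(isolate, ...), Integer::New(v) becomes Integer::New(isolate, v), ObjectTemplate::New() becomes ObjectTemplate::New(isolate), and so on. A condensed, hypothetical snippet written for this summary (not taken from the diff) showing the new style in one place:

// Condensed illustration of the isolate-scoped factory style used throughout.
static void IsolateScopedFactories() {
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  v8::HandleScope scope(isolate);
  // Before this change these read String::New("n"), Integer::New(42) and
  // Object::New(); the explicit isolate argument is the new part.
  env->Global()->Set(v8::String::NewFromUtf8(isolate, "n"),
                     v8::Integer::New(isolate, 42));
  v8::Local<v8::Object> obj = v8::Object::New(isolate);
  env->Global()->Set(v8::String::NewFromUtf8(isolate, "obj"), obj);
  CHECK_EQ(42, CompileRun("n")->Int32Value());
}
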
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index 7a5979a951..0d4ab886a3 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -148,10 +148,11 @@ TEST(StressJS) {
map->AppendDescriptor(&d, witness);
// Add the Foo constructor to the global object.
- env->Global()->Set(v8::String::New("Foo"), v8::Utils::ToLocal(function));
+ env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "Foo"),
+ v8::Utils::ToLocal(function));
// Call the accessor through JavaScript.
- v8::Handle<v8::Value> result =
- v8::Script::Compile(v8::String::New("(new Foo).get"))->Run();
+ v8::Handle<v8::Value> result = v8::Script::Compile(
+ v8::String::NewFromUtf8(CcTest::isolate(), "(new Foo).get"))->Run();
CHECK_EQ(42, result->Int32Value());
env->Exit();
}
@@ -197,11 +198,11 @@ TEST(CodeRange) {
if (current_allocated < code_range_size / 10) {
// Allocate a block.
// Geometrically distributed sizes, greater than
- // Page::kMaxNonCodeHeapObjectSize (which is greater than code page area).
+ // Page::kMaxRegularHeapObjectSize (which is greater than code page area).
// TODO(gc): instead of using 3 use some constant based on code_range_size
// kMaxHeapObjectSize.
size_t requested =
- (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
+ (Page::kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
Address base = code_range.AllocateRawMemory(requested,
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index d5e838ebe0..9312057fa2 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -88,12 +88,13 @@ using ::v8::Value;
void RunWithProfiler(void (*test)()) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Local<v8::String> profile_name = v8::String::New("my_profile1");
+ v8::Local<v8::String> profile_name =
+ v8::String::NewFromUtf8(env->GetIsolate(), "my_profile1");
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->StartCpuProfiling(profile_name);
(*test)();
- cpu_profiler->DeleteAllCpuProfiles();
+ reinterpret_cast<i::CpuProfiler*>(cpu_profiler)->DeleteAllProfiles();
}
@@ -149,9 +150,10 @@ static void IncrementingSignatureCallback(
signature_callback_count++;
CHECK_EQ(signature_expected_receiver, args.Holder());
CHECK_EQ(signature_expected_receiver, args.This());
- v8::Handle<v8::Array> result = v8::Array::New(args.Length());
+ v8::Handle<v8::Array> result =
+ v8::Array::New(args.GetIsolate(), args.Length());
for (int i = 0; i < args.Length(); i++)
- result->Set(v8::Integer::New(i), args[i]);
+ result->Set(v8::Integer::New(args.GetIsolate(), i), args[i]);
args.GetReturnValue().Set(result);
}
@@ -159,9 +161,10 @@ static void IncrementingSignatureCallback(
static void SignatureCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
- v8::Handle<v8::Array> result = v8::Array::New(args.Length());
+ v8::Handle<v8::Array> result =
+ v8::Array::New(args.GetIsolate(), args.Length());
for (int i = 0; i < args.Length(); i++) {
- result->Set(v8::Integer::New(i), args[i]);
+ result->Set(v8::Integer::New(args.GetIsolate(), i), args[i]);
}
args.GetReturnValue().Set(result);
}
@@ -202,7 +205,7 @@ THREADED_TEST(Handles) {
CHECK(undef->IsUndefined());
const char* c_source = "1 + 2 + 3";
- Local<String> source = String::New(c_source);
+ Local<String> source = String::NewFromUtf8(CcTest::isolate(), c_source);
Local<Script> script = Script::Compile(source);
CHECK_EQ(6, script->Run()->Int32Value());
@@ -214,13 +217,13 @@ THREADED_TEST(IsolateOfContext) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<Context> env = Context::New(CcTest::isolate());
- CHECK(!env->InContext());
+ CHECK(!env->GetIsolate()->InContext());
CHECK(env->GetIsolate() == CcTest::isolate());
env->Enter();
- CHECK(env->InContext());
+ CHECK(env->GetIsolate()->InContext());
CHECK(env->GetIsolate() == CcTest::isolate());
env->Exit();
- CHECK(!env->InContext());
+ CHECK(!env->GetIsolate()->InContext());
CHECK(env->GetIsolate() == CcTest::isolate());
}
@@ -249,18 +252,20 @@ static void TestSignature(const char* loop_js, Local<Value> receiver) {
THREADED_TEST(ReceiverSignature) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Setup templates.
- v8::Handle<v8::FunctionTemplate> fun = v8::FunctionTemplate::New();
- v8::Handle<v8::Signature> sig = v8::Signature::New(fun);
+ v8::Handle<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::Signature> sig = v8::Signature::New(isolate, fun);
v8::Handle<v8::FunctionTemplate> callback_sig =
v8::FunctionTemplate::New(
- IncrementingSignatureCallback, Local<Value>(), sig);
+ isolate, IncrementingSignatureCallback, Local<Value>(), sig);
v8::Handle<v8::FunctionTemplate> callback =
- v8::FunctionTemplate::New(IncrementingSignatureCallback);
- v8::Handle<v8::FunctionTemplate> sub_fun = v8::FunctionTemplate::New();
+ v8::FunctionTemplate::New(isolate, IncrementingSignatureCallback);
+ v8::Handle<v8::FunctionTemplate> sub_fun = v8::FunctionTemplate::New(isolate);
sub_fun->Inherit(fun);
- v8::Handle<v8::FunctionTemplate> unrel_fun = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> unrel_fun =
+ v8::FunctionTemplate::New(isolate);
// Install properties.
v8::Handle<v8::ObjectTemplate> fun_proto = fun->PrototypeTemplate();
fun_proto->Set(v8_str("prop_sig"), callback_sig);
@@ -322,13 +327,17 @@ THREADED_TEST(ReceiverSignature) {
THREADED_TEST(ArgumentSignature) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::FunctionTemplate> cons = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> cons = v8::FunctionTemplate::New(isolate);
cons->SetClassName(v8_str("Cons"));
- v8::Handle<v8::Signature> sig =
- v8::Signature::New(v8::Handle<v8::FunctionTemplate>(), 1, &cons);
+ v8::Handle<v8::Signature> sig = v8::Signature::New(
+ isolate, v8::Handle<v8::FunctionTemplate>(), 1, &cons);
v8::Handle<v8::FunctionTemplate> fun =
- v8::FunctionTemplate::New(SignatureCallback, v8::Handle<Value>(), sig);
+ v8::FunctionTemplate::New(isolate,
+ SignatureCallback,
+ v8::Handle<Value>(),
+ sig);
env->Global()->Set(v8_str("Cons"), cons->GetFunction());
env->Global()->Set(v8_str("Fun1"), fun->GetFunction());
@@ -341,18 +350,21 @@ THREADED_TEST(ArgumentSignature) {
v8::Handle<Value> value3 = CompileRun("Fun1() == '';");
CHECK(value3->IsTrue());
- v8::Handle<v8::FunctionTemplate> cons1 = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> cons1 = v8::FunctionTemplate::New(isolate);
cons1->SetClassName(v8_str("Cons1"));
- v8::Handle<v8::FunctionTemplate> cons2 = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> cons2 = v8::FunctionTemplate::New(isolate);
cons2->SetClassName(v8_str("Cons2"));
- v8::Handle<v8::FunctionTemplate> cons3 = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> cons3 = v8::FunctionTemplate::New(isolate);
cons3->SetClassName(v8_str("Cons3"));
v8::Handle<v8::FunctionTemplate> args[3] = { cons1, cons2, cons3 };
- v8::Handle<v8::Signature> wsig =
- v8::Signature::New(v8::Handle<v8::FunctionTemplate>(), 3, args);
+ v8::Handle<v8::Signature> wsig = v8::Signature::New(
+ isolate, v8::Handle<v8::FunctionTemplate>(), 3, args);
v8::Handle<v8::FunctionTemplate> fun2 =
- v8::FunctionTemplate::New(SignatureCallback, v8::Handle<Value>(), wsig);
+ v8::FunctionTemplate::New(isolate,
+ SignatureCallback,
+ v8::Handle<Value>(),
+ wsig);
env->Global()->Set(v8_str("Cons1"), cons1->GetFunction());
env->Global()->Set(v8_str("Cons2"), cons2->GetFunction());
@@ -399,7 +411,7 @@ THREADED_TEST(Access) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- Local<v8::Object> obj = v8::Object::New();
+ Local<v8::Object> obj = v8::Object::New(isolate);
Local<Value> foo_before = obj->Get(v8_str("foo"));
CHECK(foo_before->IsUndefined());
Local<String> bar_str = v8_str("bar");
@@ -414,7 +426,7 @@ THREADED_TEST(Access) {
THREADED_TEST(AccessElement) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<v8::Object> obj = v8::Object::New();
+ Local<v8::Object> obj = v8::Object::New(env->GetIsolate());
Local<Value> before = obj->Get(1);
CHECK(before->IsUndefined());
Local<String> bar_str = v8_str("bar");
@@ -434,7 +446,7 @@ THREADED_TEST(Script) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* c_source = "1 + 2 + 3";
- Local<String> source = String::New(c_source);
+ Local<String> source = String::NewFromUtf8(env->GetIsolate(), c_source);
Local<Script> script = Script::Compile(source);
CHECK_EQ(6, script->Run()->Int32Value());
}
@@ -450,13 +462,13 @@ static uint16_t* AsciiToTwoByteString(const char* source) {
class TestResource: public String::ExternalStringResource {
public:
- explicit TestResource(uint16_t* data, int* counter = NULL)
- : data_(data), length_(0), counter_(counter) {
+ TestResource(uint16_t* data, int* counter = NULL, bool owning_data = true)
+ : data_(data), length_(0), counter_(counter), owning_data_(owning_data) {
while (data[length_]) ++length_;
}
~TestResource() {
- i::DeleteArray(data_);
+ if (owning_data_) i::DeleteArray(data_);
if (counter_ != NULL) ++*counter_;
}
@@ -467,20 +479,25 @@ class TestResource: public String::ExternalStringResource {
size_t length() const {
return length_;
}
+
private:
uint16_t* data_;
size_t length_;
int* counter_;
+ bool owning_data_;
};
class TestAsciiResource: public String::ExternalAsciiStringResource {
public:
- explicit TestAsciiResource(const char* data, int* counter = NULL)
- : data_(data), length_(strlen(data)), counter_(counter) { }
+ TestAsciiResource(const char* data, int* counter = NULL, size_t offset = 0)
+ : orig_data_(data),
+ data_(data + offset),
+ length_(strlen(data) - offset),
+ counter_(counter) { }
~TestAsciiResource() {
- i::DeleteArray(data_);
+ i::DeleteArray(orig_data_);
if (counter_ != NULL) ++*counter_;
}
@@ -491,7 +508,9 @@ class TestAsciiResource: public String::ExternalAsciiStringResource {
size_t length() const {
return length_;
}
+
private:
+ const char* orig_data_;
const char* data_;
size_t length_;
int* counter_;
@@ -506,7 +525,7 @@ THREADED_TEST(ScriptUsingStringResource) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
TestResource* resource = new TestResource(two_byte_source, &dispose_count);
- Local<String> source = String::NewExternal(resource);
+ Local<String> source = String::NewExternal(env->GetIsolate(), resource);
Local<Script> script = Script::Compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
@@ -535,7 +554,7 @@ THREADED_TEST(ScriptUsingAsciiStringResource) {
v8::HandleScope scope(env->GetIsolate());
TestAsciiResource* resource = new TestAsciiResource(i::StrDup(c_source),
&dispose_count);
- Local<String> source = String::NewExternal(resource);
+ Local<String> source = String::NewExternal(env->GetIsolate(), resource);
CHECK(source->IsExternalAscii());
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalAsciiStringResource());
@@ -562,7 +581,8 @@ THREADED_TEST(ScriptMakingExternalString) {
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<String> source = String::New(two_byte_source);
+ Local<String> source =
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_source);
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
@@ -622,7 +642,8 @@ TEST(MakingExternalStringConditions) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
uint16_t* two_byte_string = AsciiToTwoByteString("s1");
- Local<String> small_string = String::New(two_byte_string);
+ Local<String> small_string =
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_string);
i::DeleteArray(two_byte_string);
// We should refuse to externalize newly created small string.
@@ -634,7 +655,7 @@ TEST(MakingExternalStringConditions) {
CHECK(small_string->CanMakeExternal());
two_byte_string = AsciiToTwoByteString("small string 2");
- small_string = String::New(two_byte_string);
+ small_string = String::NewFromTwoByte(env->GetIsolate(), two_byte_string);
i::DeleteArray(two_byte_string);
// We should refuse externalizing newly created small string.
@@ -651,7 +672,8 @@ TEST(MakingExternalStringConditions) {
buf[buf_size - 1] = '\0';
two_byte_string = AsciiToTwoByteString(buf);
- Local<String> large_string = String::New(two_byte_string);
+ Local<String> large_string =
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_string);
i::DeleteArray(buf);
i::DeleteArray(two_byte_string);
// Large strings should be immediately accepted.
@@ -667,7 +689,7 @@ TEST(MakingExternalAsciiStringConditions) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
- Local<String> small_string = String::New("s1");
+ Local<String> small_string = String::NewFromUtf8(env->GetIsolate(), "s1");
// We should refuse to externalize newly created small string.
CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
@@ -676,7 +698,7 @@ TEST(MakingExternalAsciiStringConditions) {
// Old space strings should be accepted.
CHECK(small_string->CanMakeExternal());
- small_string = String::New("small string 2");
+ small_string = String::NewFromUtf8(env->GetIsolate(), "small string 2");
// We should refuse externalizing newly created small string.
CHECK(!small_string->CanMakeExternal());
for (int i = 0; i < 100; i++) {
@@ -689,7 +711,7 @@ TEST(MakingExternalAsciiStringConditions) {
char* buf = i::NewArray<char>(buf_size);
memset(buf, 'a', buf_size);
buf[buf_size - 1] = '\0';
- Local<String> large_string = String::New(buf);
+ Local<String> large_string = String::NewFromUtf8(env->GetIsolate(), buf);
i::DeleteArray(buf);
// Large strings should be immediately accepted.
CHECK(large_string->CanMakeExternal());
@@ -715,14 +737,13 @@ TEST(MakingExternalUnalignedAsciiString) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
// Turn into external string with unaligned resource data.
- int dispose_count = 0;
const char* c_cons = "_abcdefghijklmnopqrstuvwxyz";
bool success = cons->MakeExternal(
- new TestAsciiResource(i::StrDup(c_cons) + 1, &dispose_count));
+ new TestAsciiResource(i::StrDup(c_cons), NULL, 1));
CHECK(success);
const char* c_slice = "_bcdefghijklmnopqrstuvwxyz";
success = slice->MakeExternal(
- new TestAsciiResource(i::StrDup(c_slice) + 1, &dispose_count));
+ new TestAsciiResource(i::StrDup(c_slice), NULL, 1));
CHECK(success);
// Trigger GCs and force evacuation.
@@ -736,14 +757,14 @@ THREADED_TEST(UsingExternalString) {
{
v8::HandleScope scope(CcTest::isolate());
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
- Local<String> string =
- String::NewExternal(new TestResource(two_byte_string));
+ Local<String> string = String::NewExternal(
+ CcTest::isolate(), new TestResource(two_byte_string));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
- factory->InternalizedStringFromString(istring);
+ factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -757,13 +778,13 @@ THREADED_TEST(UsingExternalAsciiString) {
v8::HandleScope scope(CcTest::isolate());
const char* one_byte_string = "test string";
Local<String> string = String::NewExternal(
- new TestAsciiResource(i::StrDup(one_byte_string)));
+ CcTest::isolate(), new TestAsciiResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
- factory->InternalizedStringFromString(istring);
+ factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -779,9 +800,8 @@ THREADED_TEST(ScavengeExternalString) {
{
v8::HandleScope scope(CcTest::isolate());
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
- Local<String> string =
- String::NewExternal(new TestResource(two_byte_string,
- &dispose_count));
+ Local<String> string = String::NewExternal(
+ CcTest::isolate(), new TestResource(two_byte_string, &dispose_count));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
in_new_space = CcTest::heap()->InNewSpace(*istring);
@@ -803,6 +823,7 @@ THREADED_TEST(ScavengeExternalAsciiString) {
v8::HandleScope scope(CcTest::isolate());
const char* one_byte_string = "test string";
Local<String> string = String::NewExternal(
+ CcTest::isolate(),
new TestAsciiResource(i::StrDup(one_byte_string), &dispose_count));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
@@ -849,7 +870,7 @@ TEST(ExternalStringWithDisposeHandling) {
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<String> source = String::NewExternal(&res_stack);
+ Local<String> source = String::NewExternal(env->GetIsolate(), &res_stack);
Local<Script> script = Script::Compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
@@ -870,7 +891,7 @@ TEST(ExternalStringWithDisposeHandling) {
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<String> source = String::NewExternal(res_heap);
+ Local<String> source = String::NewExternal(env->GetIsolate(), res_heap);
Local<Script> script = Script::Compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
@@ -899,25 +920,28 @@ THREADED_TEST(StringConcat) {
Local<String> left = v8_str(one_byte_string_1);
uint16_t* two_byte_source = AsciiToTwoByteString(two_byte_string_1);
- Local<String> right = String::New(two_byte_source);
+ Local<String> right =
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_source);
i::DeleteArray(two_byte_source);
Local<String> source = String::Concat(left, right);
right = String::NewExternal(
- new TestAsciiResource(i::StrDup(one_byte_extern_1)));
+ env->GetIsolate(), new TestAsciiResource(i::StrDup(one_byte_extern_1)));
source = String::Concat(source, right);
right = String::NewExternal(
+ env->GetIsolate(),
new TestResource(AsciiToTwoByteString(two_byte_extern_1)));
source = String::Concat(source, right);
right = v8_str(one_byte_string_2);
source = String::Concat(source, right);
two_byte_source = AsciiToTwoByteString(two_byte_string_2);
- right = String::New(two_byte_source);
+ right = String::NewFromTwoByte(env->GetIsolate(), two_byte_source);
i::DeleteArray(two_byte_source);
source = String::Concat(source, right);
right = String::NewExternal(
+ env->GetIsolate(),
new TestResource(AsciiToTwoByteString(two_byte_extern_2)));
source = String::Concat(source, right);
Local<Script> script = Script::Compile(source);
@@ -1011,10 +1035,11 @@ static void TestFunctionTemplateInitializer(Handler handler,
// Test constructor calls.
{
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(handler);
+ v8::FunctionTemplate::New(isolate, handler);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("obj"), fun);
Local<Script> script = v8_compile("obj()");
@@ -1026,9 +1051,10 @@ static void TestFunctionTemplateInitializer(Handler handler,
// the previous one.
{
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
fun_templ->SetCallHandler(handler_2);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("obj"), fun);
@@ -1047,7 +1073,7 @@ static void TestFunctionTemplateAccessor(Constructor constructor,
v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(constructor);
+ v8::FunctionTemplate::New(env->GetIsolate(), constructor);
fun_templ->SetClassName(v8_str("funky"));
fun_templ->InstanceTemplate()->SetAccessor(v8_str("m"), accessor);
Local<Function> fun = fun_templ->GetFunction();
@@ -1083,10 +1109,13 @@ static void SimpleCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
template<typename Callback>
static void TestSimpleCallback(Callback callback) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New();
- object_template->Set("callback", v8::FunctionTemplate::New(callback));
+ v8::Handle<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->Set(isolate, "callback",
+ v8::FunctionTemplate::New(isolate, callback));
v8::Local<v8::Object> object = object_template->NewInstance();
(*env)->Global()->Set(v8_str("callback_object"), object);
v8::Handle<v8::Script> script;
@@ -1178,20 +1207,25 @@ template<>
void FastReturnValueCallback<Object>(
const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Handle<v8::Object> object;
- if (!fast_return_value_object_is_empty) object = Object::New();
+ if (!fast_return_value_object_is_empty) {
+ object = Object::New(info.GetIsolate());
+ }
info.GetReturnValue().Set(object);
}
template<typename T>
Handle<Value> TestFastReturnValues() {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::EscapableHandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
v8::FunctionCallback callback = &FastReturnValueCallback<T>;
- object_template->Set("callback", v8::FunctionTemplate::New(callback));
+ object_template->Set(isolate, "callback",
+ v8::FunctionTemplate::New(isolate, callback));
v8::Local<v8::Object> object = object_template->NewInstance();
(*env)->Global()->Set(v8_str("callback_object"), object);
- return scope.Close(CompileRun("callback_object.callback()"));
+ return scope.Escape(CompileRun("callback_object.callback()"));
}
@@ -1264,10 +1298,15 @@ THREADED_PROFILED_TEST(FastReturnValues) {
THREADED_TEST(FunctionTemplateSetLength) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
{
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(
- handle_callback, Handle<v8::Value>(), Handle<v8::Signature>(), 23);
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate,
+ handle_callback,
+ Handle<v8::Value>(),
+ Handle<v8::Signature>(),
+ 23);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("obj"), fun);
Local<Script> script = v8_compile("obj.length");
@@ -1275,7 +1314,7 @@ THREADED_TEST(FunctionTemplateSetLength) {
}
{
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(handle_callback);
+ v8::FunctionTemplate::New(isolate, handle_callback);
fun_templ->SetLength(22);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("obj"), fun);
@@ -1285,7 +1324,7 @@ THREADED_TEST(FunctionTemplateSetLength) {
{
// Without setting length it defaults to 0.
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(handle_callback);
+ v8::FunctionTemplate::New(isolate, handle_callback);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("obj"), fun);
Local<Script> script = v8_compile("obj.length");
@@ -1304,13 +1343,15 @@ static void callback(const v8::FunctionCallbackInfo<v8::Value>& args) {
static void TestExternalPointerWrapping() {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> data = v8::External::New(expected_ptr);
+ v8::Handle<v8::Value> data =
+ v8::External::New(isolate, expected_ptr);
- v8::Handle<v8::Object> obj = v8::Object::New();
+ v8::Handle<v8::Object> obj = v8::Object::New(isolate);
obj->Set(v8_str("func"),
- v8::FunctionTemplate::New(callback, data)->GetFunction());
+ v8::FunctionTemplate::New(isolate, callback, data)->GetFunction());
env->Global()->Set(v8_str("obj"), obj);
CHECK(CompileRun(
@@ -1369,11 +1410,12 @@ THREADED_TEST(ExternalWrap) {
THREADED_TEST(FindInstanceInPrototypeChain) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- Local<v8::FunctionTemplate> base = v8::FunctionTemplate::New();
- Local<v8::FunctionTemplate> derived = v8::FunctionTemplate::New();
- Local<v8::FunctionTemplate> other = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> base = v8::FunctionTemplate::New(isolate);
+ Local<v8::FunctionTemplate> derived = v8::FunctionTemplate::New(isolate);
+ Local<v8::FunctionTemplate> other = v8::FunctionTemplate::New(isolate);
derived->Inherit(base);
Local<v8::Function> base_function = base->GetFunction();
@@ -1416,14 +1458,14 @@ THREADED_TEST(FindInstanceInPrototypeChain) {
THREADED_TEST(TinyInteger) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Isolate* isolate = CcTest::isolate();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
int32_t value = 239;
- Local<v8::Integer> value_obj = v8::Integer::New(value);
+ Local<v8::Integer> value_obj = v8::Integer::New(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
- value_obj = v8::Integer::New(value, isolate);
+ value_obj = v8::Integer::New(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
@@ -1439,10 +1481,10 @@ THREADED_TEST(BigSmiInteger) {
CHECK(i::Smi::IsValid(value));
CHECK(!i::Smi::IsValid(value + 1));
- Local<v8::Integer> value_obj = v8::Integer::New(value);
+ Local<v8::Integer> value_obj = v8::Integer::New(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
- value_obj = v8::Integer::New(value, isolate);
+ value_obj = v8::Integer::New(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
}
@@ -1462,10 +1504,10 @@ THREADED_TEST(BigInteger) {
CHECK(value > i::Smi::kMaxValue);
CHECK(!i::Smi::IsValid(value));
- Local<v8::Integer> value_obj = v8::Integer::New(value);
+ Local<v8::Integer> value_obj = v8::Integer::New(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
- value_obj = v8::Integer::New(value, isolate);
+ value_obj = v8::Integer::New(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
}
@@ -1478,10 +1520,10 @@ THREADED_TEST(TinyUnsignedInteger) {
uint32_t value = 239;
- Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+ Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
- value_obj = v8::Integer::NewFromUnsigned(value, isolate);
+ value_obj = v8::Integer::NewFromUnsigned(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
@@ -1495,10 +1537,10 @@ THREADED_TEST(BigUnsignedSmiInteger) {
CHECK(i::Smi::IsValid(value));
CHECK(!i::Smi::IsValid(value + 1));
- Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+ Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
- value_obj = v8::Integer::NewFromUnsigned(value, isolate);
+ value_obj = v8::Integer::NewFromUnsigned(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
@@ -1512,10 +1554,10 @@ THREADED_TEST(BigUnsignedInteger) {
CHECK(value > static_cast<uint32_t>(i::Smi::kMaxValue));
CHECK(!i::Smi::IsValid(value));
- Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+ Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
- value_obj = v8::Integer::NewFromUnsigned(value, isolate);
+ value_obj = v8::Integer::NewFromUnsigned(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
@@ -1529,10 +1571,10 @@ THREADED_TEST(OutOfSignedRangeUnsignedInteger) {
uint32_t value = INT32_MAX_AS_UINT + 1;
CHECK(value > INT32_MAX_AS_UINT); // No overflow.
- Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+ Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
- value_obj = v8::Integer::NewFromUnsigned(value, isolate);
+ value_obj = v8::Integer::NewFromUnsigned(isolate, value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
@@ -1588,7 +1630,8 @@ THREADED_TEST(NumberObject) {
CHECK(!as_boxed.IsEmpty());
double the_number = as_boxed->ValueOf();
CHECK_EQ(42.0, the_number);
- v8::Handle<v8::Value> new_boxed_number = v8::NumberObject::New(43);
+ v8::Handle<v8::Value> new_boxed_number =
+ v8::NumberObject::New(env->GetIsolate(), 43);
CHECK(new_boxed_number->IsNumberObject());
as_boxed = new_boxed_number.As<v8::NumberObject>();
the_number = as_boxed->ValueOf();
@@ -1625,7 +1668,7 @@ THREADED_TEST(PrimitiveAndWrappedBooleans) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<Value> primitive_false = Boolean::New(false);
+ Local<Value> primitive_false = Boolean::New(env->GetIsolate(), false);
CHECK(primitive_false->IsBoolean());
CHECK(!primitive_false->IsBooleanObject());
CHECK(!primitive_false->BooleanValue());
@@ -1648,7 +1691,7 @@ THREADED_TEST(PrimitiveAndWrappedBooleans) {
CHECK(!false_boolean_object->IsTrue());
CHECK(!false_boolean_object->IsFalse());
- Local<Value> primitive_true = Boolean::New(true);
+ Local<Value> primitive_true = Boolean::New(env->GetIsolate(), true);
CHECK(primitive_true->IsBoolean());
CHECK(!primitive_true->IsBooleanObject());
CHECK(primitive_true->BooleanValue());
@@ -1677,7 +1720,7 @@ THREADED_TEST(Number) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
double PI = 3.1415926;
- Local<v8::Number> pi_obj = v8::Number::New(PI);
+ Local<v8::Number> pi_obj = v8::Number::New(env->GetIsolate(), PI);
CHECK_EQ(PI, pi_obj->NumberValue());
}
@@ -1699,32 +1742,34 @@ THREADED_TEST(Date) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
double PI = 3.1415926;
- Local<Value> date = v8::Date::New(PI);
+ Local<Value> date = v8::Date::New(env->GetIsolate(), PI);
CHECK_EQ(3.0, date->NumberValue());
- date.As<v8::Date>()->Set(v8_str("property"), v8::Integer::New(42));
+ date.As<v8::Date>()->Set(v8_str("property"),
+ v8::Integer::New(env->GetIsolate(), 42));
CHECK_EQ(42, date.As<v8::Date>()->Get(v8_str("property"))->Int32Value());
}
THREADED_TEST(Boolean) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::Boolean> t = v8::True(CcTest::isolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Boolean> t = v8::True(isolate);
CHECK(t->Value());
- v8::Handle<v8::Boolean> f = v8::False(CcTest::isolate());
+ v8::Handle<v8::Boolean> f = v8::False(isolate);
CHECK(!f->Value());
- v8::Handle<v8::Primitive> u = v8::Undefined(CcTest::isolate());
+ v8::Handle<v8::Primitive> u = v8::Undefined(isolate);
CHECK(!u->BooleanValue());
- v8::Handle<v8::Primitive> n = v8::Null(CcTest::isolate());
+ v8::Handle<v8::Primitive> n = v8::Null(isolate);
CHECK(!n->BooleanValue());
v8::Handle<String> str1 = v8_str("");
CHECK(!str1->BooleanValue());
v8::Handle<String> str2 = v8_str("x");
CHECK(str2->BooleanValue());
- CHECK(!v8::Number::New(0)->BooleanValue());
- CHECK(v8::Number::New(-1)->BooleanValue());
- CHECK(v8::Number::New(1)->BooleanValue());
- CHECK(v8::Number::New(42)->BooleanValue());
+ CHECK(!v8::Number::New(isolate, 0)->BooleanValue());
+ CHECK(v8::Number::New(isolate, -1)->BooleanValue());
+ CHECK(v8::Number::New(isolate, 1)->BooleanValue());
+ CHECK(v8::Number::New(isolate, 42)->BooleanValue());
CHECK(!v8_compile("NaN")->Run()->BooleanValue());
}
@@ -1743,13 +1788,14 @@ static void GetM(Local<String> name,
THREADED_TEST(GlobalPrototype) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> func_templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> func_templ =
+ v8::FunctionTemplate::New(isolate);
func_templ->PrototypeTemplate()->Set(
- "dummy",
- v8::FunctionTemplate::New(DummyCallHandler));
+ isolate, "dummy", v8::FunctionTemplate::New(isolate, DummyCallHandler));
v8::Handle<ObjectTemplate> templ = func_templ->InstanceTemplate();
- templ->Set("x", v8_num(200));
+ templ->Set(isolate, "x", v8_num(200));
templ->SetAccessor(v8_str("m"), GetM);
LocalContext env(0, templ);
v8::Handle<Script> script(v8_compile("dummy()"));
@@ -1761,20 +1807,21 @@ THREADED_TEST(GlobalPrototype) {
THREADED_TEST(ObjectTemplate) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ1 = ObjectTemplate::New();
- templ1->Set("x", v8_num(10));
- templ1->Set("y", v8_num(13));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ1 = ObjectTemplate::New(isolate);
+ templ1->Set(isolate, "x", v8_num(10));
+ templ1->Set(isolate, "y", v8_num(13));
LocalContext env;
Local<v8::Object> instance1 = templ1->NewInstance();
env->Global()->Set(v8_str("p"), instance1);
CHECK(v8_compile("(p.x == 10)")->Run()->BooleanValue());
CHECK(v8_compile("(p.y == 13)")->Run()->BooleanValue());
- Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New();
- fun->PrototypeTemplate()->Set("nirk", v8_num(123));
+ Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(isolate);
+ fun->PrototypeTemplate()->Set(isolate, "nirk", v8_num(123));
Local<ObjectTemplate> templ2 = fun->InstanceTemplate();
- templ2->Set("a", v8_num(12));
- templ2->Set("b", templ1);
+ templ2->Set(isolate, "a", v8_num(12));
+ templ2->Set(isolate, "b", templ1);
Local<v8::Object> instance2 = templ2->NewInstance();
env->Global()->Set(v8_str("q"), instance2);
CHECK(v8_compile("(q.nirk == 123)")->Run()->BooleanValue());
@@ -1798,21 +1845,23 @@ static void GetKnurd(Local<String> property,
THREADED_TEST(DescriptorInheritance) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> super = v8::FunctionTemplate::New();
- super->PrototypeTemplate()->Set("flabby",
- v8::FunctionTemplate::New(GetFlabby));
- super->PrototypeTemplate()->Set("PI", v8_num(3.14));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> super = v8::FunctionTemplate::New(isolate);
+ super->PrototypeTemplate()->Set(isolate, "flabby",
+ v8::FunctionTemplate::New(isolate,
+ GetFlabby));
+ super->PrototypeTemplate()->Set(isolate, "PI", v8_num(3.14));
super->InstanceTemplate()->SetAccessor(v8_str("knurd"), GetKnurd);
- v8::Handle<v8::FunctionTemplate> base1 = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> base1 = v8::FunctionTemplate::New(isolate);
base1->Inherit(super);
- base1->PrototypeTemplate()->Set("v1", v8_num(20.1));
+ base1->PrototypeTemplate()->Set(isolate, "v1", v8_num(20.1));
- v8::Handle<v8::FunctionTemplate> base2 = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> base2 = v8::FunctionTemplate::New(isolate);
base2->Inherit(super);
- base2->PrototypeTemplate()->Set("v2", v8_num(10.1));
+ base2->PrototypeTemplate()->Set(isolate, "v2", v8_num(10.1));
LocalContext env;
@@ -1940,8 +1989,8 @@ void AddInterceptor(Handle<FunctionTemplate> templ,
THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddAccessor(parent, v8_str("age"),
SimpleAccessorGetter, SimpleAccessorSetter);
@@ -1956,10 +2005,26 @@ THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
}
-THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
+THREADED_TEST(EmptyInterceptorBreakTransitions) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Constructor"), templ->GetFunction());
+ CompileRun("var o1 = new Constructor;"
+ "o1.a = 1;" // Ensure a and x share the descriptor array.
+ "Object.defineProperty(o1, 'x', {value: 10});");
+ CompileRun("var o2 = new Constructor;"
+ "o2.a = 1;"
+ "Object.defineProperty(o2, 'x', {value: 10});");
+}
+
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
child->Inherit(parent);
AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
@@ -1978,9 +2043,10 @@ THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
child->Inherit(parent);
AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
@@ -1999,7 +2065,7 @@ THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
THREADED_TEST(SwitchFromInterceptorToAccessor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddAccessor(templ, v8_str("age"),
SimpleAccessorGetter, SimpleAccessorSetter);
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
@@ -2017,7 +2083,7 @@ THREADED_TEST(SwitchFromInterceptorToAccessor) {
THREADED_TEST(SwitchFromAccessorToInterceptor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddAccessor(templ, v8_str("age"),
SimpleAccessorGetter, SimpleAccessorSetter);
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
@@ -2035,8 +2101,8 @@ THREADED_TEST(SwitchFromAccessorToInterceptor) {
THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddAccessor(parent, v8_str("age"),
SimpleAccessorGetter, SimpleAccessorSetter);
@@ -2055,8 +2121,8 @@ THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddAccessor(parent, v8_str("age"),
SimpleAccessorGetter, SimpleAccessorSetter);
@@ -2075,7 +2141,7 @@ THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
env->Global()->Set(v8_str("Obj"), templ->GetFunction());
@@ -2100,7 +2166,7 @@ THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
env->Global()->Set(v8_str("Obj"), templ->GetFunction());
@@ -2125,8 +2191,8 @@ THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
THREADED_TEST(SwitchFromInterceptorToProperty) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddInterceptor(child, InterceptorGetter, InterceptorSetter);
LocalContext env;
@@ -2143,8 +2209,8 @@ THREADED_TEST(SwitchFromInterceptorToProperty) {
THREADED_TEST(SwitchFromPropertyToInterceptor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddInterceptor(child, InterceptorGetter, InterceptorSetter);
LocalContext env;
@@ -2162,7 +2228,8 @@ THREADED_TEST(SwitchFromPropertyToInterceptor) {
THREADED_TEST(NamedPropertyHandlerGetter) {
echo_named_call_count = 0;
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetNamedPropertyHandler(EchoNamedProperty,
0, 0, 0, 0,
v8_str("data"));
@@ -2197,8 +2264,9 @@ static void EchoIndexedProperty(
THREADED_TEST(IndexedPropertyHandlerGetter) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetIndexedPropertyHandler(EchoIndexedProperty,
0, 0, 0, 0,
v8_num(637));
@@ -2301,10 +2369,11 @@ void CheckThisNamedPropertyEnumerator(
THREADED_PROFILED_TEST(PropertyHandlerInPrototype) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Set up a prototype chain with three interceptors.
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetIndexedPropertyHandler(
CheckThisIndexedPropertyHandler,
CheckThisIndexedPropertySetter,
@@ -2368,8 +2437,9 @@ static void PrePropertyHandlerQuery(
THREADED_TEST(PrePropertyHandler) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
desc->InstanceTemplate()->SetNamedPropertyHandler(PrePropertyHandlerGet,
0,
PrePropertyHandlerQuery);
@@ -2403,7 +2473,8 @@ static void CallScriptRecursivelyCall(
ApiTestFuzzer::Fuzz();
int depth = args.This()->Get(v8_str("depth"))->Int32Value();
if (depth == kTargetRecursionDepth) return;
- args.This()->Set(v8_str("depth"), v8::Integer::New(depth + 1));
+ args.This()->Set(v8_str("depth"),
+ v8::Integer::New(args.GetIsolate(), depth + 1));
args.GetReturnValue().Set(call_recursively_script->Run());
}
@@ -2416,7 +2487,8 @@ static void CallFunctionRecursivelyCall(
printf("[depth = %d]\n", depth);
return;
}
- args.This()->Set(v8_str("depth"), v8::Integer::New(depth + 1));
+ args.This()->Set(v8_str("depth"),
+ v8::Integer::New(args.GetIsolate(), depth + 1));
v8::Handle<Value> function =
args.This()->Get(v8_str("callFunctionRecursively"));
args.GetReturnValue().Set(
@@ -2425,20 +2497,21 @@ static void CallFunctionRecursivelyCall(
THREADED_TEST(DeepCrossLanguageRecursion) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New(isolate);
global->Set(v8_str("callScriptRecursively"),
- v8::FunctionTemplate::New(CallScriptRecursivelyCall));
+ v8::FunctionTemplate::New(isolate, CallScriptRecursivelyCall));
global->Set(v8_str("callFunctionRecursively"),
- v8::FunctionTemplate::New(CallFunctionRecursivelyCall));
+ v8::FunctionTemplate::New(isolate, CallFunctionRecursivelyCall));
LocalContext env(NULL, global);
- env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
+ env->Global()->Set(v8_str("depth"), v8::Integer::New(isolate, 0));
call_recursively_script = v8_compile("callScriptRecursively()");
call_recursively_script->Run();
call_recursively_script = v8::Handle<Script>();
- env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
+ env->Global()->Set(v8_str("depth"), v8::Integer::New(isolate, 0));
Script::Compile(v8_str("callFunctionRecursively()"))->Run();
}
@@ -2461,8 +2534,9 @@ static void ThrowingPropertyHandlerSet(
THREADED_TEST(CallbackExceptionRegression) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetNamedPropertyHandler(ThrowingPropertyHandlerGet,
ThrowingPropertyHandlerSet);
LocalContext env;
@@ -2477,8 +2551,9 @@ THREADED_TEST(CallbackExceptionRegression) {
THREADED_TEST(FunctionPrototype) {
- v8::HandleScope scope(CcTest::isolate());
- Local<v8::FunctionTemplate> Foo = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> Foo = v8::FunctionTemplate::New(isolate);
Foo->PrototypeTemplate()->Set(v8_str("plak"), v8_num(321));
LocalContext env;
env->Global()->Set(v8_str("Foo"), Foo->GetFunction());
@@ -2489,9 +2564,10 @@ THREADED_TEST(FunctionPrototype) {
THREADED_TEST(InternalFields) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
instance_templ->SetInternalFieldCount(1);
Local<v8::Object> obj = templ->GetFunction()->NewInstance();
@@ -2503,8 +2579,9 @@ THREADED_TEST(InternalFields) {
THREADED_TEST(GlobalObjectInternalFields) {
- v8::HandleScope scope(CcTest::isolate());
- Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(isolate);
global_template->SetInternalFieldCount(1);
LocalContext env(NULL, global_template);
v8::Handle<v8::Object> global_proxy = env->Global();
@@ -2521,7 +2598,7 @@ THREADED_TEST(GlobalObjectHasRealIndexedProperty) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Object> global = env->Global();
- global->Set(0, v8::String::New("value"));
+ global->Set(0, v8::String::NewFromUtf8(CcTest::isolate(), "value"));
CHECK(global->HasRealIndexedProperty(0));
}
@@ -2537,9 +2614,10 @@ static void CheckAlignedPointerInInternalField(Handle<v8::Object> obj,
THREADED_TEST(InternalFieldsAlignedPointers) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
instance_templ->SetInternalFieldCount(1);
Local<v8::Object> obj = templ->GetFunction()->NewInstance();
@@ -2611,33 +2689,38 @@ static void CheckEmbedderData(LocalContext* env,
THREADED_TEST(EmbedderData) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- CheckEmbedderData(&env, 3, v8::String::New("The quick brown fox jumps"));
- CheckEmbedderData(&env, 2, v8::String::New("over the lazy dog."));
- CheckEmbedderData(&env, 1, v8::Number::New(1.2345));
- CheckEmbedderData(&env, 0, v8::Boolean::New(true));
+ CheckEmbedderData(
+ &env, 3,
+ v8::String::NewFromUtf8(isolate, "The quick brown fox jumps"));
+ CheckEmbedderData(&env, 2, v8::String::NewFromUtf8(isolate,
+ "over the lazy dog."));
+ CheckEmbedderData(&env, 1, v8::Number::New(isolate, 1.2345));
+ CheckEmbedderData(&env, 0, v8::Boolean::New(isolate, true));
}
THREADED_TEST(IdentityHash) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
  // Ensure that the test starts with a fresh heap to test whether the hash
// code is based on the address.
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- Local<v8::Object> obj = v8::Object::New();
+ Local<v8::Object> obj = v8::Object::New(isolate);
int hash = obj->GetIdentityHash();
int hash1 = obj->GetIdentityHash();
CHECK_EQ(hash, hash1);
- int hash2 = v8::Object::New()->GetIdentityHash();
+ int hash2 = v8::Object::New(isolate)->GetIdentityHash();
  // Since the identity hash is essentially a random number, two consecutive
// objects should not be assigned the same hash code. If the test below fails
// the random number generator should be evaluated.
CHECK_NE(hash, hash2);
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- int hash3 = v8::Object::New()->GetIdentityHash();
+ int hash3 = v8::Object::New(isolate)->GetIdentityHash();
// Make sure that the identity hash is not based on the initial address of
// the object alone. If the test below fails the random number generator
// should be evaluated.
@@ -2649,16 +2732,16 @@ THREADED_TEST(IdentityHash) {
// Put a getter for 'v8::IdentityHash' on the Object's prototype:
{
CompileRun("Object.prototype['v8::IdentityHash'] = 42;\n");
- Local<v8::Object> o1 = v8::Object::New();
- Local<v8::Object> o2 = v8::Object::New();
+ Local<v8::Object> o1 = v8::Object::New(isolate);
+ Local<v8::Object> o2 = v8::Object::New(isolate);
CHECK_NE(o1->GetIdentityHash(), o2->GetIdentityHash());
}
{
CompileRun(
"function cnst() { return 42; };\n"
"Object.prototype.__defineGetter__('v8::IdentityHash', cnst);\n");
- Local<v8::Object> o1 = v8::Object::New();
- Local<v8::Object> o2 = v8::Object::New();
+ Local<v8::Object> o1 = v8::Object::New(isolate);
+ Local<v8::Object> o2 = v8::Object::New(isolate);
CHECK_NE(o1->GetIdentityHash(), o2->GetIdentityHash());
}
}
@@ -2671,7 +2754,7 @@ THREADED_TEST(SymbolProperties) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Local<v8::Object> obj = v8::Object::New();
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
v8::Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
v8::Local<v8::Symbol> sym2 = v8::Symbol::New(isolate, "my-symbol");
@@ -2691,7 +2774,7 @@ THREADED_TEST(SymbolProperties) {
CHECK(!sym1->StrictEquals(sym2));
CHECK(!sym2->StrictEquals(sym1));
- CHECK(sym2->Name()->Equals(v8::String::New("my-symbol")));
+ CHECK(sym2->Name()->Equals(v8::String::NewFromUtf8(isolate, "my-symbol")));
v8::Local<v8::Value> sym_val = sym2;
CHECK(sym_val->IsSymbol());
@@ -2712,17 +2795,18 @@ THREADED_TEST(SymbolProperties) {
CHECK(obj->Delete(sym1));
CHECK(!obj->Has(sym1));
- CHECK(obj->Set(sym1, v8::Integer::New(1503)));
+ CHECK(obj->Set(sym1, v8::Integer::New(isolate, 1503)));
CHECK(obj->Has(sym1));
CHECK_EQ(1503, obj->Get(sym1)->Int32Value());
- CHECK(obj->Set(sym1, v8::Integer::New(2002)));
+ CHECK(obj->Set(sym1, v8::Integer::New(isolate, 2002)));
CHECK(obj->Has(sym1));
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
CHECK_EQ(v8::None, obj->GetPropertyAttributes(sym1));
CHECK_EQ(0, obj->GetOwnPropertyNames()->Length());
int num_props = obj->GetPropertyNames()->Length();
- CHECK(obj->Set(v8::String::New("bla"), v8::Integer::New(20)));
+ CHECK(obj->Set(v8::String::NewFromUtf8(isolate, "bla"),
+ v8::Integer::New(isolate, 20)));
CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
CHECK_EQ(num_props + 1, obj->GetPropertyNames()->Length());
@@ -2730,7 +2814,7 @@ THREADED_TEST(SymbolProperties) {
// Add another property and delete it afterwards to force the object in
// slow case.
- CHECK(obj->Set(sym2, v8::Integer::New(2008)));
+ CHECK(obj->Set(sym2, v8::Integer::New(isolate, 2008)));
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
CHECK_EQ(2008, obj->Get(sym2)->Int32Value());
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
@@ -2743,6 +2827,71 @@ THREADED_TEST(SymbolProperties) {
CHECK(!obj->Has(sym2));
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+
+ // Symbol properties are inherited.
+ v8::Local<v8::Object> child = v8::Object::New(isolate);
+ child->SetPrototype(obj);
+ CHECK(child->Has(sym1));
+ CHECK_EQ(2002, child->Get(sym1)->Int32Value());
+ CHECK_EQ(0, child->GetOwnPropertyNames()->Length());
+}
+
+
+THREADED_TEST(PrivateProperties) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ v8::Local<v8::Private> priv1 = v8::Private::New(isolate);
+ v8::Local<v8::Private> priv2 = v8::Private::New(isolate, "my-private");
+
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+ CHECK(priv2->Name()->Equals(v8::String::NewFromUtf8(isolate, "my-private")));
+
+ // Make sure delete of a non-existent private symbol property works.
+ CHECK(obj->DeletePrivate(priv1));
+ CHECK(!obj->HasPrivate(priv1));
+
+ CHECK(obj->SetPrivate(priv1, v8::Integer::New(isolate, 1503)));
+ CHECK(obj->HasPrivate(priv1));
+ CHECK_EQ(1503, obj->GetPrivate(priv1)->Int32Value());
+ CHECK(obj->SetPrivate(priv1, v8::Integer::New(isolate, 2002)));
+ CHECK(obj->HasPrivate(priv1));
+ CHECK_EQ(2002, obj->GetPrivate(priv1)->Int32Value());
+
+ CHECK_EQ(0, obj->GetOwnPropertyNames()->Length());
+ int num_props = obj->GetPropertyNames()->Length();
+ CHECK(obj->Set(v8::String::NewFromUtf8(isolate, "bla"),
+ v8::Integer::New(isolate, 20)));
+ CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(num_props + 1, obj->GetPropertyNames()->Length());
+
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+ // Add another property and delete it afterwards to force the object in
+ // slow case.
+ CHECK(obj->SetPrivate(priv2, v8::Integer::New(isolate, 2008)));
+ CHECK_EQ(2002, obj->GetPrivate(priv1)->Int32Value());
+ CHECK_EQ(2008, obj->GetPrivate(priv2)->Int32Value());
+ CHECK_EQ(2002, obj->GetPrivate(priv1)->Int32Value());
+ CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+
+ CHECK(obj->HasPrivate(priv1));
+ CHECK(obj->HasPrivate(priv2));
+ CHECK(obj->DeletePrivate(priv2));
+ CHECK(obj->HasPrivate(priv1));
+ CHECK(!obj->HasPrivate(priv2));
+ CHECK_EQ(2002, obj->GetPrivate(priv1)->Int32Value());
+ CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+
+ // Private properties are inherited (for the time being).
+ v8::Local<v8::Object> child = v8::Object::New(isolate);
+ child->SetPrototype(obj);
+ CHECK(child->HasPrivate(priv1));
+ CHECK_EQ(2002, child->GetPrivate(priv1)->Int32Value());
+ CHECK_EQ(0, child->GetOwnPropertyNames()->Length());
}
@@ -2772,7 +2921,7 @@ THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(1024);
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 1024);
CheckInternalFieldsAreZero(ab);
CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
CHECK(!ab->IsExternal());
@@ -2852,7 +3001,8 @@ THREADED_TEST(ArrayBuffer_External) {
i::ScopedVector<uint8_t> my_data(100);
memset(my_data.start(), 0, 100);
- Local<v8::ArrayBuffer> ab3 = v8::ArrayBuffer::New(my_data.start(), 100);
+ Local<v8::ArrayBuffer> ab3 =
+ v8::ArrayBuffer::New(isolate, my_data.start(), 100);
CheckInternalFieldsAreZero(ab3);
CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
CHECK(ab3->IsExternal());
@@ -2919,7 +3069,7 @@ THREADED_TEST(ArrayBuffer_NeuteringApi) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(1024);
+ v8::Handle<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1024);
v8::Handle<v8::Uint8Array> u8a =
CreateAndCheck<v8::Uint8Array, 1>(buffer, 1, 1023);
@@ -3011,9 +3161,10 @@ THREADED_TEST(ArrayBuffer_NeuteringScript) {
THREADED_TEST(HiddenProperties) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- v8::Local<v8::Object> obj = v8::Object::New();
+ v8::Local<v8::Object> obj = v8::Object::New(env->GetIsolate());
v8::Local<v8::String> key = v8_str("api-test::hidden-key");
v8::Local<v8::String> empty = v8_str("");
v8::Local<v8::String> prop_name = v8_str("prop_name");
@@ -3023,9 +3174,9 @@ THREADED_TEST(HiddenProperties) {
// Make sure delete of a non-existent hidden value works
CHECK(obj->DeleteHiddenValue(key));
- CHECK(obj->SetHiddenValue(key, v8::Integer::New(1503)));
+ CHECK(obj->SetHiddenValue(key, v8::Integer::New(isolate, 1503)));
CHECK_EQ(1503, obj->GetHiddenValue(key)->Int32Value());
- CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
+ CHECK(obj->SetHiddenValue(key, v8::Integer::New(isolate, 2002)));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -3035,7 +3186,7 @@ THREADED_TEST(HiddenProperties) {
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
CHECK(obj->Get(empty)->IsUndefined());
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
- CHECK(obj->Set(empty, v8::Integer::New(2003)));
+ CHECK(obj->Set(empty, v8::Integer::New(isolate, 2003)));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
CHECK_EQ(2003, obj->Get(empty)->Int32Value());
@@ -3043,7 +3194,7 @@ THREADED_TEST(HiddenProperties) {
// Add another property and delete it afterwards to force the object in
// slow case.
- CHECK(obj->Set(prop_name, v8::Integer::New(2008)));
+ CHECK(obj->Set(prop_name, v8::Integer::New(isolate, 2008)));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
CHECK_EQ(2008, obj->Get(prop_name)->Int32Value());
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
@@ -3055,7 +3206,7 @@ THREADED_TEST(HiddenProperties) {
CHECK(obj->SetHiddenValue(key, Handle<Value>()));
CHECK(obj->GetHiddenValue(key).IsEmpty());
- CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
+ CHECK(obj->SetHiddenValue(key, v8::Integer::New(isolate, 2002)));
CHECK(obj->DeleteHiddenValue(key));
CHECK(obj->GetHiddenValue(key).IsEmpty());
}
@@ -3068,7 +3219,7 @@ THREADED_TEST(Regress97784) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Local<v8::Object> obj = v8::Object::New();
+ v8::Local<v8::Object> obj = v8::Object::New(env->GetIsolate());
v8::Local<v8::String> key = v8_str("hidden");
CompileRun(
@@ -3083,7 +3234,7 @@ THREADED_TEST(Regress97784) {
// Make sure that the getter and setter from Object.prototype is not invoked.
// If it did we would have full access to the hidden properties in
// the accessor.
- CHECK(obj->SetHiddenValue(key, v8::Integer::New(42)));
+ CHECK(obj->SetHiddenValue(key, v8::Integer::New(env->GetIsolate(), 42)));
ExpectFalse("set_called");
CHECK_EQ(42, obj->GetHiddenValue(key)->Int32Value());
}
@@ -3098,19 +3249,20 @@ static void InterceptorForHiddenProperties(
THREADED_TEST(HiddenPropertiesWithInterceptors) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
interceptor_for_hidden_properties_called = false;
v8::Local<v8::String> key = v8_str("api-test::hidden-key");
// Associate an interceptor with an object and start setting hidden values.
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
instance_templ->SetNamedPropertyHandler(InterceptorForHiddenProperties);
Local<v8::Function> function = fun_templ->GetFunction();
Local<v8::Object> obj = function->NewInstance();
- CHECK(obj->SetHiddenValue(key, v8::Integer::New(2302)));
+ CHECK(obj->SetHiddenValue(key, v8::Integer::New(isolate, 2302)));
CHECK_EQ(2302, obj->GetHiddenValue(key)->Int32Value());
CHECK(!interceptor_for_hidden_properties_called);
}
@@ -3119,7 +3271,7 @@ THREADED_TEST(HiddenPropertiesWithInterceptors) {
THREADED_TEST(External) {
v8::HandleScope scope(CcTest::isolate());
int x = 3;
- Local<v8::External> ext = v8::External::New(&x);
+ Local<v8::External> ext = v8::External::New(CcTest::isolate(), &x);
LocalContext env;
env->Global()->Set(v8_str("ext"), ext);
Local<Value> reext_obj = Script::Compile(v8_str("this.ext"))->Run();
@@ -3131,10 +3283,10 @@ THREADED_TEST(External) {
// Make sure unaligned pointers are wrapped properly.
char* data = i::StrDup("0123456789");
- Local<v8::Value> zero = v8::External::New(&data[0]);
- Local<v8::Value> one = v8::External::New(&data[1]);
- Local<v8::Value> two = v8::External::New(&data[2]);
- Local<v8::Value> three = v8::External::New(&data[3]);
+ Local<v8::Value> zero = v8::External::New(CcTest::isolate(), &data[0]);
+ Local<v8::Value> one = v8::External::New(CcTest::isolate(), &data[1]);
+ Local<v8::Value> two = v8::External::New(CcTest::isolate(), &data[2]);
+ Local<v8::Value> three = v8::External::New(CcTest::isolate(), &data[3]);
char* char_ptr = reinterpret_cast<char*>(v8::External::Cast(*zero)->Value());
CHECK_EQ('0', *char_ptr);
@@ -3159,8 +3311,7 @@ THREADED_TEST(GlobalHandle) {
v8::HandleScope scope(isolate);
CHECK_EQ(v8::Local<String>::New(isolate, global)->Length(), 3);
}
- global.Dispose();
- global.Clear();
+ global.Reset();
{
v8::HandleScope scope(isolate);
global.Reset(isolate, v8_str("str"));
@@ -3169,7 +3320,7 @@ THREADED_TEST(GlobalHandle) {
v8::HandleScope scope(isolate);
CHECK_EQ(v8::Local<String>::New(isolate, global)->Length(), 3);
}
- global.Dispose();
+ global.Reset();
}
@@ -3196,7 +3347,7 @@ THREADED_TEST(ResettingGlobalHandle) {
v8::HandleScope scope(isolate);
CHECK_EQ(v8::Local<String>::New(isolate, global)->Length(), 6);
}
- global.Dispose();
+ global.Reset();
CHECK_EQ(global_handles->global_handles_count(), initial_handle_count - 1);
}
@@ -3225,25 +3376,70 @@ THREADED_TEST(ResettingGlobalHandleToEmpty) {
}
-THREADED_TEST(ClearAndLeakGlobal) {
+template<class T>
+static v8::UniquePersistent<T> PassUnique(v8::UniquePersistent<T> unique) {
+ return unique.Pass();
+}
+
+
+template<class T>
+static v8::UniquePersistent<T> ReturnUnique(v8::Isolate* isolate,
+ const v8::Persistent<T> & global) {
+ v8::UniquePersistent<String> unique(isolate, global);
+ return unique.Pass();
+}
+
+
+THREADED_TEST(UniquePersistent) {
v8::Isolate* isolate = CcTest::isolate();
- v8::internal::GlobalHandles* global_handles = NULL;
- int initial_handle_count = 0;
v8::Persistent<String> global;
{
v8::HandleScope scope(isolate);
- Local<String> str = v8_str("str");
- global_handles =
- reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
- initial_handle_count = global_handles->global_handles_count();
- global.Reset(isolate, str);
- }
- CHECK_EQ(global_handles->global_handles_count(), initial_handle_count + 1);
- String* str = global.ClearAndLeak();
- CHECK(global.IsEmpty());
- CHECK_EQ(global_handles->global_handles_count(), initial_handle_count + 1);
- global_handles->Destroy(reinterpret_cast<i::Object**>(str));
- CHECK_EQ(global_handles->global_handles_count(), initial_handle_count);
+ global.Reset(isolate, v8_str("str"));
+ }
+ v8::internal::GlobalHandles* global_handles =
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
+ int initial_handle_count = global_handles->global_handles_count();
+ {
+ v8::UniquePersistent<String> unique(isolate, global);
+ CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ // Test assignment via Pass
+ {
+ v8::UniquePersistent<String> copy = unique.Pass();
+ CHECK(unique.IsEmpty());
+ CHECK(copy == global);
+ CHECK_EQ(initial_handle_count + 1,
+ global_handles->global_handles_count());
+ unique = copy.Pass();
+ }
+ // Test ctor via Pass
+ {
+ v8::UniquePersistent<String> copy(unique.Pass());
+ CHECK(unique.IsEmpty());
+ CHECK(copy == global);
+ CHECK_EQ(initial_handle_count + 1,
+ global_handles->global_handles_count());
+ unique = copy.Pass();
+ }
+ // Test pass through function call
+ {
+ v8::UniquePersistent<String> copy = PassUnique(unique.Pass());
+ CHECK(unique.IsEmpty());
+ CHECK(copy == global);
+ CHECK_EQ(initial_handle_count + 1,
+ global_handles->global_handles_count());
+ unique = copy.Pass();
+ }
+ CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ }
+ // Test pass from function call
+ {
+ v8::UniquePersistent<String> unique = ReturnUnique(isolate, global);
+ CHECK(unique == global);
+ CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ }
+ CHECK_EQ(initial_handle_count, global_handles->global_handles_count());
+ global.Reset();
}
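
The Pass() calls exercised above are the intended way to move a UniquePersistent between scopes. A minimal sketch of the same ownership transfer outside the test harness (MakeHandle and Use are illustrative names, not part of this patch):

v8::UniquePersistent<v8::String> MakeHandle(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::String> str = v8::String::NewFromUtf8(isolate, "str");
  // UniquePersistent is non-copyable; ownership leaves the function via Pass().
  v8::UniquePersistent<v8::String> unique(isolate, str);
  return unique.Pass();
}

void Use(v8::Isolate* isolate) {
  v8::UniquePersistent<v8::String> owner = MakeHandle(isolate);
  // The underlying global handle is released when `owner` goes out of scope.
}
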
@@ -3256,7 +3452,7 @@ THREADED_TEST(GlobalHandleUpcast) {
v8::Persistent<Value>::Cast(global_string);
CHECK(v8::Local<v8::Value>::New(isolate, global_value)->IsString());
CHECK(global_string == v8::Persistent<String>::Cast(global_value));
- global_string.Dispose();
+ global_string.Reset();
}
@@ -3293,8 +3489,8 @@ THREADED_TEST(HandleEquality) {
CHECK_EQ(local1 == anotherLocal1, true);
CHECK_EQ(local1 != anotherLocal1, false);
}
- global1.Dispose();
- global2.Dispose();
+ global1.Reset();
+ global2.Reset();
}
@@ -3319,12 +3515,20 @@ class WeakCallCounter {
template<typename T>
-static void WeakPointerCallback(v8::Isolate* isolate,
- Persistent<T>* handle,
- WeakCallCounter* counter) {
- CHECK_EQ(1234, counter->id());
- counter->increment();
- handle->Dispose();
+struct WeakCallCounterAndPersistent {
+ explicit WeakCallCounterAndPersistent(WeakCallCounter* counter)
+ : counter(counter) {}
+ WeakCallCounter* counter;
+ v8::Persistent<T> handle;
+};
+
+
+template <typename T>
+static void WeakPointerCallback(
+ const v8::WeakCallbackData<T, WeakCallCounterAndPersistent<T> >& data) {
+ CHECK_EQ(1234, data.GetParameter()->counter->id());
+ data.GetParameter()->counter->increment();
+ data.GetParameter()->handle.Reset();
}
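
The SetWeak()/WeakCallbackData pairing introduced here replaces the old MakeWeak(parameter, callback) form throughout the object-group tests below. A minimal sketch of the wiring in isolation (OwnedHandle, OnWeak, and Register are illustrative names, not from this patch):

struct OwnedHandle {
  v8::Persistent<v8::Object> handle;
};

static void OnWeak(const v8::WeakCallbackData<v8::Object, OwnedHandle>& data) {
  // The pointer registered with SetWeak() comes back via GetParameter();
  // the callback is now responsible for resetting the handle itself.
  data.GetParameter()->handle.Reset();
}

static void Register(v8::Isolate* isolate, OwnedHandle* owned) {
  v8::HandleScope scope(isolate);
  owned->handle.Reset(isolate, v8::Object::New(isolate));
  owned->handle.SetWeak(owned, &OnWeak);
}
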
@@ -3339,52 +3543,53 @@ THREADED_TEST(ApiObjectGroups) {
v8::Isolate* iso = env->GetIsolate();
HandleScope scope(iso);
- Persistent<Value> g1s1;
- Persistent<Value> g1s2;
- Persistent<Value> g1c1;
- Persistent<Value> g2s1;
- Persistent<Value> g2s2;
- Persistent<Value> g2c1;
-
WeakCallCounter counter(1234);
+ WeakCallCounterAndPersistent<Value> g1s1(&counter);
+ WeakCallCounterAndPersistent<Value> g1s2(&counter);
+ WeakCallCounterAndPersistent<Value> g1c1(&counter);
+ WeakCallCounterAndPersistent<Value> g2s1(&counter);
+ WeakCallCounterAndPersistent<Value> g2s2(&counter);
+ WeakCallCounterAndPersistent<Value> g2c1(&counter);
+
{
HandleScope scope(iso);
- g1s1.Reset(iso, Object::New());
- g1s2.Reset(iso, Object::New());
- g1c1.Reset(iso, Object::New());
- g1s1.MakeWeak(&counter, &WeakPointerCallback);
- g1s2.MakeWeak(&counter, &WeakPointerCallback);
- g1c1.MakeWeak(&counter, &WeakPointerCallback);
+ g1s1.handle.Reset(iso, Object::New(iso));
+ g1s2.handle.Reset(iso, Object::New(iso));
+ g1c1.handle.Reset(iso, Object::New(iso));
+ g1s1.handle.SetWeak(&g1s1, &WeakPointerCallback);
+ g1s2.handle.SetWeak(&g1s2, &WeakPointerCallback);
+ g1c1.handle.SetWeak(&g1c1, &WeakPointerCallback);
- g2s1.Reset(iso, Object::New());
- g2s2.Reset(iso, Object::New());
- g2c1.Reset(iso, Object::New());
- g2s1.MakeWeak(&counter, &WeakPointerCallback);
- g2s2.MakeWeak(&counter, &WeakPointerCallback);
- g2c1.MakeWeak(&counter, &WeakPointerCallback);
+ g2s1.handle.Reset(iso, Object::New(iso));
+ g2s2.handle.Reset(iso, Object::New(iso));
+ g2c1.handle.Reset(iso, Object::New(iso));
+ g2s1.handle.SetWeak(&g2s1, &WeakPointerCallback);
+ g2s2.handle.SetWeak(&g2s2, &WeakPointerCallback);
+ g2c1.handle.SetWeak(&g2c1, &WeakPointerCallback);
}
- Persistent<Value> root(iso, g1s1); // make a root.
+ WeakCallCounterAndPersistent<Value> root(&counter);
+ root.handle.Reset(iso, g1s1.handle); // make a root.
// Connect group 1 and 2, make a cycle.
{
HandleScope scope(iso);
- CHECK(Local<Object>::New(iso, g1s2.As<Object>())->
- Set(0, Local<Value>::New(iso, g2s2)));
- CHECK(Local<Object>::New(iso, g2s1.As<Object>())->
- Set(0, Local<Value>::New(iso, g1s1)));
+ CHECK(Local<Object>::New(iso, g1s2.handle.As<Object>())->
+ Set(0, Local<Value>::New(iso, g2s2.handle)));
+ CHECK(Local<Object>::New(iso, g2s1.handle.As<Object>())->
+ Set(0, Local<Value>::New(iso, g1s1.handle)));
}
{
- UniqueId id1 = MakeUniqueId(g1s1);
- UniqueId id2 = MakeUniqueId(g2s2);
- iso->SetObjectGroupId(g1s1, id1);
- iso->SetObjectGroupId(g1s2, id1);
- iso->SetReferenceFromGroup(id1, g1c1);
- iso->SetObjectGroupId(g2s1, id2);
- iso->SetObjectGroupId(g2s2, id2);
- iso->SetReferenceFromGroup(id2, g2c1);
+ UniqueId id1 = MakeUniqueId(g1s1.handle);
+ UniqueId id2 = MakeUniqueId(g2s2.handle);
+ iso->SetObjectGroupId(g1s1.handle, id1);
+ iso->SetObjectGroupId(g1s2.handle, id1);
+ iso->SetReferenceFromGroup(id1, g1c1.handle);
+ iso->SetObjectGroupId(g2s1.handle, id2);
+ iso->SetObjectGroupId(g2s2.handle, id2);
+ iso->SetReferenceFromGroup(id2, g2c1.handle);
}
// Do a single full GC, ensure incremental marking is stopped.
v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
@@ -3395,22 +3600,22 @@ THREADED_TEST(ApiObjectGroups) {
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(&counter, &WeakPointerCallback);
+ root.handle.SetWeak(&root, &WeakPointerCallback);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
- g1c1.ClearWeak();
- g2c1.ClearWeak();
+ g1c1.handle.ClearWeak();
+ g2c1.handle.ClearWeak();
// Groups are deleted, rebuild groups.
{
- UniqueId id1 = MakeUniqueId(g1s1);
- UniqueId id2 = MakeUniqueId(g2s2);
- iso->SetObjectGroupId(g1s1, id1);
- iso->SetObjectGroupId(g1s2, id1);
- iso->SetReferenceFromGroup(id1, g1c1);
- iso->SetObjectGroupId(g2s1, id2);
- iso->SetObjectGroupId(g2s2, id2);
- iso->SetReferenceFromGroup(id2, g2c1);
+ UniqueId id1 = MakeUniqueId(g1s1.handle);
+ UniqueId id2 = MakeUniqueId(g2s2.handle);
+ iso->SetObjectGroupId(g1s1.handle, id1);
+ iso->SetObjectGroupId(g1s2.handle, id1);
+ iso->SetReferenceFromGroup(id1, g1c1.handle);
+ iso->SetObjectGroupId(g2s1.handle, id2);
+ iso->SetObjectGroupId(g2s2.handle, id2);
+ iso->SetReferenceFromGroup(id2, g2c1.handle);
}
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
@@ -3419,8 +3624,8 @@ THREADED_TEST(ApiObjectGroups) {
CHECK_EQ(5, counter.NumberOfWeakCalls());
// And now make children weak again and collect them.
- g1c1.MakeWeak(&counter, &WeakPointerCallback);
- g2c1.MakeWeak(&counter, &WeakPointerCallback);
+ g1c1.handle.SetWeak(&g1c1, &WeakPointerCallback);
+ g2c1.handle.SetWeak(&g2c1, &WeakPointerCallback);
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(7, counter.NumberOfWeakCalls());
@@ -3432,50 +3637,53 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
v8::Isolate* iso = env->GetIsolate();
HandleScope scope(iso);
- Persistent<Object> g1s1;
- Persistent<String> g1s2;
- Persistent<String> g1c1;
- Persistent<Object> g2s1;
- Persistent<String> g2s2;
- Persistent<String> g2c1;
-
WeakCallCounter counter(1234);
+ WeakCallCounterAndPersistent<Object> g1s1(&counter);
+ WeakCallCounterAndPersistent<String> g1s2(&counter);
+ WeakCallCounterAndPersistent<String> g1c1(&counter);
+ WeakCallCounterAndPersistent<Object> g2s1(&counter);
+ WeakCallCounterAndPersistent<String> g2s2(&counter);
+ WeakCallCounterAndPersistent<String> g2c1(&counter);
+
{
HandleScope scope(iso);
- g1s1.Reset(iso, Object::New());
- g1s2.Reset(iso, String::New("foo1"));
- g1c1.Reset(iso, String::New("foo2"));
- g1s1.MakeWeak(&counter, &WeakPointerCallback);
- g1s2.MakeWeak(&counter, &WeakPointerCallback);
- g1c1.MakeWeak(&counter, &WeakPointerCallback);
+ g1s1.handle.Reset(iso, Object::New(iso));
+ g1s2.handle.Reset(iso, String::NewFromUtf8(iso, "foo1"));
+ g1c1.handle.Reset(iso, String::NewFromUtf8(iso, "foo2"));
+ g1s1.handle.SetWeak(&g1s1, &WeakPointerCallback);
+ g1s2.handle.SetWeak(&g1s2, &WeakPointerCallback);
+ g1c1.handle.SetWeak(&g1c1, &WeakPointerCallback);
- g2s1.Reset(iso, Object::New());
- g2s2.Reset(iso, String::New("foo3"));
- g2c1.Reset(iso, String::New("foo4"));
- g2s1.MakeWeak(&counter, &WeakPointerCallback);
- g2s2.MakeWeak(&counter, &WeakPointerCallback);
- g2c1.MakeWeak(&counter, &WeakPointerCallback);
+ g2s1.handle.Reset(iso, Object::New(iso));
+ g2s2.handle.Reset(iso, String::NewFromUtf8(iso, "foo3"));
+ g2c1.handle.Reset(iso, String::NewFromUtf8(iso, "foo4"));
+ g2s1.handle.SetWeak(&g2s1, &WeakPointerCallback);
+ g2s2.handle.SetWeak(&g2s2, &WeakPointerCallback);
+ g2c1.handle.SetWeak(&g2c1, &WeakPointerCallback);
}
- Persistent<Value> root(iso, g1s1); // make a root.
+ WeakCallCounterAndPersistent<Value> root(&counter);
+ root.handle.Reset(iso, g1s1.handle); // make a root.
// Connect group 1 and 2, make a cycle.
{
HandleScope scope(iso);
- CHECK(Local<Object>::New(iso, g1s1)->Set(0, Local<Object>::New(iso, g2s1)));
- CHECK(Local<Object>::New(iso, g2s1)->Set(0, Local<Object>::New(iso, g1s1)));
+ CHECK(Local<Object>::New(iso, g1s1.handle)
+ ->Set(0, Local<Object>::New(iso, g2s1.handle)));
+ CHECK(Local<Object>::New(iso, g2s1.handle)
+ ->Set(0, Local<Object>::New(iso, g1s1.handle)));
}
{
- UniqueId id1 = MakeUniqueId(g1s1);
- UniqueId id2 = MakeUniqueId(g2s2);
- iso->SetObjectGroupId(g1s1, id1);
- iso->SetObjectGroupId(g1s2, id1);
- iso->SetReference(g1s1, g1c1);
- iso->SetObjectGroupId(g2s1, id2);
- iso->SetObjectGroupId(g2s2, id2);
- iso->SetReferenceFromGroup(id2, g2c1);
+ UniqueId id1 = MakeUniqueId(g1s1.handle);
+ UniqueId id2 = MakeUniqueId(g2s2.handle);
+ iso->SetObjectGroupId(g1s1.handle, id1);
+ iso->SetObjectGroupId(g1s2.handle, id1);
+ iso->SetReference(g1s1.handle, g1c1.handle);
+ iso->SetObjectGroupId(g2s1.handle, id2);
+ iso->SetObjectGroupId(g2s2.handle, id2);
+ iso->SetReferenceFromGroup(id2, g2c1.handle);
}
// Do a single full GC, ensure incremental marking is stopped.
v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
@@ -3486,22 +3694,22 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(&counter, &WeakPointerCallback);
+ root.handle.SetWeak(&root, &WeakPointerCallback);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
- g1c1.ClearWeak();
- g2c1.ClearWeak();
+ g1c1.handle.ClearWeak();
+ g2c1.handle.ClearWeak();
// Groups are deleted, rebuild groups.
{
- UniqueId id1 = MakeUniqueId(g1s1);
- UniqueId id2 = MakeUniqueId(g2s2);
- iso->SetObjectGroupId(g1s1, id1);
- iso->SetObjectGroupId(g1s2, id1);
- iso->SetReference(g1s1, g1c1);
- iso->SetObjectGroupId(g2s1, id2);
- iso->SetObjectGroupId(g2s2, id2);
- iso->SetReferenceFromGroup(id2, g2c1);
+ UniqueId id1 = MakeUniqueId(g1s1.handle);
+ UniqueId id2 = MakeUniqueId(g2s2.handle);
+ iso->SetObjectGroupId(g1s1.handle, id1);
+ iso->SetObjectGroupId(g1s2.handle, id1);
+ iso->SetReference(g1s1.handle, g1c1.handle);
+ iso->SetObjectGroupId(g2s1.handle, id2);
+ iso->SetObjectGroupId(g2s2.handle, id2);
+ iso->SetReferenceFromGroup(id2, g2c1.handle);
}
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
@@ -3510,8 +3718,8 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
CHECK_EQ(5, counter.NumberOfWeakCalls());
// And now make children weak again and collect them.
- g1c1.MakeWeak(&counter, &WeakPointerCallback);
- g2c1.MakeWeak(&counter, &WeakPointerCallback);
+ g1c1.handle.SetWeak(&g1c1, &WeakPointerCallback);
+ g2c1.handle.SetWeak(&g2c1, &WeakPointerCallback);
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(7, counter.NumberOfWeakCalls());
@@ -3525,68 +3733,69 @@ THREADED_TEST(ApiObjectGroupsCycle) {
WeakCallCounter counter(1234);
- Persistent<Value> g1s1;
- Persistent<Value> g1s2;
- Persistent<Value> g2s1;
- Persistent<Value> g2s2;
- Persistent<Value> g3s1;
- Persistent<Value> g3s2;
- Persistent<Value> g4s1;
- Persistent<Value> g4s2;
+ WeakCallCounterAndPersistent<Value> g1s1(&counter);
+ WeakCallCounterAndPersistent<Value> g1s2(&counter);
+ WeakCallCounterAndPersistent<Value> g2s1(&counter);
+ WeakCallCounterAndPersistent<Value> g2s2(&counter);
+ WeakCallCounterAndPersistent<Value> g3s1(&counter);
+ WeakCallCounterAndPersistent<Value> g3s2(&counter);
+ WeakCallCounterAndPersistent<Value> g4s1(&counter);
+ WeakCallCounterAndPersistent<Value> g4s2(&counter);
{
HandleScope scope(iso);
- g1s1.Reset(iso, Object::New());
- g1s2.Reset(iso, Object::New());
- g1s1.MakeWeak(&counter, &WeakPointerCallback);
- g1s2.MakeWeak(&counter, &WeakPointerCallback);
- CHECK(g1s1.IsWeak());
- CHECK(g1s2.IsWeak());
-
- g2s1.Reset(iso, Object::New());
- g2s2.Reset(iso, Object::New());
- g2s1.MakeWeak(&counter, &WeakPointerCallback);
- g2s2.MakeWeak(&counter, &WeakPointerCallback);
- CHECK(g2s1.IsWeak());
- CHECK(g2s2.IsWeak());
-
- g3s1.Reset(iso, Object::New());
- g3s2.Reset(iso, Object::New());
- g3s1.MakeWeak(&counter, &WeakPointerCallback);
- g3s2.MakeWeak(&counter, &WeakPointerCallback);
- CHECK(g3s1.IsWeak());
- CHECK(g3s2.IsWeak());
-
- g4s1.Reset(iso, Object::New());
- g4s2.Reset(iso, Object::New());
- g4s1.MakeWeak(&counter, &WeakPointerCallback);
- g4s2.MakeWeak(&counter, &WeakPointerCallback);
- CHECK(g4s1.IsWeak());
- CHECK(g4s2.IsWeak());
- }
-
- Persistent<Value> root(iso, g1s1); // make a root.
+ g1s1.handle.Reset(iso, Object::New(iso));
+ g1s2.handle.Reset(iso, Object::New(iso));
+ g1s1.handle.SetWeak(&g1s1, &WeakPointerCallback);
+ g1s2.handle.SetWeak(&g1s2, &WeakPointerCallback);
+ CHECK(g1s1.handle.IsWeak());
+ CHECK(g1s2.handle.IsWeak());
+
+ g2s1.handle.Reset(iso, Object::New(iso));
+ g2s2.handle.Reset(iso, Object::New(iso));
+ g2s1.handle.SetWeak(&g2s1, &WeakPointerCallback);
+ g2s2.handle.SetWeak(&g2s2, &WeakPointerCallback);
+ CHECK(g2s1.handle.IsWeak());
+ CHECK(g2s2.handle.IsWeak());
+
+ g3s1.handle.Reset(iso, Object::New(iso));
+ g3s2.handle.Reset(iso, Object::New(iso));
+ g3s1.handle.SetWeak(&g3s1, &WeakPointerCallback);
+ g3s2.handle.SetWeak(&g3s2, &WeakPointerCallback);
+ CHECK(g3s1.handle.IsWeak());
+ CHECK(g3s2.handle.IsWeak());
+
+ g4s1.handle.Reset(iso, Object::New(iso));
+ g4s2.handle.Reset(iso, Object::New(iso));
+ g4s1.handle.SetWeak(&g4s1, &WeakPointerCallback);
+ g4s2.handle.SetWeak(&g4s2, &WeakPointerCallback);
+ CHECK(g4s1.handle.IsWeak());
+ CHECK(g4s2.handle.IsWeak());
+ }
+
+ WeakCallCounterAndPersistent<Value> root(&counter);
+ root.handle.Reset(iso, g1s1.handle); // make a root.
// Connect groups. We're building the following cycle:
// G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
// groups.
{
- UniqueId id1 = MakeUniqueId(g1s1);
- UniqueId id2 = MakeUniqueId(g2s1);
- UniqueId id3 = MakeUniqueId(g3s1);
- UniqueId id4 = MakeUniqueId(g4s1);
- iso->SetObjectGroupId(g1s1, id1);
- iso->SetObjectGroupId(g1s2, id1);
- iso->SetReferenceFromGroup(id1, g2s1);
- iso->SetObjectGroupId(g2s1, id2);
- iso->SetObjectGroupId(g2s2, id2);
- iso->SetReferenceFromGroup(id2, g3s1);
- iso->SetObjectGroupId(g3s1, id3);
- iso->SetObjectGroupId(g3s2, id3);
- iso->SetReferenceFromGroup(id3, g4s1);
- iso->SetObjectGroupId(g4s1, id4);
- iso->SetObjectGroupId(g4s2, id4);
- iso->SetReferenceFromGroup(id4, g1s1);
+ UniqueId id1 = MakeUniqueId(g1s1.handle);
+ UniqueId id2 = MakeUniqueId(g2s1.handle);
+ UniqueId id3 = MakeUniqueId(g3s1.handle);
+ UniqueId id4 = MakeUniqueId(g4s1.handle);
+ iso->SetObjectGroupId(g1s1.handle, id1);
+ iso->SetObjectGroupId(g1s2.handle, id1);
+ iso->SetReferenceFromGroup(id1, g2s1.handle);
+ iso->SetObjectGroupId(g2s1.handle, id2);
+ iso->SetObjectGroupId(g2s2.handle, id2);
+ iso->SetReferenceFromGroup(id2, g3s1.handle);
+ iso->SetObjectGroupId(g3s1.handle, id3);
+ iso->SetObjectGroupId(g3s2.handle, id3);
+ iso->SetReferenceFromGroup(id3, g4s1.handle);
+ iso->SetObjectGroupId(g4s1.handle, id4);
+ iso->SetObjectGroupId(g4s2.handle, id4);
+ iso->SetReferenceFromGroup(id4, g1s1.handle);
}
// Do a single full GC
v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
@@ -3597,26 +3806,26 @@ THREADED_TEST(ApiObjectGroupsCycle) {
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(&counter, &WeakPointerCallback);
+ root.handle.SetWeak(&root, &WeakPointerCallback);
// Groups are deleted, rebuild groups.
{
- UniqueId id1 = MakeUniqueId(g1s1);
- UniqueId id2 = MakeUniqueId(g2s1);
- UniqueId id3 = MakeUniqueId(g3s1);
- UniqueId id4 = MakeUniqueId(g4s1);
- iso->SetObjectGroupId(g1s1, id1);
- iso->SetObjectGroupId(g1s2, id1);
- iso->SetReferenceFromGroup(id1, g2s1);
- iso->SetObjectGroupId(g2s1, id2);
- iso->SetObjectGroupId(g2s2, id2);
- iso->SetReferenceFromGroup(id2, g3s1);
- iso->SetObjectGroupId(g3s1, id3);
- iso->SetObjectGroupId(g3s2, id3);
- iso->SetReferenceFromGroup(id3, g4s1);
- iso->SetObjectGroupId(g4s1, id4);
- iso->SetObjectGroupId(g4s2, id4);
- iso->SetReferenceFromGroup(id4, g1s1);
+ UniqueId id1 = MakeUniqueId(g1s1.handle);
+ UniqueId id2 = MakeUniqueId(g2s1.handle);
+ UniqueId id3 = MakeUniqueId(g3s1.handle);
+ UniqueId id4 = MakeUniqueId(g4s1.handle);
+ iso->SetObjectGroupId(g1s1.handle, id1);
+ iso->SetObjectGroupId(g1s2.handle, id1);
+ iso->SetReferenceFromGroup(id1, g2s1.handle);
+ iso->SetObjectGroupId(g2s1.handle, id2);
+ iso->SetObjectGroupId(g2s2.handle, id2);
+ iso->SetReferenceFromGroup(id2, g3s1.handle);
+ iso->SetObjectGroupId(g3s1.handle, id3);
+ iso->SetObjectGroupId(g3s2.handle, id3);
+ iso->SetReferenceFromGroup(id3, g4s1.handle);
+ iso->SetObjectGroupId(g4s1.handle, id4);
+ iso->SetObjectGroupId(g4s2.handle, id4);
+ iso->SetReferenceFromGroup(id4, g1s1.handle);
}
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
@@ -3637,58 +3846,59 @@ TEST(ApiObjectGroupsCycleForScavenger) {
WeakCallCounter counter(1234);
- Persistent<Value> g1s1;
- Persistent<Value> g1s2;
- Persistent<Value> g2s1;
- Persistent<Value> g2s2;
- Persistent<Value> g3s1;
- Persistent<Value> g3s2;
+ WeakCallCounterAndPersistent<Value> g1s1(&counter);
+ WeakCallCounterAndPersistent<Value> g1s2(&counter);
+ WeakCallCounterAndPersistent<Value> g2s1(&counter);
+ WeakCallCounterAndPersistent<Value> g2s2(&counter);
+ WeakCallCounterAndPersistent<Value> g3s1(&counter);
+ WeakCallCounterAndPersistent<Value> g3s2(&counter);
{
HandleScope scope(iso);
- g1s1.Reset(iso, Object::New());
- g1s2.Reset(iso, Object::New());
- g1s1.MakeWeak(&counter, &WeakPointerCallback);
- g1s2.MakeWeak(&counter, &WeakPointerCallback);
+ g1s1.handle.Reset(iso, Object::New(iso));
+ g1s2.handle.Reset(iso, Object::New(iso));
+ g1s1.handle.SetWeak(&g1s1, &WeakPointerCallback);
+ g1s2.handle.SetWeak(&g1s2, &WeakPointerCallback);
- g2s1.Reset(iso, Object::New());
- g2s2.Reset(iso, Object::New());
- g2s1.MakeWeak(&counter, &WeakPointerCallback);
- g2s2.MakeWeak(&counter, &WeakPointerCallback);
+ g2s1.handle.Reset(iso, Object::New(iso));
+ g2s2.handle.Reset(iso, Object::New(iso));
+ g2s1.handle.SetWeak(&g2s1, &WeakPointerCallback);
+ g2s2.handle.SetWeak(&g2s2, &WeakPointerCallback);
- g3s1.Reset(iso, Object::New());
- g3s2.Reset(iso, Object::New());
- g3s1.MakeWeak(&counter, &WeakPointerCallback);
- g3s2.MakeWeak(&counter, &WeakPointerCallback);
+ g3s1.handle.Reset(iso, Object::New(iso));
+ g3s2.handle.Reset(iso, Object::New(iso));
+ g3s1.handle.SetWeak(&g3s1, &WeakPointerCallback);
+ g3s2.handle.SetWeak(&g3s2, &WeakPointerCallback);
}
// Make a root.
- Persistent<Value> root(iso, g1s1);
- root.MarkPartiallyDependent();
+ WeakCallCounterAndPersistent<Value> root(&counter);
+ root.handle.Reset(iso, g1s1.handle);
+ root.handle.MarkPartiallyDependent();
// Connect groups. We're building the following cycle:
// G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
// groups.
{
HandleScope handle_scope(iso);
- g1s1.MarkPartiallyDependent();
- g1s2.MarkPartiallyDependent();
- g2s1.MarkPartiallyDependent();
- g2s2.MarkPartiallyDependent();
- g3s1.MarkPartiallyDependent();
- g3s2.MarkPartiallyDependent();
- iso->SetObjectGroupId(g1s1, UniqueId(1));
- iso->SetObjectGroupId(g1s2, UniqueId(1));
- Local<Object>::New(iso, g1s1.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g2s1));
- iso->SetObjectGroupId(g2s1, UniqueId(2));
- iso->SetObjectGroupId(g2s2, UniqueId(2));
- Local<Object>::New(iso, g2s1.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g3s1));
- iso->SetObjectGroupId(g3s1, UniqueId(3));
- iso->SetObjectGroupId(g3s2, UniqueId(3));
- Local<Object>::New(iso, g3s1.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g1s1));
+ g1s1.handle.MarkPartiallyDependent();
+ g1s2.handle.MarkPartiallyDependent();
+ g2s1.handle.MarkPartiallyDependent();
+ g2s2.handle.MarkPartiallyDependent();
+ g3s1.handle.MarkPartiallyDependent();
+ g3s2.handle.MarkPartiallyDependent();
+ iso->SetObjectGroupId(g1s1.handle, UniqueId(1));
+ iso->SetObjectGroupId(g1s2.handle, UniqueId(1));
+ Local<Object>::New(iso, g1s1.handle.As<Object>())->Set(
+ v8_str("x"), Local<Value>::New(iso, g2s1.handle));
+ iso->SetObjectGroupId(g2s1.handle, UniqueId(2));
+ iso->SetObjectGroupId(g2s2.handle, UniqueId(2));
+ Local<Object>::New(iso, g2s1.handle.As<Object>())->Set(
+ v8_str("x"), Local<Value>::New(iso, g3s1.handle));
+ iso->SetObjectGroupId(g3s1.handle, UniqueId(3));
+ iso->SetObjectGroupId(g3s2.handle, UniqueId(3));
+ Local<Object>::New(iso, g3s1.handle.As<Object>())->Set(
+ v8_str("x"), Local<Value>::New(iso, g1s1.handle));
}
v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
@@ -3699,30 +3909,30 @@ TEST(ApiObjectGroupsCycleForScavenger) {
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(&counter, &WeakPointerCallback);
- root.MarkPartiallyDependent();
+ root.handle.SetWeak(&root, &WeakPointerCallback);
+ root.handle.MarkPartiallyDependent();
// Groups are deleted, rebuild groups.
{
HandleScope handle_scope(iso);
- g1s1.MarkPartiallyDependent();
- g1s2.MarkPartiallyDependent();
- g2s1.MarkPartiallyDependent();
- g2s2.MarkPartiallyDependent();
- g3s1.MarkPartiallyDependent();
- g3s2.MarkPartiallyDependent();
- iso->SetObjectGroupId(g1s1, UniqueId(1));
- iso->SetObjectGroupId(g1s2, UniqueId(1));
- Local<Object>::New(iso, g1s1.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g2s1));
- iso->SetObjectGroupId(g2s1, UniqueId(2));
- iso->SetObjectGroupId(g2s2, UniqueId(2));
- Local<Object>::New(iso, g2s1.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g3s1));
- iso->SetObjectGroupId(g3s1, UniqueId(3));
- iso->SetObjectGroupId(g3s2, UniqueId(3));
- Local<Object>::New(iso, g3s1.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g1s1));
+ g1s1.handle.MarkPartiallyDependent();
+ g1s2.handle.MarkPartiallyDependent();
+ g2s1.handle.MarkPartiallyDependent();
+ g2s2.handle.MarkPartiallyDependent();
+ g3s1.handle.MarkPartiallyDependent();
+ g3s2.handle.MarkPartiallyDependent();
+ iso->SetObjectGroupId(g1s1.handle, UniqueId(1));
+ iso->SetObjectGroupId(g1s2.handle, UniqueId(1));
+ Local<Object>::New(iso, g1s1.handle.As<Object>())->Set(
+ v8_str("x"), Local<Value>::New(iso, g2s1.handle));
+ iso->SetObjectGroupId(g2s1.handle, UniqueId(2));
+ iso->SetObjectGroupId(g2s2.handle, UniqueId(2));
+ Local<Object>::New(iso, g2s1.handle.As<Object>())->Set(
+ v8_str("x"), Local<Value>::New(iso, g3s1.handle));
+ iso->SetObjectGroupId(g3s1.handle, UniqueId(3));
+ iso->SetObjectGroupId(g3s2.handle, UniqueId(3));
+ Local<Object>::New(iso, g3s1.handle.As<Object>())->Set(
+ v8_str("x"), Local<Value>::New(iso, g1s1.handle));
}
heap->CollectGarbage(i::NEW_SPACE);
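In the scavenger variant above, group membership is additionally signalled with MarkPartiallyDependent and the cross-group edges are plain JavaScript properties rather than SetReferenceFromGroup calls. A rough sketch of that connection step, with illustrative names:

// Sketch only: for the scavenger variant, handles are marked partially
// dependent and cross-group edges are ordinary properties. Names illustrative.
void ConnectForScavenger(v8::Isolate* isolate,
                         v8::Persistent<v8::Value>& from,
                         v8::Persistent<v8::Value>& to,
                         int group_id) {
  v8::HandleScope scope(isolate);
  from.MarkPartiallyDependent();
  to.MarkPartiallyDependent();
  isolate->SetObjectGroupId(from, v8::UniqueId(group_id));
  v8::Local<v8::Object> holder =
      v8::Local<v8::Value>::New(isolate, from).As<v8::Object>();
  holder->Set(v8::String::NewFromUtf8(isolate, "x"),
              v8::Local<v8::Value>::New(isolate, to));
}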
@@ -3774,8 +3984,8 @@ THREADED_TEST(MessageHandler0) {
message_received = false;
v8::HandleScope scope(CcTest::isolate());
CHECK(!message_received);
- v8::V8::AddMessageListener(check_message_0, v8_num(5.76));
LocalContext context;
+ v8::V8::AddMessageListener(check_message_0, v8_num(5.76));
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"));
v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
@@ -3856,8 +4066,8 @@ TEST(MessageHandler3) {
LocalContext context;
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(1, isolate),
- v8::Integer::New(2, isolate),
+ v8::Integer::New(isolate, 1),
+ v8::Integer::New(isolate, 2),
v8::True(isolate));
v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
&origin);
@@ -3885,8 +4095,8 @@ TEST(MessageHandler4) {
LocalContext context;
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(1, isolate),
- v8::Integer::New(2, isolate),
+ v8::Integer::New(isolate, 1),
+ v8::Integer::New(isolate, 2),
v8::False(isolate));
v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
&origin);
@@ -3922,8 +4132,8 @@ TEST(MessageHandler5) {
LocalContext context;
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(1, isolate),
- v8::Integer::New(2, isolate),
+ v8::Integer::New(isolate, 1),
+ v8::Integer::New(isolate, 2),
v8::True(isolate));
v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
&origin);
@@ -3936,8 +4146,8 @@ TEST(MessageHandler5) {
v8::V8::AddMessageListener(check_message_5b);
origin =
v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(1, isolate),
- v8::Integer::New(2, isolate),
+ v8::Integer::New(isolate, 1),
+ v8::Integer::New(isolate, 2),
v8::False(isolate));
script = Script::Compile(v8_str("throw 'error'"),
&origin);
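The ScriptOrigin changes in these message-handler hunks are all the same mechanical fix: v8::Integer::New now takes the isolate first. A hedged sketch of the resulting origin construction, with an illustrative resource name:

// Sketch only: the isolate now comes first in Integer::New.
// "test.js" is an illustrative resource name.
v8::ScriptOrigin MakeOrigin(v8::Isolate* isolate) {
  return v8::ScriptOrigin(
      v8::String::NewFromUtf8(isolate, "test.js"),
      v8::Integer::New(isolate, 1),   // line offset (was New(1, isolate))
      v8::Integer::New(isolate, 2),   // column offset
      v8::True(isolate));             // resource_is_shared_cross_origin
}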
@@ -3950,10 +4160,11 @@ TEST(MessageHandler5) {
THREADED_TEST(GetSetProperty) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
context->Global()->Set(v8_str("foo"), v8_num(14));
context->Global()->Set(v8_str("12"), v8_num(92));
- context->Global()->Set(v8::Integer::New(16), v8_num(32));
+ context->Global()->Set(v8::Integer::New(isolate, 16), v8_num(32));
context->Global()->Set(v8_num(13), v8_num(56));
Local<Value> foo = Script::Compile(v8_str("this.foo"))->Run();
CHECK_EQ(14, foo->Int32Value());
@@ -3963,13 +4174,16 @@ THREADED_TEST(GetSetProperty) {
CHECK_EQ(32, sixteen->Int32Value());
Local<Value> thirteen = Script::Compile(v8_str("this[13]"))->Run();
CHECK_EQ(56, thirteen->Int32Value());
- CHECK_EQ(92, context->Global()->Get(v8::Integer::New(12))->Int32Value());
+ CHECK_EQ(92,
+ context->Global()->Get(v8::Integer::New(isolate, 12))->Int32Value());
CHECK_EQ(92, context->Global()->Get(v8_str("12"))->Int32Value());
CHECK_EQ(92, context->Global()->Get(v8_num(12))->Int32Value());
- CHECK_EQ(32, context->Global()->Get(v8::Integer::New(16))->Int32Value());
+ CHECK_EQ(32,
+ context->Global()->Get(v8::Integer::New(isolate, 16))->Int32Value());
CHECK_EQ(32, context->Global()->Get(v8_str("16"))->Int32Value());
CHECK_EQ(32, context->Global()->Get(v8_num(16))->Int32Value());
- CHECK_EQ(56, context->Global()->Get(v8::Integer::New(13))->Int32Value());
+ CHECK_EQ(56,
+ context->Global()->Get(v8::Integer::New(isolate, 13))->Int32Value());
CHECK_EQ(56, context->Global()->Get(v8_str("13"))->Int32Value());
CHECK_EQ(56, context->Global()->Get(v8_num(13))->Int32Value());
}
@@ -4022,7 +4236,7 @@ THREADED_TEST(PropertyAttributes) {
THREADED_TEST(Array) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Local<v8::Array> array = v8::Array::New();
+ Local<v8::Array> array = v8::Array::New(context->GetIsolate());
CHECK_EQ(0, array->Length());
CHECK(array->Get(0)->IsUndefined());
CHECK(!array->Has(0));
@@ -4040,27 +4254,28 @@ THREADED_TEST(Array) {
CHECK_EQ(1, arr->Get(0)->Int32Value());
CHECK_EQ(2, arr->Get(1)->Int32Value());
CHECK_EQ(3, arr->Get(2)->Int32Value());
- array = v8::Array::New(27);
+ array = v8::Array::New(context->GetIsolate(), 27);
CHECK_EQ(27, array->Length());
- array = v8::Array::New(-27);
+ array = v8::Array::New(context->GetIsolate(), -27);
CHECK_EQ(0, array->Length());
}
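Array::New follows the same pattern as the other factories in this change: it now needs the creating isolate, and a negative length still clamps to zero. A small standalone sketch (assumes the caller already holds a HandleScope):

// Sketch only: Array::New now takes the isolate; a negative length clamps to 0.
v8::Local<v8::Array> MakeSmallArray(v8::Isolate* isolate) {
  v8::Local<v8::Array> a = v8::Array::New(isolate, 3);
  for (int i = 0; i < 3; ++i)
    a->Set(i, v8::Integer::New(isolate, i));
  return a;
}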
void HandleF(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::HandleScope scope(args.GetIsolate());
+ v8::EscapableHandleScope scope(args.GetIsolate());
ApiTestFuzzer::Fuzz();
- Local<v8::Array> result = v8::Array::New(args.Length());
+ Local<v8::Array> result = v8::Array::New(args.GetIsolate(), args.Length());
for (int i = 0; i < args.Length(); i++)
result->Set(i, args[i]);
- args.GetReturnValue().Set(scope.Close(result));
+ args.GetReturnValue().Set(scope.Escape(result));
}
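HandleF above also captures the handle-scope migration: a callback that used to Close() a plain HandleScope over its return value now uses an EscapableHandleScope and Escape. A compact before/after sketch with a placeholder callback:

// Sketch only: the scope migration in HandleF, shown as before/after.
// "ReturnAnObject" is a placeholder callback, not part of the test suite.
void ReturnAnObject(const v8::FunctionCallbackInfo<v8::Value>& args) {
  // Before: v8::HandleScope scope(args.GetIsolate());
  //         args.GetReturnValue().Set(scope.Close(obj));
  v8::EscapableHandleScope scope(args.GetIsolate());
  v8::Local<v8::Object> obj = v8::Object::New(args.GetIsolate());
  args.GetReturnValue().Set(scope.Escape(obj));  // Escape replaces Close
}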
THREADED_TEST(Vector) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> global = ObjectTemplate::New();
- global->Set(v8_str("f"), v8::FunctionTemplate::New(HandleF));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> global = ObjectTemplate::New(isolate);
+ global->Set(v8_str("f"), v8::FunctionTemplate::New(isolate, HandleF));
LocalContext context(0, global);
const char* fun = "f()";
@@ -4128,23 +4343,23 @@ THREADED_TEST(FunctionCall) {
v8::Handle<Value> args1[] = { v8_num(1.1) };
Local<v8::Array> a1 = Local<v8::Array>::Cast(Foo->Call(Foo, 1, args1));
CHECK_EQ(1, a1->Length());
- CHECK_EQ(1.1, a1->Get(v8::Integer::New(0))->NumberValue());
+ CHECK_EQ(1.1, a1->Get(v8::Integer::New(isolate, 0))->NumberValue());
v8::Handle<Value> args2[] = { v8_num(2.2),
v8_num(3.3) };
Local<v8::Array> a2 = Local<v8::Array>::Cast(Foo->Call(Foo, 2, args2));
CHECK_EQ(2, a2->Length());
- CHECK_EQ(2.2, a2->Get(v8::Integer::New(0))->NumberValue());
- CHECK_EQ(3.3, a2->Get(v8::Integer::New(1))->NumberValue());
+ CHECK_EQ(2.2, a2->Get(v8::Integer::New(isolate, 0))->NumberValue());
+ CHECK_EQ(3.3, a2->Get(v8::Integer::New(isolate, 1))->NumberValue());
v8::Handle<Value> args3[] = { v8_num(4.4),
v8_num(5.5),
v8_num(6.6) };
Local<v8::Array> a3 = Local<v8::Array>::Cast(Foo->Call(Foo, 3, args3));
CHECK_EQ(3, a3->Length());
- CHECK_EQ(4.4, a3->Get(v8::Integer::New(0))->NumberValue());
- CHECK_EQ(5.5, a3->Get(v8::Integer::New(1))->NumberValue());
- CHECK_EQ(6.6, a3->Get(v8::Integer::New(2))->NumberValue());
+ CHECK_EQ(4.4, a3->Get(v8::Integer::New(isolate, 0))->NumberValue());
+ CHECK_EQ(5.5, a3->Get(v8::Integer::New(isolate, 1))->NumberValue());
+ CHECK_EQ(6.6, a3->Get(v8::Integer::New(isolate, 2))->NumberValue());
v8::Handle<Value> args4[] = { v8_num(7.7),
v8_num(8.8),
@@ -4152,10 +4367,10 @@ THREADED_TEST(FunctionCall) {
v8_num(10.11) };
Local<v8::Array> a4 = Local<v8::Array>::Cast(Foo->Call(Foo, 4, args4));
CHECK_EQ(4, a4->Length());
- CHECK_EQ(7.7, a4->Get(v8::Integer::New(0))->NumberValue());
- CHECK_EQ(8.8, a4->Get(v8::Integer::New(1))->NumberValue());
- CHECK_EQ(9.9, a4->Get(v8::Integer::New(2))->NumberValue());
- CHECK_EQ(10.11, a4->Get(v8::Integer::New(3))->NumberValue());
+ CHECK_EQ(7.7, a4->Get(v8::Integer::New(isolate, 0))->NumberValue());
+ CHECK_EQ(8.8, a4->Get(v8::Integer::New(isolate, 1))->NumberValue());
+ CHECK_EQ(9.9, a4->Get(v8::Integer::New(isolate, 2))->NumberValue());
+ CHECK_EQ(10.11, a4->Get(v8::Integer::New(isolate, 3))->NumberValue());
Local<v8::Value> r1 = ReturnThisSloppy->Call(v8::Undefined(isolate), 0, NULL);
CHECK(r1->StrictEquals(context->Global()));
@@ -4198,14 +4413,14 @@ TEST(OutOfMemory) {
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(5 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
// Execute a script that causes out of memory.
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::V8::IgnoreOutOfMemoryException();
- Local<Script> script =
- Script::Compile(String::New(js_code_causing_out_of_memory));
+ Local<Script> script = Script::Compile(String::NewFromUtf8(
+ context->GetIsolate(), js_code_causing_out_of_memory));
Local<Value> result = script->Run();
// Check for out of memory state.
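Two migrations meet in the out-of-memory tests: SetResourceConstraints is now applied to an explicit isolate, and source strings are built with String::NewFromUtf8 instead of String::New. A minimal sketch of the constraints half, with illustrative sizes:

// Sketch only: constraints are now bound to a specific isolate.
// The sizes are illustrative, copied loosely from the test above.
void ConfigureIsolateForOomTest(v8::Isolate* isolate) {
  static const int K = 1024;
  v8::ResourceConstraints constraints;
  constraints.set_max_young_space_size(256 * K);
  constraints.set_max_old_space_size(5 * K * K);
  v8::SetResourceConstraints(isolate, &constraints);  // isolate argument is new
}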
@@ -4219,8 +4434,8 @@ void ProvokeOutOfMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Local<Script> script =
- Script::Compile(String::New(js_code_causing_out_of_memory));
+ Local<Script> script = Script::Compile(String::NewFromUtf8(
+ context->GetIsolate(), js_code_causing_out_of_memory));
Local<Value> result = script->Run();
// Check for out of memory state.
@@ -4239,12 +4454,13 @@ TEST(OutOfMemoryNested) {
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(5 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::SetResourceConstraints(isolate, &constraints);
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ProvokeOutOfMemory"),
- v8::FunctionTemplate::New(ProvokeOutOfMemory));
+ v8::FunctionTemplate::New(isolate, ProvokeOutOfMemory));
LocalContext context(0, templ);
v8::V8::IgnoreOutOfMemoryException();
Local<Value> result = CompileRun(
@@ -4260,6 +4476,11 @@ TEST(OutOfMemoryNested) {
}
+void OOMCallback(const char* location, const char* message) {
+ exit(0);
+}
+
+
TEST(HugeConsStringOutOfMemory) {
// It's not possible to read a snapshot into a heap with different dimensions.
if (i::Snapshot::IsEnabled()) return;
@@ -4268,28 +4489,27 @@ TEST(HugeConsStringOutOfMemory) {
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
// Execute a script that causes out of memory.
- v8::V8::IgnoreOutOfMemoryException();
+ v8::V8::SetFatalErrorHandler(OOMCallback);
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
// Build a huge string. This should fail with an out-of-memory exception.
- Local<Value> result = CompileRun(
+ CompileRun(
"var str = Array.prototype.join.call({length: 513}, \"A\").toUpperCase();"
"for (var i = 0; i < 22; i++) { str = str + str; }");
- // Check for out of memory state.
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
+ CHECK(false); // Should not return.
}
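HugeConsStringOutOfMemory no longer checks HasOutOfMemoryException on the context; it installs a fatal error handler and expects never to return. A sketch of that shape (the exit code is an assumption, and exit() needs <cstdlib>):

// Sketch only: a fatal-error handler for a test that is expected to run out
// of memory and never return normally.
static void ExitOnFatalError(const char* location, const char* message) {
  // location/message describe where V8 gave up; this sketch simply exits,
  // mirroring the OOMCallback added in this patch.
  exit(0);
}

void InstallOomHandler() {
  v8::V8::SetFatalErrorHandler(ExitOnFatalError);
}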
THREADED_TEST(ConstructCall) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
CompileRun(
"function Foo() {"
" var result = [];"
@@ -4308,23 +4528,23 @@ THREADED_TEST(ConstructCall) {
v8::Handle<Value> args1[] = { v8_num(1.1) };
Local<v8::Array> a1 = Local<v8::Array>::Cast(Foo->NewInstance(1, args1));
CHECK_EQ(1, a1->Length());
- CHECK_EQ(1.1, a1->Get(v8::Integer::New(0))->NumberValue());
+ CHECK_EQ(1.1, a1->Get(v8::Integer::New(isolate, 0))->NumberValue());
v8::Handle<Value> args2[] = { v8_num(2.2),
v8_num(3.3) };
Local<v8::Array> a2 = Local<v8::Array>::Cast(Foo->NewInstance(2, args2));
CHECK_EQ(2, a2->Length());
- CHECK_EQ(2.2, a2->Get(v8::Integer::New(0))->NumberValue());
- CHECK_EQ(3.3, a2->Get(v8::Integer::New(1))->NumberValue());
+ CHECK_EQ(2.2, a2->Get(v8::Integer::New(isolate, 0))->NumberValue());
+ CHECK_EQ(3.3, a2->Get(v8::Integer::New(isolate, 1))->NumberValue());
v8::Handle<Value> args3[] = { v8_num(4.4),
v8_num(5.5),
v8_num(6.6) };
Local<v8::Array> a3 = Local<v8::Array>::Cast(Foo->NewInstance(3, args3));
CHECK_EQ(3, a3->Length());
- CHECK_EQ(4.4, a3->Get(v8::Integer::New(0))->NumberValue());
- CHECK_EQ(5.5, a3->Get(v8::Integer::New(1))->NumberValue());
- CHECK_EQ(6.6, a3->Get(v8::Integer::New(2))->NumberValue());
+ CHECK_EQ(4.4, a3->Get(v8::Integer::New(isolate, 0))->NumberValue());
+ CHECK_EQ(5.5, a3->Get(v8::Integer::New(isolate, 1))->NumberValue());
+ CHECK_EQ(6.6, a3->Get(v8::Integer::New(isolate, 2))->NumberValue());
v8::Handle<Value> args4[] = { v8_num(7.7),
v8_num(8.8),
@@ -4332,10 +4552,10 @@ THREADED_TEST(ConstructCall) {
v8_num(10.11) };
Local<v8::Array> a4 = Local<v8::Array>::Cast(Foo->NewInstance(4, args4));
CHECK_EQ(4, a4->Length());
- CHECK_EQ(7.7, a4->Get(v8::Integer::New(0))->NumberValue());
- CHECK_EQ(8.8, a4->Get(v8::Integer::New(1))->NumberValue());
- CHECK_EQ(9.9, a4->Get(v8::Integer::New(2))->NumberValue());
- CHECK_EQ(10.11, a4->Get(v8::Integer::New(3))->NumberValue());
+ CHECK_EQ(7.7, a4->Get(v8::Integer::New(isolate, 0))->NumberValue());
+ CHECK_EQ(8.8, a4->Get(v8::Integer::New(isolate, 1))->NumberValue());
+ CHECK_EQ(9.9, a4->Get(v8::Integer::New(isolate, 2))->NumberValue());
+ CHECK_EQ(10.11, a4->Get(v8::Integer::New(isolate, 3))->NumberValue());
}
@@ -4521,10 +4741,11 @@ void CCatcher(const v8::FunctionCallbackInfo<v8::Value>& args) {
THREADED_TEST(APICatch) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
- v8::FunctionTemplate::New(ThrowFromC));
+ v8::FunctionTemplate::New(isolate, ThrowFromC));
LocalContext context(0, templ);
CompileRun(
"var thrown = false;"
@@ -4539,10 +4760,11 @@ THREADED_TEST(APICatch) {
THREADED_TEST(APIThrowTryCatch) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
- v8::FunctionTemplate::New(ThrowFromC));
+ v8::FunctionTemplate::New(isolate, ThrowFromC));
LocalContext context(0, templ);
v8::TryCatch try_catch;
CompileRun("ThrowFromC();");
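Nearly every remaining hunk repeats one two-line pattern: ObjectTemplate::New and FunctionTemplate::New now take the creating isolate as their first argument. One representative sketch with a placeholder callback:

// Sketch only: template factories take the isolate explicitly now.
// "AnswerCallback" is a placeholder, not part of the test suite.
void AnswerCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
  args.GetReturnValue().Set(v8::Integer::New(args.GetIsolate(), 42));
}

// Assumes the caller holds a HandleScope for the returned local.
v8::Local<v8::ObjectTemplate> MakeGlobalTemplate(v8::Isolate* isolate) {
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
  templ->Set(v8::String::NewFromUtf8(isolate, "answer"),
             v8::FunctionTemplate::New(isolate, AnswerCallback));
  return templ;
}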
@@ -4558,10 +4780,11 @@ THREADED_TEST(APIThrowTryCatch) {
// JS stack. This test therefore fails on the simulator. The test is
// not threaded to allow the threading tests to run on the simulator.
TEST(TryCatchInTryFinally) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("CCatcher"),
- v8::FunctionTemplate::New(CCatcher));
+ v8::FunctionTemplate::New(isolate, CCatcher));
LocalContext context(0, templ);
Local<Value> result = CompileRun("try {"
" try {"
@@ -4592,10 +4815,11 @@ static void Fail(const v8::FunctionCallbackInfo<v8::Value>& args) {
// formatting. However, they are invoked when performing normal error
// string conversions.
TEST(APIThrowMessageOverwrittenToString) {
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
v8::V8::AddMessageListener(check_reference_error_message);
- Local<ObjectTemplate> templ = ObjectTemplate::New();
- templ->Set(v8_str("fail"), v8::FunctionTemplate::New(Fail));
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(v8_str("fail"), v8::FunctionTemplate::New(isolate, Fail));
LocalContext context(NULL, templ);
CompileRun("asdf;");
CompileRun("var limit = {};"
@@ -4716,11 +4940,12 @@ static void receive_message(v8::Handle<v8::Message> message,
TEST(APIThrowMessage) {
message_received = false;
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
v8::V8::AddMessageListener(receive_message);
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
- v8::FunctionTemplate::New(ThrowFromC));
+ v8::FunctionTemplate::New(isolate, ThrowFromC));
LocalContext context(0, templ);
CompileRun("ThrowFromC();");
CHECK(message_received);
@@ -4730,11 +4955,12 @@ TEST(APIThrowMessage) {
TEST(APIThrowMessageAndVerboseTryCatch) {
message_received = false;
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
v8::V8::AddMessageListener(receive_message);
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
- v8::FunctionTemplate::New(ThrowFromC));
+ v8::FunctionTemplate::New(isolate, ThrowFromC));
LocalContext context(0, templ);
v8::TryCatch try_catch;
try_catch.SetVerbose(true);
@@ -4762,10 +4988,11 @@ TEST(APIStackOverflowAndVerboseTryCatch) {
THREADED_TEST(ExternalScriptException) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
- v8::FunctionTemplate::New(ThrowFromC));
+ v8::FunctionTemplate::New(isolate, ThrowFromC));
LocalContext context(0, templ);
v8::TryCatch try_catch;
@@ -4867,11 +5094,12 @@ THREADED_TEST(EvalInTryFinally) {
// JS stack. This test therefore fails on the simulator. The test is
// not threaded to allow the threading tests to run on the simulator.
TEST(ExceptionOrder) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
- templ->Set(v8_str("check"), v8::FunctionTemplate::New(JSCheck));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(v8_str("check"), v8::FunctionTemplate::New(isolate, JSCheck));
templ->Set(v8_str("CThrowCountDown"),
- v8::FunctionTemplate::New(CThrowCountDown));
+ v8::FunctionTemplate::New(isolate, CThrowCountDown));
LocalContext context(0, templ);
CompileRun(
"function JSThrowCountDown(count, jsInterval, cInterval, expected) {"
@@ -4931,9 +5159,10 @@ void ThrowValue(const v8::FunctionCallbackInfo<v8::Value>& args) {
THREADED_TEST(ThrowValues) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
- templ->Set(v8_str("Throw"), v8::FunctionTemplate::New(ThrowValue));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(v8_str("Throw"), v8::FunctionTemplate::New(isolate, ThrowValue));
LocalContext context(0, templ);
v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
"function Run(obj) {"
@@ -4946,13 +5175,13 @@ THREADED_TEST(ThrowValues) {
"}"
"[Run('str'), Run(1), Run(0), Run(null), Run(void 0)];"));
CHECK_EQ(5, result->Length());
- CHECK(result->Get(v8::Integer::New(0))->IsString());
- CHECK(result->Get(v8::Integer::New(1))->IsNumber());
- CHECK_EQ(1, result->Get(v8::Integer::New(1))->Int32Value());
- CHECK(result->Get(v8::Integer::New(2))->IsNumber());
- CHECK_EQ(0, result->Get(v8::Integer::New(2))->Int32Value());
- CHECK(result->Get(v8::Integer::New(3))->IsNull());
- CHECK(result->Get(v8::Integer::New(4))->IsUndefined());
+ CHECK(result->Get(v8::Integer::New(isolate, 0))->IsString());
+ CHECK(result->Get(v8::Integer::New(isolate, 1))->IsNumber());
+ CHECK_EQ(1, result->Get(v8::Integer::New(isolate, 1))->Int32Value());
+ CHECK(result->Get(v8::Integer::New(isolate, 2))->IsNumber());
+ CHECK_EQ(0, result->Get(v8::Integer::New(isolate, 2))->Int32Value());
+ CHECK(result->Get(v8::Integer::New(isolate, 3))->IsNull());
+ CHECK(result->Get(v8::Integer::New(isolate, 4))->IsUndefined());
}
@@ -5000,10 +5229,11 @@ void WithTryCatch(const v8::FunctionCallbackInfo<v8::Value>& args) {
THREADED_TEST(TryCatchAndFinally) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
context->Global()->Set(
v8_str("native_with_try_catch"),
- v8::FunctionTemplate::New(WithTryCatch)->GetFunction());
+ v8::FunctionTemplate::New(isolate, WithTryCatch)->GetFunction());
v8::TryCatch try_catch;
CHECK(!try_catch.HasCaught());
CompileRun(
@@ -5069,12 +5299,13 @@ void TryCatchMixedNestingHelper(
// This exercises the ability of TryCatch.ReThrow() to restore the
// inner pending Message before throwing the exception again.
TEST(TryCatchMixedNesting) {
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
v8::V8::Initialize();
v8::TryCatch try_catch;
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("TryCatchMixedNestingHelper"),
- v8::FunctionTemplate::New(TryCatchMixedNestingHelper));
+ v8::FunctionTemplate::New(isolate, TryCatchMixedNestingHelper));
LocalContext context(0, templ);
CompileRunWithOrigin("TryCatchMixedNestingHelper();\n", "outer", 1, 1);
TryCatchMixedNestingCheck(&try_catch);
@@ -5107,10 +5338,10 @@ THREADED_TEST(Equality) {
CHECK(v8::False(isolate)->StrictEquals(v8::False(isolate)));
CHECK(!v8::False(isolate)->StrictEquals(v8::Undefined(isolate)));
- v8::Handle<v8::Object> obj = v8::Object::New();
+ v8::Handle<v8::Object> obj = v8::Object::New(isolate);
v8::Persistent<v8::Object> alias(isolate, obj);
CHECK(v8::Local<v8::Object>::New(isolate, alias)->StrictEquals(obj));
- alias.Dispose();
+ alias.Reset();
CHECK(v8_str("a")->SameValue(v8_str("a")));
CHECK(!v8_str("a")->SameValue(v8_str("b")));
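The alias handle above shows the persistent-handle cleanup rename: the old Dispose()/Clear() pair collapses into a single Reset(). Minimal sketch:

// Sketch only: Reset() both releases the storage cell and clears the handle.
void HoldBriefly(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
  v8::Persistent<v8::Object> keeper(isolate, obj);
  // ... keeper stays valid after locals from an inner scope are gone ...
  keeper.Reset();  // was: keeper.Dispose(); keeper.Clear();
}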
@@ -5144,8 +5375,9 @@ static void GetXValue(Local<String> name,
THREADED_TEST(SimplePropertyRead) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
context->Global()->Set(v8_str("obj"), templ->NewInstance());
Local<Script> script = Script::Compile(v8_str("obj.x"));
@@ -5158,8 +5390,9 @@ THREADED_TEST(SimplePropertyRead) {
THREADED_TEST(DefinePropertyOnAPIAccessor) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
context->Global()->Set(v8_str("obj"), templ->NewInstance());
@@ -5205,8 +5438,9 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
@@ -5257,8 +5491,9 @@ static v8::Handle<v8::Object> GetGlobalProperty(LocalContext* context,
THREADED_TEST(DefineAPIAccessorOnObject) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
LocalContext context;
context->Global()->Set(v8_str("obj1"), templ->NewInstance());
@@ -5331,8 +5566,9 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
LocalContext context;
context->Global()->Set(v8_str("obj1"), templ->NewInstance());
@@ -5387,8 +5623,9 @@ static void Get239Value(Local<String> name,
THREADED_TEST(ElementAPIAccessor) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
LocalContext context;
context->Global()->Set(v8_str("obj1"), templ->NewInstance());
@@ -5425,8 +5662,9 @@ static void SetXValue(Local<String> name,
THREADED_TEST(SimplePropertyWrite) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, SetXValue, v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
@@ -5435,15 +5673,15 @@ THREADED_TEST(SimplePropertyWrite) {
CHECK(xValue.IsEmpty());
script->Run();
CHECK_EQ(v8_num(4), Local<Value>::New(CcTest::isolate(), xValue));
- xValue.Dispose();
- xValue.Clear();
+ xValue.Reset();
}
}
THREADED_TEST(SetterOnly) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), NULL, SetXValue, v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
@@ -5452,15 +5690,15 @@ THREADED_TEST(SetterOnly) {
CHECK(xValue.IsEmpty());
script->Run();
CHECK_EQ(v8_num(4), Local<Value>::New(CcTest::isolate(), xValue));
- xValue.Dispose();
- xValue.Clear();
+ xValue.Reset();
}
}
THREADED_TEST(NoAccessors) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"),
static_cast<v8::AccessorGetterCallback>(NULL),
NULL,
@@ -5483,8 +5721,9 @@ static void XPropertyGetter(Local<String> property,
THREADED_TEST(NamedInterceptorPropertyRead) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(XPropertyGetter);
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
@@ -5497,8 +5736,9 @@ THREADED_TEST(NamedInterceptorPropertyRead) {
THREADED_TEST(NamedInterceptorDictionaryIC) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(XPropertyGetter);
LocalContext context;
// Create an object with a named interceptor.
@@ -5532,7 +5772,7 @@ THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(XPropertyGetter);
// Create an object with a named interceptor.
v8::Local<v8::Object> object = templ->NewInstance();
@@ -5572,15 +5812,18 @@ static void SetXOnPrototypeGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
// Set x on the prototype object and do not handle the get request.
v8::Handle<v8::Value> proto = info.Holder()->GetPrototype();
- proto.As<v8::Object>()->Set(v8_str("x"), v8::Integer::New(23));
+ proto.As<v8::Object>()->Set(v8_str("x"),
+ v8::Integer::New(info.GetIsolate(), 23));
}
// This is a regression test for http://crbug.com/20104. Map
// transitions should not interfere with post interceptor lookup.
THREADED_TEST(NamedInterceptorMapTransitionRead) {
- v8::HandleScope scope(CcTest::isolate());
- Local<v8::FunctionTemplate> function_template = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> function_template =
+ v8::FunctionTemplate::New(isolate);
Local<v8::ObjectTemplate> instance_template
= function_template->InstanceTemplate();
instance_template->SetNamedPropertyHandler(SetXOnPrototypeGetter);
@@ -5616,8 +5859,9 @@ static void IndexedPropertySetter(
THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IndexedPropertyGetter,
IndexedPropertySetter);
LocalContext context;
@@ -5681,8 +5925,9 @@ void UnboxedDoubleIndexedPropertyEnumerator(
// Make sure that the interceptor code in the runtime properly handles
// merging property name lists for double-array-backed arrays.
THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(UnboxedDoubleIndexedPropertyGetter,
UnboxedDoubleIndexedPropertySetter,
0,
@@ -5737,8 +5982,9 @@ static void NonStrictIndexedPropertyGetter(
// Make sure that the interceptor code in the runtime properly handles
// merging property name lists for non-string arguments arrays.
THREADED_TEST(IndexedInterceptorNonStrictArgsWithIndexedAccessor) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(NonStrictIndexedPropertyGetter,
0,
0,
@@ -5763,8 +6009,9 @@ static void IdentityIndexedPropertyGetter(
THREADED_TEST(IndexedInterceptorWithGetOwnPropertyDescriptor) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -5784,8 +6031,9 @@ THREADED_TEST(IndexedInterceptorWithGetOwnPropertyDescriptor) {
THREADED_TEST(IndexedInterceptorWithNoSetter) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -5807,8 +6055,9 @@ THREADED_TEST(IndexedInterceptorWithNoSetter) {
THREADED_TEST(IndexedInterceptorWithAccessorCheck) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -5832,8 +6081,9 @@ THREADED_TEST(IndexedInterceptorWithAccessorCheck) {
THREADED_TEST(IndexedInterceptorWithAccessorCheckSwitchedOn) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -5861,8 +6111,9 @@ THREADED_TEST(IndexedInterceptorWithAccessorCheckSwitchedOn) {
THREADED_TEST(IndexedInterceptorWithDifferentIndices) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -5884,8 +6135,9 @@ THREADED_TEST(IndexedInterceptorWithDifferentIndices) {
THREADED_TEST(IndexedInterceptorWithNegativeIndices) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -5923,8 +6175,9 @@ THREADED_TEST(IndexedInterceptorWithNegativeIndices) {
THREADED_TEST(IndexedInterceptorWithNotSmiLookup) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -5952,8 +6205,9 @@ THREADED_TEST(IndexedInterceptorWithNotSmiLookup) {
THREADED_TEST(IndexedInterceptorGoingMegamorphic) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -5982,8 +6236,9 @@ THREADED_TEST(IndexedInterceptorGoingMegamorphic) {
THREADED_TEST(IndexedInterceptorReceiverTurningSmi) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -6012,8 +6267,9 @@ THREADED_TEST(IndexedInterceptorReceiverTurningSmi) {
THREADED_TEST(IndexedInterceptorOnProto) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
LocalContext context;
@@ -6036,9 +6292,11 @@ THREADED_TEST(IndexedInterceptorOnProto) {
THREADED_TEST(MultiContexts) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<ObjectTemplate> templ = ObjectTemplate::New();
- templ->Set(v8_str("dummy"), v8::FunctionTemplate::New(DummyCallHandler));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(v8_str("dummy"), v8::FunctionTemplate::New(isolate,
+ DummyCallHandler));
Local<String> password = v8_str("Password");
@@ -6125,7 +6383,8 @@ THREADED_TEST(UndetectableObject) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> desc =
+ v8::FunctionTemplate::New(env->GetIsolate());
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
Local<v8::Object> obj = desc->GetFunction()->NewInstance();
@@ -6166,9 +6425,10 @@ THREADED_TEST(UndetectableObject) {
THREADED_TEST(VoidLiteral) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
Local<v8::Object> obj = desc->GetFunction()->NewInstance();
@@ -6209,9 +6469,10 @@ THREADED_TEST(VoidLiteral) {
THREADED_TEST(ExtensibleOnUndetectable) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
- Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
Local<v8::Object> obj = desc->GetFunction()->NewInstance();
@@ -6222,7 +6483,7 @@ THREADED_TEST(ExtensibleOnUndetectable) {
Local<Script> script = Script::Compile(source);
- CHECK_EQ(v8::Integer::New(42), script->Run());
+ CHECK_EQ(v8::Integer::New(isolate, 42), script->Run());
ExpectBoolean("Object.isExtensible(undetectable)", true);
@@ -6243,7 +6504,8 @@ THREADED_TEST(UndetectableString) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<String> obj = String::NewUndetectable("foo");
+ Local<String> obj = String::NewFromUtf8(env->GetIsolate(), "foo",
+ String::kUndetectableString);
env->Global()->Set(v8_str("undetectable"), obj);
ExpectString("undetectable", "foo");
@@ -6284,7 +6546,8 @@ TEST(UndetectableOptimized) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<String> obj = String::NewUndetectable("foo");
+ Local<String> obj = String::NewFromUtf8(env->GetIsolate(), "foo",
+ String::kUndetectableString);
env->Global()->Set(v8_str("undetectable"), obj);
env->Global()->Set(v8_str("detectable"), v8_str("bar"));
@@ -6314,18 +6577,21 @@ TEST(UndetectableOptimized) {
template <typename T> static void USE(T) { }
-// This test is not intended to be run, just type checked.
-static inline void PersistentHandles(v8::Isolate* isolate) {
- USE(PersistentHandles);
+// The point of this test is type checking. We run it only so compilers
+// don't complain about an unused function.
+TEST(PersistentHandles) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
Local<String> str = v8_str("foo");
v8::Persistent<String> p_str(isolate, str);
- p_str.Dispose();
+ p_str.Reset();
Local<Script> scr = Script::Compile(v8_str(""));
v8::Persistent<Script> p_scr(isolate, scr);
- p_scr.Dispose();
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ p_scr.Reset();
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
v8::Persistent<ObjectTemplate> p_templ(isolate, templ);
- p_templ.Dispose();
+ p_templ.Reset();
}
@@ -6338,9 +6604,9 @@ static void HandleLogDelegator(
THREADED_TEST(GlobalObjectTemplate) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- Local<ObjectTemplate> global_template = ObjectTemplate::New();
+ Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
global_template->Set(v8_str("JSNI_Log"),
- v8::FunctionTemplate::New(HandleLogDelegator));
+ v8::FunctionTemplate::New(isolate, HandleLogDelegator));
v8::Local<Context> context = Context::New(isolate, 0, global_template);
Context::Scope context_scope(context);
Script::Compile(v8_str("JSNI_Log('LOG')"))->Run();
@@ -6353,7 +6619,7 @@ static const char* kSimpleExtensionSource =
"}";
-THREADED_TEST(SimpleExtensions) {
+TEST(SimpleExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("simpletest", kSimpleExtensionSource));
const char* extension_names[] = { "simpletest" };
@@ -6362,11 +6628,11 @@ THREADED_TEST(SimpleExtensions) {
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
- CHECK_EQ(result, v8::Integer::New(4));
+ CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
}
-THREADED_TEST(NullExtensions) {
+TEST(NullExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("nulltest", NULL));
const char* extension_names[] = { "nulltest" };
@@ -6375,7 +6641,7 @@ THREADED_TEST(NullExtensions) {
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("1+3"))->Run();
- CHECK_EQ(result, v8::Integer::New(4));
+ CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
}
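The extension tests are also demoted from THREADED_TEST to TEST, and their expected values rebuilt with the isolate-aware Integer::New; the registration flow itself is unchanged. A standalone sketch with an illustrative extension name and source:

// Sketch only: registering a source extension and enabling it for a context.
// Extension name and source are illustrative.
void RunWithSketchExtension(v8::Isolate* isolate) {
  static const char* kSource = "function Foo() { return 4; }";
  v8::RegisterExtension(new v8::Extension("sketchtest", kSource));
  const char* names[] = { "sketchtest" };
  v8::ExtensionConfiguration extensions(1, names);
  v8::HandleScope scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate, &extensions);
  v8::Context::Scope context_scope(context);
  v8::Local<v8::Value> result =
      v8::Script::Compile(v8::String::NewFromUtf8(isolate, "Foo()"))->Run();
  (void)result;  // would be compared against v8::Integer::New(isolate, 4)
}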
@@ -6385,7 +6651,7 @@ static const char* kEmbeddedExtensionSource =
static const int kEmbeddedExtensionSourceValidLen = 34;
-THREADED_TEST(ExtensionMissingSourceLength) {
+TEST(ExtensionMissingSourceLength) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("srclentest_fail",
kEmbeddedExtensionSource));
@@ -6397,7 +6663,7 @@ THREADED_TEST(ExtensionMissingSourceLength) {
}
-THREADED_TEST(ExtensionWithSourceLength) {
+TEST(ExtensionWithSourceLength) {
for (int source_len = kEmbeddedExtensionSourceValidLen - 1;
source_len <= kEmbeddedExtensionSourceValidLen + 1; ++source_len) {
v8::HandleScope handle_scope(CcTest::isolate());
@@ -6413,7 +6679,7 @@ THREADED_TEST(ExtensionWithSourceLength) {
if (source_len == kEmbeddedExtensionSourceValidLen) {
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("Ret54321()"))->Run();
- CHECK_EQ(v8::Integer::New(54321), result);
+ CHECK_EQ(v8::Integer::New(CcTest::isolate(), 54321), result);
} else {
// Anything but exactly the right length should fail to compile.
CHECK_EQ(0, *context);
@@ -6439,7 +6705,7 @@ static const char* kEvalExtensionSource2 =
"})()";
-THREADED_TEST(UseEvalFromExtension) {
+TEST(UseEvalFromExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("evaltest1", kEvalExtensionSource1));
v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2));
@@ -6449,9 +6715,9 @@ THREADED_TEST(UseEvalFromExtension) {
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("UseEval1()"))->Run();
- CHECK_EQ(result, v8::Integer::New(42));
+ CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
result = Script::Compile(v8_str("UseEval2()"))->Run();
- CHECK_EQ(result, v8::Integer::New(42));
+ CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
}
@@ -6473,7 +6739,7 @@ static const char* kWithExtensionSource2 =
"})()";
-THREADED_TEST(UseWithFromExtension) {
+TEST(UseWithFromExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("withtest1", kWithExtensionSource1));
v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2));
@@ -6483,13 +6749,13 @@ THREADED_TEST(UseWithFromExtension) {
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("UseWith1()"))->Run();
- CHECK_EQ(result, v8::Integer::New(87));
+ CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 87));
result = Script::Compile(v8_str("UseWith2()"))->Run();
- CHECK_EQ(result, v8::Integer::New(87));
+ CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 87));
}
-THREADED_TEST(AutoExtensions) {
+TEST(AutoExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
Extension* extension = new Extension("autotest", kSimpleExtensionSource);
extension->set_auto_enable(true);
@@ -6498,7 +6764,7 @@ THREADED_TEST(AutoExtensions) {
Context::New(CcTest::isolate());
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
- CHECK_EQ(result, v8::Integer::New(4));
+ CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
}
@@ -6508,7 +6774,7 @@ static const char* kSyntaxErrorInExtensionSource =
// Test that a syntax error in an extension does not cause a fatal
// error but results in an empty context.
-THREADED_TEST(SyntaxErrorExtensions) {
+TEST(SyntaxErrorExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("syntaxerror",
kSyntaxErrorInExtensionSource));
@@ -6526,7 +6792,7 @@ static const char* kExceptionInExtensionSource =
// Test that an exception when installing an extension does not cause
// a fatal error but results in an empty context.
-THREADED_TEST(ExceptionExtensions) {
+TEST(ExceptionExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("exception",
kExceptionInExtensionSource));
@@ -6548,7 +6814,7 @@ static const char* kNativeCallTest =
"call_runtime_last_index_of('bobbobboellebobboellebobbob');";
// Test that native runtime calls are supported in extensions.
-THREADED_TEST(NativeCallInExtensions) {
+TEST(NativeCallInExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("nativecall",
kNativeCallInExtensionSource));
@@ -6558,7 +6824,7 @@ THREADED_TEST(NativeCallInExtensions) {
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str(kNativeCallTest))->Run();
- CHECK_EQ(result, v8::Integer::New(3));
+ CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 3));
}
@@ -6570,9 +6836,10 @@ class NativeFunctionExtension : public Extension {
: Extension(name, source),
function_(fun) { }
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> name) {
- return v8::FunctionTemplate::New(function_);
+ return v8::FunctionTemplate::New(isolate, function_);
}
static void Echo(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -6583,7 +6850,7 @@ class NativeFunctionExtension : public Extension {
};
-THREADED_TEST(NativeFunctionDeclaration) {
+TEST(NativeFunctionDeclaration) {
v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedecl";
v8::RegisterExtension(new NativeFunctionExtension(name,
@@ -6594,11 +6861,11 @@ THREADED_TEST(NativeFunctionDeclaration) {
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("foo(42);"))->Run();
- CHECK_EQ(result, v8::Integer::New(42));
+ CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
}
-THREADED_TEST(NativeFunctionDeclarationError) {
+TEST(NativeFunctionDeclarationError) {
v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedeclerr";
// Syntax error in extension code.
@@ -6612,7 +6879,7 @@ THREADED_TEST(NativeFunctionDeclarationError) {
}
-THREADED_TEST(NativeFunctionDeclarationErrorEscape) {
+TEST(NativeFunctionDeclarationErrorEscape) {
v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedeclerresc";
// Syntax error in extension code - escape code in "native" means that
@@ -6632,7 +6899,8 @@ static void CheckDependencies(const char* name, const char* expected) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::ExtensionConfiguration config(1, &name);
LocalContext context(&config);
- CHECK_EQ(String::New(expected), context->Global()->Get(v8_str("loaded")));
+ CHECK_EQ(String::NewFromUtf8(CcTest::isolate(), expected),
+ context->Global()->Get(v8_str("loaded")));
}
@@ -6690,21 +6958,25 @@ static void CallFun(const v8::FunctionCallbackInfo<v8::Value>& args) {
class FunctionExtension : public Extension {
public:
FunctionExtension() : Extension("functiontest", kExtensionTestScript) { }
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<String> name);
};
static int lookup_count = 0;
-v8::Handle<v8::FunctionTemplate> FunctionExtension::GetNativeFunction(
- v8::Handle<String> name) {
+v8::Handle<v8::FunctionTemplate> FunctionExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Handle<String> name) {
lookup_count++;
if (name->Equals(v8_str("A"))) {
- return v8::FunctionTemplate::New(CallFun, v8::Integer::New(8));
+ return v8::FunctionTemplate::New(
+ isolate, CallFun, v8::Integer::New(isolate, 8));
} else if (name->Equals(v8_str("B"))) {
- return v8::FunctionTemplate::New(CallFun, v8::Integer::New(7));
+ return v8::FunctionTemplate::New(
+ isolate, CallFun, v8::Integer::New(isolate, 7));
} else if (name->Equals(v8_str("C"))) {
- return v8::FunctionTemplate::New(CallFun, v8::Integer::New(6));
+ return v8::FunctionTemplate::New(
+ isolate, CallFun, v8::Integer::New(isolate, 6));
} else {
return v8::Handle<v8::FunctionTemplate>();
}
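Extension::GetNativeFunction becomes GetNativeFunctionTemplate and now receives the isolate, which flows straight into FunctionTemplate::New and the data value. A minimal subclass sketch with a placeholder callback:

// Sketch only: the renamed virtual now receives the isolate.
// "EchoData" and the extension name are placeholders.
static void EchoData(const v8::FunctionCallbackInfo<v8::Value>& args) {
  args.GetReturnValue().Set(args.Data());
}

class SketchExtension : public v8::Extension {
 public:
  SketchExtension() : v8::Extension("sketch-native", "native function F();") {}
  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
      v8::Isolate* isolate, v8::Handle<v8::String> name) {
    if (name->Equals(v8::String::NewFromUtf8(isolate, "F"))) {
      return v8::FunctionTemplate::New(isolate, EchoData,
                                       v8::Integer::New(isolate, 8));
    }
    return v8::Handle<v8::FunctionTemplate>();  // unknown name: empty handle
  }
};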
@@ -6718,9 +6990,12 @@ THREADED_TEST(FunctionLookup) {
v8::ExtensionConfiguration config(1, exts);
LocalContext context(&config);
CHECK_EQ(3, lookup_count);
- CHECK_EQ(v8::Integer::New(8), Script::Compile(v8_str("Foo(0)"))->Run());
- CHECK_EQ(v8::Integer::New(7), Script::Compile(v8_str("Foo(1)"))->Run());
- CHECK_EQ(v8::Integer::New(6), Script::Compile(v8_str("Foo(2)"))->Run());
+ CHECK_EQ(v8::Integer::New(CcTest::isolate(), 8),
+ Script::Compile(v8_str("Foo(0)"))->Run());
+ CHECK_EQ(v8::Integer::New(CcTest::isolate(), 7),
+ Script::Compile(v8_str("Foo(1)"))->Run());
+ CHECK_EQ(v8::Integer::New(CcTest::isolate(), 6),
+ Script::Compile(v8_str("Foo(2)"))->Run());
}
@@ -6733,11 +7008,11 @@ THREADED_TEST(NativeFunctionConstructCall) {
for (int i = 0; i < 10; i++) {
// Run a few times to ensure that allocation of objects doesn't
// change behavior of a constructor function.
- CHECK_EQ(v8::Integer::New(8),
+ CHECK_EQ(v8::Integer::New(CcTest::isolate(), 8),
Script::Compile(v8_str("(new A()).data"))->Run());
- CHECK_EQ(v8::Integer::New(7),
+ CHECK_EQ(v8::Integer::New(CcTest::isolate(), 7),
Script::Compile(v8_str("(new B()).data"))->Run());
- CHECK_EQ(v8::Integer::New(6),
+ CHECK_EQ(v8::Integer::New(CcTest::isolate(), 6),
Script::Compile(v8_str("(new C()).data"))->Run());
}
}
@@ -6771,33 +7046,6 @@ TEST(ErrorReporting) {
}
-static const char* js_code_causing_huge_string_flattening =
- "var str = 'X';"
- "for (var i = 0; i < 30; i++) {"
- " str = str + str;"
- "}"
- "str.match(/X/);";
-
-
-void OOMCallback(const char* location, const char* message) {
- exit(0);
-}
-
-
-TEST(RegexpOutOfMemory) {
- // Execute a script that causes out of memory when flattening a string.
- v8::HandleScope scope(CcTest::isolate());
- v8::V8::SetFatalErrorHandler(OOMCallback);
- LocalContext context;
- Local<Script> script =
- Script::Compile(String::New(js_code_causing_huge_string_flattening));
- last_location = NULL;
- script->Run();
-
- CHECK(false); // Should not return.
-}
-
-
static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message,
v8::Handle<Value> data) {
CHECK(message->GetScriptResourceName()->IsUndefined());
@@ -6816,93 +7064,16 @@ THREADED_TEST(ErrorWithMissingScriptInfo) {
}
-int global_index = 0;
-
-class Snorkel {
- public:
- Snorkel() { index_ = global_index++; }
- int index_;
+struct FlagAndPersistent {
+ bool flag;
+ v8::Persistent<v8::Object> handle;
};
-class Whammy {
- public:
- explicit Whammy(v8::Isolate* isolate) : cursor_(0), isolate_(isolate) { }
- ~Whammy() { script_.Dispose(); }
- v8::Handle<Script> getScript() {
- if (script_.IsEmpty()) script_.Reset(isolate_, v8_compile("({}).blammo"));
- return Local<Script>::New(isolate_, script_);
- }
- public:
- static const int kObjectCount = 256;
- int cursor_;
- v8::Isolate* isolate_;
- v8::Persistent<v8::Object> objects_[kObjectCount];
- v8::Persistent<Script> script_;
-};
-
-static void HandleWeakReference(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- Snorkel* snorkel) {
- delete snorkel;
- obj->ClearWeak();
-}
-
-void WhammyPropertyGetter(Local<String> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- Whammy* whammy =
- static_cast<Whammy*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
-
- v8::Persistent<v8::Object>& prev = whammy->objects_[whammy->cursor_];
-
- v8::Handle<v8::Object> obj = v8::Object::New();
- if (!prev.IsEmpty()) {
- v8::Local<v8::Object>::New(info.GetIsolate(), prev)
- ->Set(v8_str("next"), obj);
- prev.MakeWeak<Value, Snorkel>(new Snorkel(), &HandleWeakReference);
- whammy->objects_[whammy->cursor_].Clear();
- }
- whammy->objects_[whammy->cursor_].Reset(info.GetIsolate(), obj);
- whammy->cursor_ = (whammy->cursor_ + 1) % Whammy::kObjectCount;
- info.GetReturnValue().Set(whammy->getScript()->Run());
-}
-
-
-THREADED_TEST(WeakReference) {
- v8::HandleScope handle_scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ= v8::ObjectTemplate::New();
- Whammy* whammy = new Whammy(CcTest::isolate());
- templ->SetNamedPropertyHandler(WhammyPropertyGetter,
- 0, 0, 0, 0,
- v8::External::New(whammy));
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
- Context::Scope context_scope(context);
-
- v8::Handle<v8::Object> interceptor = templ->NewInstance();
- context->Global()->Set(v8_str("whammy"), interceptor);
- const char* code =
- "var last;"
- "for (var i = 0; i < 10000; i++) {"
- " var obj = whammy.length;"
- " if (last) last.next = obj;"
- " last = obj;"
- "}"
- "gc();"
- "4";
- v8::Handle<Value> result = CompileRun(code);
- CHECK_EQ(4.0, result->NumberValue());
- delete whammy;
-}
-
-
-static void DisposeAndSetFlag(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* obj,
- bool* data) {
- obj->Dispose();
- *(data) = true;
+static void DisposeAndSetFlag(
+ const v8::WeakCallbackData<v8::Object, FlagAndPersistent>& data) {
+ data.GetParameter()->handle.Reset();
+ data.GetParameter()->flag = true;
}
@@ -6912,30 +7083,30 @@ THREADED_TEST(IndependentWeakHandle) {
v8::Handle<Context> context = Context::New(iso);
Context::Scope context_scope(context);
- v8::Persistent<v8::Object> object_a, object_b;
+ FlagAndPersistent object_a, object_b;
{
v8::HandleScope handle_scope(iso);
- object_a.Reset(iso, v8::Object::New());
- object_b.Reset(iso, v8::Object::New());
- }
-
- bool object_a_disposed = false;
- bool object_b_disposed = false;
- object_a.MakeWeak(&object_a_disposed, &DisposeAndSetFlag);
- object_b.MakeWeak(&object_b_disposed, &DisposeAndSetFlag);
- CHECK(!object_b.IsIndependent());
- object_a.MarkIndependent();
- object_b.MarkIndependent();
- CHECK(object_b.IsIndependent());
- CcTest::heap()->PerformScavenge();
- CHECK(object_a_disposed);
- CHECK(object_b_disposed);
+ object_a.handle.Reset(iso, v8::Object::New(iso));
+ object_b.handle.Reset(iso, v8::Object::New(iso));
+ }
+
+ object_a.flag = false;
+ object_b.flag = false;
+ object_a.handle.SetWeak(&object_a, &DisposeAndSetFlag);
+ object_b.handle.SetWeak(&object_b, &DisposeAndSetFlag);
+ CHECK(!object_b.handle.IsIndependent());
+ object_a.handle.MarkIndependent();
+ object_b.handle.MarkIndependent();
+ CHECK(object_b.handle.IsIndependent());
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CHECK(object_a.flag);
+ CHECK(object_b.flag);
}
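
Condensed, the weak-handle migration above amounts to the following shape, reusing the FlagAndPersistent struct and DisposeAndSetFlag callback defined earlier (a sketch only, assuming a live isolate and an enclosing HandleScope as in the test):

    FlagAndPersistent holder;
    holder.flag = false;
    holder.handle.Reset(isolate, v8::Object::New(isolate));
    // SetWeak replaces MakeWeak; the callback now receives a WeakCallbackData.
    holder.handle.SetWeak(&holder, &DisposeAndSetFlag);
    holder.handle.MarkIndependent();
    CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // scavenge runs the callback
    CHECK(holder.flag);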
static void InvokeScavenge() {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
}
@@ -6944,20 +7115,18 @@ static void InvokeMarkSweep() {
}
-static void ForceScavenge(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* obj,
- bool* data) {
- obj->Dispose();
- *(data) = true;
+static void ForceScavenge(
+ const v8::WeakCallbackData<v8::Object, FlagAndPersistent>& data) {
+ data.GetParameter()->handle.Reset();
+ data.GetParameter()->flag = true;
InvokeScavenge();
}
-static void ForceMarkSweep(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* obj,
- bool* data) {
- obj->Dispose();
- *(data) = true;
+static void ForceMarkSweep(
+ const v8::WeakCallbackData<v8::Object, FlagAndPersistent>& data) {
+ data.GetParameter()->handle.Reset();
+ data.GetParameter()->flag = true;
InvokeMarkSweep();
}
@@ -6969,7 +7138,8 @@ THREADED_TEST(GCFromWeakCallbacks) {
Context::Scope context_scope(context);
static const int kNumberOfGCTypes = 2;
- typedef v8::WeakReferenceCallbacks<v8::Object, bool>::Revivable Callback;
+ typedef v8::WeakCallbackData<v8::Object, FlagAndPersistent>::Callback
+ Callback;
Callback gc_forcing_callback[kNumberOfGCTypes] =
{&ForceScavenge, &ForceMarkSweep};
@@ -6978,26 +7148,25 @@ THREADED_TEST(GCFromWeakCallbacks) {
for (int outer_gc = 0; outer_gc < kNumberOfGCTypes; outer_gc++) {
for (int inner_gc = 0; inner_gc < kNumberOfGCTypes; inner_gc++) {
- v8::Persistent<v8::Object> object;
+ FlagAndPersistent object;
{
v8::HandleScope handle_scope(isolate);
- object.Reset(isolate, v8::Object::New());
+ object.handle.Reset(isolate, v8::Object::New(isolate));
}
- bool disposed = false;
- object.MakeWeak(&disposed, gc_forcing_callback[inner_gc]);
- object.MarkIndependent();
+ object.flag = false;
+ object.handle.SetWeak(&object, gc_forcing_callback[inner_gc]);
+ object.handle.MarkIndependent();
invoke_gc[outer_gc]();
- CHECK(disposed);
+ CHECK(object.flag);
}
}
}
-static void RevivingCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* obj,
- bool* data) {
- obj->ClearWeak();
- *(data) = true;
+static void RevivingCallback(
+ const v8::WeakCallbackData<v8::Object, FlagAndPersistent>& data) {
+ data.GetParameter()->handle.ClearWeak();
+ data.GetParameter()->flag = true;
}
@@ -7007,26 +7176,27 @@ THREADED_TEST(IndependentHandleRevival) {
v8::Handle<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
- v8::Persistent<v8::Object> object;
+ FlagAndPersistent object;
{
v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Object> o = v8::Object::New();
- object.Reset(isolate, o);
- o->Set(v8_str("x"), v8::Integer::New(1));
+ v8::Local<v8::Object> o = v8::Object::New(isolate);
+ object.handle.Reset(isolate, o);
+ o->Set(v8_str("x"), v8::Integer::New(isolate, 1));
v8::Local<String> y_str = v8_str("y");
o->Set(y_str, y_str);
}
- bool revived = false;
- object.MakeWeak(&revived, &RevivingCallback);
- object.MarkIndependent();
- CcTest::heap()->PerformScavenge();
- CHECK(revived);
+ object.flag = false;
+ object.handle.SetWeak(&object, &RevivingCallback);
+ object.handle.MarkIndependent();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CHECK(object.flag);
CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
{
v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Object> o = v8::Local<v8::Object>::New(isolate, object);
+ v8::Local<v8::Object> o =
+ v8::Local<v8::Object>::New(isolate, object.handle);
v8::Local<String> y_str = v8_str("y");
- CHECK_EQ(v8::Integer::New(1), o->Get(v8_str("x")));
+ CHECK_EQ(v8::Integer::New(isolate, 1), o->Get(v8_str("x")));
CHECK(o->Get(y_str)->Equals(y_str));
}
}
@@ -7041,9 +7211,9 @@ static void ArgumentsTestCallback(
v8::Isolate* isolate = args.GetIsolate();
CHECK_EQ(args_fun, args.Callee());
CHECK_EQ(3, args.Length());
- CHECK_EQ(v8::Integer::New(1, isolate), args[0]);
- CHECK_EQ(v8::Integer::New(2, isolate), args[1]);
- CHECK_EQ(v8::Integer::New(3, isolate), args[2]);
+ CHECK_EQ(v8::Integer::New(isolate, 1), args[0]);
+ CHECK_EQ(v8::Integer::New(isolate, 2), args[1]);
+ CHECK_EQ(v8::Integer::New(isolate, 3), args[2]);
CHECK_EQ(v8::Undefined(isolate), args[3]);
v8::HandleScope scope(args.GetIsolate());
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -7051,9 +7221,11 @@ static void ArgumentsTestCallback(
THREADED_TEST(Arguments) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New();
- global->Set(v8_str("f"), v8::FunctionTemplate::New(ArgumentsTestCallback));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New(isolate);
+ global->Set(v8_str("f"),
+ v8::FunctionTemplate::New(isolate, ArgumentsTestCallback));
LocalContext context(NULL, global);
args_fun = context->Global()->Get(v8_str("f")).As<Function>();
v8_compile("f(1, 2, 3)")->Run();
@@ -7091,8 +7263,9 @@ static void IDeleter(uint32_t index,
THREADED_TEST(Deleter) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetNamedPropertyHandler(NoBlockGetterX, NULL, NULL, PDeleter, NULL);
obj->SetIndexedPropertyHandler(NoBlockGetterI, NULL, NULL, IDeleter, NULL);
LocalContext context;
@@ -7136,26 +7309,27 @@ static void IndexedGetK(uint32_t index,
static void NamedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
ApiTestFuzzer::Fuzz();
- v8::Handle<v8::Array> result = v8::Array::New(3);
- result->Set(v8::Integer::New(0), v8_str("foo"));
- result->Set(v8::Integer::New(1), v8_str("bar"));
- result->Set(v8::Integer::New(2), v8_str("baz"));
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 3);
+ result->Set(v8::Integer::New(info.GetIsolate(), 0), v8_str("foo"));
+ result->Set(v8::Integer::New(info.GetIsolate(), 1), v8_str("bar"));
+ result->Set(v8::Integer::New(info.GetIsolate(), 2), v8_str("baz"));
info.GetReturnValue().Set(result);
}
static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
ApiTestFuzzer::Fuzz();
- v8::Handle<v8::Array> result = v8::Array::New(2);
- result->Set(v8::Integer::New(0), v8_str("0"));
- result->Set(v8::Integer::New(1), v8_str("1"));
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
+ result->Set(v8::Integer::New(info.GetIsolate(), 0), v8_str("0"));
+ result->Set(v8::Integer::New(info.GetIsolate(), 1), v8_str("1"));
info.GetReturnValue().Set(result);
}
THREADED_TEST(Enumerators) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetNamedPropertyHandler(GetK, NULL, NULL, NULL, NamedEnum);
obj->SetIndexedPropertyHandler(IndexedGetK, NULL, NULL, NULL, IndexedEnum);
LocalContext context;
@@ -7186,27 +7360,27 @@ THREADED_TEST(Enumerators) {
// documenting our behavior.
CHECK_EQ(17, result->Length());
// Indexed properties in numerical order.
- CHECK_EQ(v8_str("5"), result->Get(v8::Integer::New(0)));
- CHECK_EQ(v8_str("10"), result->Get(v8::Integer::New(1)));
- CHECK_EQ(v8_str("140000"), result->Get(v8::Integer::New(2)));
- CHECK_EQ(v8_str("4294967295"), result->Get(v8::Integer::New(3)));
+ CHECK_EQ(v8_str("5"), result->Get(v8::Integer::New(isolate, 0)));
+ CHECK_EQ(v8_str("10"), result->Get(v8::Integer::New(isolate, 1)));
+ CHECK_EQ(v8_str("140000"), result->Get(v8::Integer::New(isolate, 2)));
+ CHECK_EQ(v8_str("4294967295"), result->Get(v8::Integer::New(isolate, 3)));
// Indexed interceptor properties in the order they are returned
// from the enumerator interceptor.
- CHECK_EQ(v8_str("0"), result->Get(v8::Integer::New(4)));
- CHECK_EQ(v8_str("1"), result->Get(v8::Integer::New(5)));
+ CHECK_EQ(v8_str("0"), result->Get(v8::Integer::New(isolate, 4)));
+ CHECK_EQ(v8_str("1"), result->Get(v8::Integer::New(isolate, 5)));
// Named properties in insertion order.
- CHECK_EQ(v8_str("a"), result->Get(v8::Integer::New(6)));
- CHECK_EQ(v8_str("b"), result->Get(v8::Integer::New(7)));
- CHECK_EQ(v8_str("c"), result->Get(v8::Integer::New(8)));
- CHECK_EQ(v8_str("4294967296"), result->Get(v8::Integer::New(9)));
- CHECK_EQ(v8_str("d"), result->Get(v8::Integer::New(10)));
- CHECK_EQ(v8_str("e"), result->Get(v8::Integer::New(11)));
- CHECK_EQ(v8_str("30000000000"), result->Get(v8::Integer::New(12)));
- CHECK_EQ(v8_str("f"), result->Get(v8::Integer::New(13)));
+ CHECK_EQ(v8_str("a"), result->Get(v8::Integer::New(isolate, 6)));
+ CHECK_EQ(v8_str("b"), result->Get(v8::Integer::New(isolate, 7)));
+ CHECK_EQ(v8_str("c"), result->Get(v8::Integer::New(isolate, 8)));
+ CHECK_EQ(v8_str("4294967296"), result->Get(v8::Integer::New(isolate, 9)));
+ CHECK_EQ(v8_str("d"), result->Get(v8::Integer::New(isolate, 10)));
+ CHECK_EQ(v8_str("e"), result->Get(v8::Integer::New(isolate, 11)));
+ CHECK_EQ(v8_str("30000000000"), result->Get(v8::Integer::New(isolate, 12)));
+ CHECK_EQ(v8_str("f"), result->Get(v8::Integer::New(isolate, 13)));
// Named interceptor properties.
- CHECK_EQ(v8_str("foo"), result->Get(v8::Integer::New(14)));
- CHECK_EQ(v8_str("bar"), result->Get(v8::Integer::New(15)));
- CHECK_EQ(v8_str("baz"), result->Get(v8::Integer::New(16)));
+ CHECK_EQ(v8_str("foo"), result->Get(v8::Integer::New(isolate, 14)));
+ CHECK_EQ(v8_str("bar"), result->Get(v8::Integer::New(isolate, 15)));
+ CHECK_EQ(v8_str("baz"), result->Get(v8::Integer::New(isolate, 16)));
}
@@ -7269,8 +7443,9 @@ static void PGetter2(Local<String> name,
THREADED_TEST(GetterHolders) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("p1"), PGetter);
obj->SetAccessor(v8_str("p2"), PGetter);
obj->SetAccessor(v8_str("p3"), PGetter);
@@ -7282,8 +7457,9 @@ THREADED_TEST(GetterHolders) {
THREADED_TEST(PreInterceptorHolders) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetNamedPropertyHandler(PGetter2);
p_getter_count2 = 0;
RunHolderTest(obj);
@@ -7294,7 +7470,7 @@ THREADED_TEST(PreInterceptorHolders) {
THREADED_TEST(ObjectInstantiation) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("t"), PGetter2);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -7349,7 +7525,24 @@ THREADED_TEST(StringWrite) {
v8::Handle<String> str = v8_str("abcde");
// abc<Icelandic eth><Unicode snowman>.
v8::Handle<String> str2 = v8_str("abc\303\260\342\230\203");
- v8::Handle<String> str3 = v8::String::New("abc\0def", 7);
+ v8::Handle<String> str3 = v8::String::NewFromUtf8(
+ context->GetIsolate(), "abc\0def", v8::String::kNormalString, 7);
+ // "ab" + lead surrogate + "cd" + trail surrogate + "ef"
+ uint16_t orphans[8] = { 0x61, 0x62, 0xd800, 0x63, 0x64, 0xdc00, 0x65, 0x66 };
+ v8::Handle<String> orphans_str = v8::String::NewFromTwoByte(
+ context->GetIsolate(), orphans, v8::String::kNormalString, 8);
+ // single lead surrogate
+ uint16_t lead[1] = { 0xd800 };
+ v8::Handle<String> lead_str = v8::String::NewFromTwoByte(
+ context->GetIsolate(), lead, v8::String::kNormalString, 1);
+ // single trail surrogate
+ uint16_t trail[1] = { 0xdc00 };
+ v8::Handle<String> trail_str = v8::String::NewFromTwoByte(
+ context->GetIsolate(), trail, v8::String::kNormalString, 1);
+ // surrogate pair
+ uint16_t pair[2] = { 0xd800, 0xdc00 };
+ v8::Handle<String> pair_str = v8::String::NewFromTwoByte(
+ context->GetIsolate(), pair, v8::String::kNormalString, 2);
const int kStride = 4; // Must match stride in for loops in JS below.
CompileRun(
"var left = '';"
@@ -7423,6 +7616,53 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(2, charlen);
CHECK_EQ(0, strncmp(utf8buf, "ab\1", 3));
+ // allow orphan surrogates by default
+ memset(utf8buf, 0x1, 1000);
+ len = orphans_str->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen);
+ CHECK_EQ(13, len);
+ CHECK_EQ(8, charlen);
+ CHECK_EQ(0, strcmp(utf8buf, "ab\355\240\200cd\355\260\200ef"));
+
+ // replace orphan surrogates with unicode replacement character
+ memset(utf8buf, 0x1, 1000);
+ len = orphans_str->WriteUtf8(utf8buf,
+ sizeof(utf8buf),
+ &charlen,
+ String::REPLACE_INVALID_UTF8);
+ CHECK_EQ(13, len);
+ CHECK_EQ(8, charlen);
+ CHECK_EQ(0, strcmp(utf8buf, "ab\357\277\275cd\357\277\275ef"));
+
+ // replace single lead surrogate with unicode replacement character
+ memset(utf8buf, 0x1, 1000);
+ len = lead_str->WriteUtf8(utf8buf,
+ sizeof(utf8buf),
+ &charlen,
+ String::REPLACE_INVALID_UTF8);
+ CHECK_EQ(4, len);
+ CHECK_EQ(1, charlen);
+ CHECK_EQ(0, strcmp(utf8buf, "\357\277\275"));
+
+ // replace single trail surrogate with unicode replacement character
+ memset(utf8buf, 0x1, 1000);
+ len = trail_str->WriteUtf8(utf8buf,
+ sizeof(utf8buf),
+ &charlen,
+ String::REPLACE_INVALID_UTF8);
+ CHECK_EQ(4, len);
+ CHECK_EQ(1, charlen);
+ CHECK_EQ(0, strcmp(utf8buf, "\357\277\275"));
+
+ // do not replace / write anything if surrogate pair does not fit the buffer
+ // space
+ memset(utf8buf, 0x1, 1000);
+ len = pair_str->WriteUtf8(utf8buf,
+ 3,
+ &charlen,
+ String::REPLACE_INVALID_UTF8);
+ CHECK_EQ(0, len);
+ CHECK_EQ(0, charlen);
+
memset(utf8buf, 0x1, sizeof(utf8buf));
len = GetUtf8Length(left_tree);
int utf8_expected =
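
The new assertions exercise String::WriteUtf8 with the REPLACE_INVALID_UTF8 option; stripped of the test fixtures, the call looks roughly like this (a sketch, where str is any handle holding unpaired surrogates such as orphans_str above):

    char buf[32];
    int nchars = 0;
    // With REPLACE_INVALID_UTF8, unpaired surrogates come out as U+FFFD (EF BF BD)
    // rather than the CESU-8 byte sequences written by default.
    int nbytes = str->WriteUtf8(buf, sizeof(buf), &nchars,
                                v8::String::REPLACE_INVALID_UTF8);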
@@ -7743,10 +7983,12 @@ static bool SameSymbol(Handle<String> s1, Handle<String> s2) {
return *is1 == *is2;
}
-
-static void SameSymbolHelper(const char* a, const char* b) {
- Handle<String> symbol1 = v8::String::NewSymbol(a);
- Handle<String> symbol2 = v8::String::NewSymbol(b);
+static void SameSymbolHelper(v8::Isolate* isolate, const char* a,
+ const char* b) {
+ Handle<String> symbol1 =
+ v8::String::NewFromUtf8(isolate, a, v8::String::kInternalizedString);
+ Handle<String> symbol2 =
+ v8::String::NewFromUtf8(isolate, b, v8::String::kInternalizedString);
CHECK(SameSymbol(symbol1, symbol2));
}
@@ -7755,17 +7997,23 @@ THREADED_TEST(Utf16Symbol) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Handle<String> symbol1 = v8::String::NewSymbol("abc");
- Handle<String> symbol2 = v8::String::NewSymbol("abc");
+ Handle<String> symbol1 = v8::String::NewFromUtf8(
+ context->GetIsolate(), "abc", v8::String::kInternalizedString);
+ Handle<String> symbol2 = v8::String::NewFromUtf8(
+ context->GetIsolate(), "abc", v8::String::kInternalizedString);
CHECK(SameSymbol(symbol1, symbol2));
- SameSymbolHelper("\360\220\220\205", // 4 byte encoding.
+ SameSymbolHelper(context->GetIsolate(),
+ "\360\220\220\205", // 4 byte encoding.
"\355\240\201\355\260\205"); // 2 3-byte surrogates.
- SameSymbolHelper("\355\240\201\355\260\206", // 2 3-byte surrogates.
+ SameSymbolHelper(context->GetIsolate(),
+ "\355\240\201\355\260\206", // 2 3-byte surrogates.
"\360\220\220\206"); // 4 byte encoding.
- SameSymbolHelper("x\360\220\220\205", // 4 byte encoding.
+ SameSymbolHelper(context->GetIsolate(),
+ "x\360\220\220\205", // 4 byte encoding.
"x\355\240\201\355\260\205"); // 2 3-byte surrogates.
- SameSymbolHelper("x\355\240\201\355\260\206", // 2 3-byte surrogates.
+ SameSymbolHelper(context->GetIsolate(),
+ "x\355\240\201\355\260\206", // 2 3-byte surrogates.
"x\360\220\220\206"); // 4 byte encoding.
CompileRun(
"var sym0 = 'benedictus';"
@@ -7782,12 +8030,22 @@ THREADED_TEST(Utf16Symbol) {
"if (sym3.charCodeAt(2) != 0xdc07) throw sym1.charCodeAt(2);"
"if (sym4.length != 3) throw sym4;"
"if (sym4.charCodeAt(2) != 0xdc08) throw sym2.charCodeAt(2);");
- Handle<String> sym0 = v8::String::NewSymbol("benedictus");
- Handle<String> sym0b = v8::String::NewSymbol("S\303\270ren");
- Handle<String> sym1 = v8::String::NewSymbol("\355\240\201\355\260\207");
- Handle<String> sym2 = v8::String::NewSymbol("\360\220\220\210");
- Handle<String> sym3 = v8::String::NewSymbol("x\355\240\201\355\260\207");
- Handle<String> sym4 = v8::String::NewSymbol("x\360\220\220\210");
+ Handle<String> sym0 = v8::String::NewFromUtf8(
+ context->GetIsolate(), "benedictus", v8::String::kInternalizedString);
+ Handle<String> sym0b = v8::String::NewFromUtf8(
+ context->GetIsolate(), "S\303\270ren", v8::String::kInternalizedString);
+ Handle<String> sym1 =
+ v8::String::NewFromUtf8(context->GetIsolate(), "\355\240\201\355\260\207",
+ v8::String::kInternalizedString);
+ Handle<String> sym2 =
+ v8::String::NewFromUtf8(context->GetIsolate(), "\360\220\220\210",
+ v8::String::kInternalizedString);
+ Handle<String> sym3 = v8::String::NewFromUtf8(
+ context->GetIsolate(), "x\355\240\201\355\260\207",
+ v8::String::kInternalizedString);
+ Handle<String> sym4 =
+ v8::String::NewFromUtf8(context->GetIsolate(), "x\360\220\220\210",
+ v8::String::kInternalizedString);
v8::Local<v8::Object> global = context->Global();
Local<Value> s0 = global->Get(v8_str("sym0"));
Local<Value> s0b = global->Get(v8_str("sym0b"));
@@ -7806,7 +8064,8 @@ THREADED_TEST(Utf16Symbol) {
THREADED_TEST(ToArrayIndex) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Handle<String> str = v8_str("42");
v8::Handle<v8::Uint32> index = str->ToArrayIndex();
@@ -7822,14 +8081,14 @@ THREADED_TEST(ToArrayIndex) {
index = str->ToArrayIndex();
CHECK(!index.IsEmpty());
CHECK_EQ(4294967295.0, index->Uint32Value());
- v8::Handle<v8::Number> num = v8::Number::New(1);
+ v8::Handle<v8::Number> num = v8::Number::New(isolate, 1);
index = num->ToArrayIndex();
CHECK(!index.IsEmpty());
CHECK_EQ(1.0, index->Uint32Value());
- num = v8::Number::New(-1);
+ num = v8::Number::New(isolate, -1);
index = num->ToArrayIndex();
CHECK(index.IsEmpty());
- v8::Handle<v8::Object> obj = v8::Object::New();
+ v8::Handle<v8::Object> obj = v8::Object::New(isolate);
index = obj->ToArrayIndex();
CHECK(index.IsEmpty());
}
@@ -7877,8 +8136,9 @@ static void YSetter(Local<String> name,
THREADED_TEST(DeleteAccessor) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("y"), YGetter, YSetter);
LocalContext context;
v8::Handle<v8::Object> holder = obj->NewInstance();
@@ -7890,14 +8150,15 @@ THREADED_TEST(DeleteAccessor) {
THREADED_TEST(TypeSwitch) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> templ1 = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> templ2 = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> templ3 = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> templ1 = v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> templ2 = v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> templ3 = v8::FunctionTemplate::New(isolate);
v8::Handle<v8::FunctionTemplate> templs[3] = { templ1, templ2, templ3 };
v8::Handle<v8::TypeSwitch> type_switch = v8::TypeSwitch::New(3, templs);
LocalContext context;
- v8::Handle<v8::Object> obj0 = v8::Object::New();
+ v8::Handle<v8::Object> obj0 = v8::Object::New(isolate);
v8::Handle<v8::Object> obj1 = templ1->GetFunction()->NewInstance();
v8::Handle<v8::Object> obj2 = templ2->GetFunction()->NewInstance();
v8::Handle<v8::Object> obj3 = templ3->GetFunction()->NewInstance();
@@ -7971,10 +8232,12 @@ static void ApiUncaughtExceptionTestListener(v8::Handle<v8::Message>,
TEST(ApiUncaughtException) {
report_count = 0;
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::V8::AddMessageListener(ApiUncaughtExceptionTestListener);
- Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(TroubleCallback);
+ Local<v8::FunctionTemplate> fun =
+ v8::FunctionTemplate::New(isolate, TroubleCallback);
v8::Local<v8::Object> global = env->Global();
global->Set(v8_str("trouble"), fun->GetFunction());
@@ -8011,17 +8274,22 @@ static void ExceptionInNativeScriptTestListener(v8::Handle<v8::Message> message,
TEST(ExceptionInNativeScript) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::V8::AddMessageListener(ExceptionInNativeScriptTestListener);
- Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(TroubleCallback);
+ Local<v8::FunctionTemplate> fun =
+ v8::FunctionTemplate::New(isolate, TroubleCallback);
v8::Local<v8::Object> global = env->Global();
global->Set(v8_str("trouble"), fun->GetFunction());
- Script::Compile(v8_str("function trouble() {\n"
- " var o = {};\n"
- " new o.foo();\n"
- "};"), v8::String::New(script_resource_name))->Run();
+ Script::Compile(
+ v8_str(
+ "function trouble() {\n"
+ " var o = {};\n"
+ " new o.foo();\n"
+ "};"),
+ v8::String::NewFromUtf8(isolate, script_resource_name))->Run();
Local<Value> trouble = global->Get(v8_str("trouble"));
CHECK(trouble->IsFunction());
Function::Cast(*trouble)->Call(global, 0, NULL);
@@ -8061,13 +8329,14 @@ TEST(TryCatchFinallyUsingTryCatchHandler) {
// SecurityHandler can't be run twice
TEST(SecurityHandler) {
- v8::HandleScope scope0(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope0(isolate);
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallbacks(NamedSecurityTestCallback,
IndexedSecurityTestCallback);
// Create an environment
- v8::Handle<Context> context0 =
- Context::New(CcTest::isolate(), NULL, global_template);
+ v8::Handle<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
v8::Handle<v8::Object> global0 = context0->Global();
@@ -8080,10 +8349,10 @@ TEST(SecurityHandler) {
CHECK_EQ(999, z0->Int32Value());
// Create another environment, should fail security checks.
- v8::HandleScope scope1(CcTest::isolate());
+ v8::HandleScope scope1(isolate);
v8::Handle<Context> context1 =
- Context::New(CcTest::isolate(), NULL, global_template);
+ Context::New(isolate, NULL, global_template);
context1->Enter();
v8::Handle<v8::Object> global1 = context1->Global();
@@ -8101,7 +8370,7 @@ TEST(SecurityHandler) {
// Create another environment, should pass security checks.
{ g_security_callback_result = true; // allow security handler to pass.
- v8::HandleScope scope2(CcTest::isolate());
+ v8::HandleScope scope2(isolate);
LocalContext context2;
v8::Handle<v8::Object> global2 = context2->Global();
global2->Set(v8_str("othercontext"), global0);
@@ -8343,7 +8612,7 @@ TEST(ContextDetachGlobal) {
// Create a function in env2 and add a reference to it in env1.
Local<v8::Object> global2 = env2->Global();
- global2->Set(v8_str("prop"), v8::Integer::New(1));
+ global2->Set(v8_str("prop"), v8::Integer::New(env2->GetIsolate(), 1));
CompileRun("function getProp() {return prop;}");
env1->Global()->Set(v8_str("getProp"),
@@ -8352,8 +8621,6 @@ TEST(ContextDetachGlobal) {
// Detach env2's global, and reuse the global object of env2
env2->Exit();
env2->DetachGlobal();
- // env2 has a new global object.
- CHECK(!env2->Global()->Equals(global2));
v8::Handle<Context> env3 = Context::New(env1->GetIsolate(),
0,
@@ -8366,8 +8633,8 @@ TEST(ContextDetachGlobal) {
CHECK_EQ(global2, global3);
CHECK(global3->Get(v8_str("prop"))->IsUndefined());
CHECK(global3->Get(v8_str("getProp"))->IsUndefined());
- global3->Set(v8_str("prop"), v8::Integer::New(-1));
- global3->Set(v8_str("prop2"), v8::Integer::New(2));
+ global3->Set(v8_str("prop"), v8::Integer::New(env3->GetIsolate(), -1));
+ global3->Set(v8_str("prop2"), v8::Integer::New(env3->GetIsolate(), 2));
env3->Exit();
// Call getProp in env1, and it should return the value 1
@@ -8388,7 +8655,7 @@ TEST(ContextDetachGlobal) {
}
-TEST(DetachAndReattachGlobal) {
+TEST(DetachGlobal) {
LocalContext env1;
v8::HandleScope scope(env1->GetIsolate());
@@ -8404,7 +8671,7 @@ TEST(DetachAndReattachGlobal) {
// Create a property on the global object in env2.
{
v8::Context::Scope scope(env2);
- env2->Global()->Set(v8_str("p"), v8::Integer::New(42));
+ env2->Global()->Set(v8_str("p"), v8::Integer::New(env2->GetIsolate(), 42));
}
// Create a reference to env2 global from env1 global.
@@ -8437,7 +8704,7 @@ TEST(DetachAndReattachGlobal) {
// Create a property on the global object in env3.
{
v8::Context::Scope scope(env3);
- env3->Global()->Set(v8_str("p"), v8::Integer::New(24));
+ env3->Global()->Set(v8_str("p"), v8::Integer::New(env3->GetIsolate(), 24));
}
// Check that other.p is now the property in env3 and that we have access.
@@ -8453,16 +8720,128 @@ TEST(DetachAndReattachGlobal) {
// so access should be blocked.
result = CompileRun("other.p");
CHECK(result->IsUndefined());
+}
- // Detach the global for env3 and reattach it to env2.
- env3->DetachGlobal();
- env2->ReattachGlobal(global2);
- // Check that we have access to other.p again in env1. |other| is now
- // the global object for env2 which has the same security token as env1.
- result = CompileRun("other.p");
- CHECK(result->IsInt32());
- CHECK_EQ(42, result->Int32Value());
+void GetThisX(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(
+ info.GetIsolate()->GetCurrentContext()->Global()->Get(v8_str("x")));
+}
+
+
+TEST(DetachedAccesses) {
+ LocalContext env1;
+ v8::HandleScope scope(env1->GetIsolate());
+
+ // Create second environment.
+ Local<ObjectTemplate> inner_global_template =
+ FunctionTemplate::New(env1->GetIsolate())->InstanceTemplate();
+ inner_global_template->SetAccessorProperty(
+ v8_str("this_x"), FunctionTemplate::New(env1->GetIsolate(), GetThisX));
+ v8::Local<Context> env2 =
+ Context::New(env1->GetIsolate(), NULL, inner_global_template);
+
+ Local<Value> foo = v8_str("foo");
+
+ // Set same security token for env1 and env2.
+ env1->SetSecurityToken(foo);
+ env2->SetSecurityToken(foo);
+
+ env1->Global()->Set(v8_str("x"), v8_str("env1_x"));
+
+ {
+ v8::Context::Scope scope(env2);
+ env2->Global()->Set(v8_str("x"), v8_str("env2_x"));
+ CompileRun(
+ "function bound_x() { return x; }"
+ "function get_x() { return this.x; }"
+ "function get_x_w() { return (function() {return this.x;})(); }");
+ env1->Global()->Set(v8_str("bound_x"), CompileRun("bound_x"));
+ env1->Global()->Set(v8_str("get_x"), CompileRun("get_x"));
+ env1->Global()->Set(v8_str("get_x_w"), CompileRun("get_x_w"));
+ env1->Global()->Set(
+ v8_str("this_x"),
+ CompileRun("Object.getOwnPropertyDescriptor(this, 'this_x').get"));
+ }
+
+ Local<Object> env2_global = env2->Global();
+ env2_global->TurnOnAccessCheck();
+ env2->DetachGlobal();
+
+ Local<Value> result;
+ result = CompileRun("bound_x()");
+ CHECK_EQ(v8_str("env2_x"), result);
+ result = CompileRun("get_x()");
+ CHECK(result->IsUndefined());
+ result = CompileRun("get_x_w()");
+ CHECK(result->IsUndefined());
+ result = CompileRun("this_x()");
+ CHECK_EQ(v8_str("env2_x"), result);
+
+ // Reattach env2's proxy
+ env2 = Context::New(env1->GetIsolate(),
+ 0,
+ v8::Handle<v8::ObjectTemplate>(),
+ env2_global);
+ env2->SetSecurityToken(foo);
+ {
+ v8::Context::Scope scope(env2);
+ env2->Global()->Set(v8_str("x"), v8_str("env3_x"));
+ env2->Global()->Set(v8_str("env1"), env1->Global());
+ result = CompileRun(
+ "results = [];"
+ "for (var i = 0; i < 4; i++ ) {"
+ " results.push(env1.bound_x());"
+ " results.push(env1.get_x());"
+ " results.push(env1.get_x_w());"
+ " results.push(env1.this_x());"
+ "}"
+ "results");
+ Local<v8::Array> results = Local<v8::Array>::Cast(result);
+ CHECK_EQ(16, results->Length());
+ for (int i = 0; i < 16; i += 4) {
+ CHECK_EQ(v8_str("env2_x"), results->Get(i + 0));
+ CHECK_EQ(v8_str("env1_x"), results->Get(i + 1));
+ CHECK_EQ(v8_str("env3_x"), results->Get(i + 2));
+ CHECK_EQ(v8_str("env2_x"), results->Get(i + 3));
+ }
+ }
+
+ result = CompileRun(
+ "results = [];"
+ "for (var i = 0; i < 4; i++ ) {"
+ " results.push(bound_x());"
+ " results.push(get_x());"
+ " results.push(get_x_w());"
+ " results.push(this_x());"
+ "}"
+ "results");
+ Local<v8::Array> results = Local<v8::Array>::Cast(result);
+ CHECK_EQ(16, results->Length());
+ for (int i = 0; i < 16; i += 4) {
+ CHECK_EQ(v8_str("env2_x"), results->Get(i + 0));
+ CHECK_EQ(v8_str("env3_x"), results->Get(i + 1));
+ CHECK_EQ(v8_str("env3_x"), results->Get(i + 2));
+ CHECK_EQ(v8_str("env2_x"), results->Get(i + 3));
+ }
+
+ result = CompileRun(
+ "results = [];"
+ "for (var i = 0; i < 4; i++ ) {"
+ " results.push(this.bound_x());"
+ " results.push(this.get_x());"
+ " results.push(this.get_x_w());"
+ " results.push(this.this_x());"
+ "}"
+ "results");
+ results = Local<v8::Array>::Cast(result);
+ CHECK_EQ(16, results->Length());
+ for (int i = 0; i < 16; i += 4) {
+ CHECK_EQ(v8_str("env2_x"), results->Get(i + 0));
+ CHECK_EQ(v8_str("env1_x"), results->Get(i + 1));
+ CHECK_EQ(v8_str("env3_x"), results->Get(i + 2));
+ CHECK_EQ(v8_str("env2_x"), results->Get(i + 3));
+ }
}
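
The reattachment step in DetachedAccesses relies on handing the previously detached global proxy back to Context::New; condensed from the test above (a sketch using the test's env2_global and foo token, not additional coverage):

    // Build a replacement context around the detached global proxy.
    v8::Local<v8::Context> replacement = v8::Context::New(
        isolate, NULL, v8::Handle<v8::ObjectTemplate>(), env2_global);
    replacement->SetSecurityToken(foo);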
@@ -8539,7 +8918,8 @@ static void UnreachableFunction(
TEST(AccessControl) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallbacks(NamedAccessBlocker,
IndexedAccessBlocker);
@@ -8554,8 +8934,8 @@ TEST(AccessControl) {
global_template->SetAccessorProperty(
v8_str("accessible_js_prop"),
- v8::FunctionTemplate::New(EchoGetter),
- v8::FunctionTemplate::New(EchoSetter),
+ v8::FunctionTemplate::New(isolate, EchoGetter),
+ v8::FunctionTemplate::New(isolate, EchoSetter),
v8::None,
v8::AccessControl(v8::ALL_CAN_READ | v8::ALL_CAN_WRITE));
@@ -8567,8 +8947,8 @@ TEST(AccessControl) {
global_template->SetAccessorProperty(
v8_str("blocked_js_prop"),
- v8::FunctionTemplate::New(UnreachableFunction),
- v8::FunctionTemplate::New(UnreachableFunction),
+ v8::FunctionTemplate::New(isolate, UnreachableFunction),
+ v8::FunctionTemplate::New(isolate, UnreachableFunction),
v8::None,
v8::DEFAULT);
@@ -8818,7 +9198,8 @@ TEST(AccessControl) {
TEST(AccessControlES5) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallbacks(NamedAccessBlocker,
IndexedAccessBlocker);
@@ -8905,9 +9286,10 @@ static bool GetOwnPropertyNamesIndexedBlocker(Local<v8::Object> global,
THREADED_TEST(AccessControlGetOwnPropertyNames) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> obj_template =
+ v8::ObjectTemplate::New(isolate);
- obj_template->Set(v8_str("x"), v8::Integer::New(42));
+ obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
obj_template->SetAccessCheckCallbacks(GetOwnPropertyNamesNamedBlocker,
GetOwnPropertyNamesIndexedBlocker);
@@ -8946,28 +9328,30 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
static void IndexedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(2);
- result->Set(0, v8::Integer::New(7));
- result->Set(1, v8::Object::New());
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
+ result->Set(0, v8::Integer::New(info.GetIsolate(), 7));
+ result->Set(1, v8::Object::New(info.GetIsolate()));
info.GetReturnValue().Set(result);
}
static void NamedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(2);
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
result->Set(0, v8_str("x"));
- result->Set(1, v8::Object::New());
+ result->Set(1, v8::Object::New(info.GetIsolate()));
info.GetReturnValue().Set(result);
}
THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
- v8::HandleScope handle_scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj_template =
+ v8::ObjectTemplate::New(isolate);
- obj_template->Set(v8_str("7"), v8::Integer::New(7));
- obj_template->Set(v8_str("x"), v8::Integer::New(42));
+ obj_template->Set(v8_str("7"), v8::Integer::New(CcTest::isolate(), 7));
+ obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
obj_template->SetIndexedPropertyHandler(NULL, NULL, NULL, NULL,
IndexedPropertyEnumerator);
obj_template->SetNamedPropertyHandler(NULL, NULL, NULL, NULL,
@@ -9001,7 +9385,8 @@ THREADED_TEST(CrossDomainAccessors) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(isolate);
v8::Handle<v8::ObjectTemplate> global_template =
func_template->InstanceTemplate();
@@ -9084,7 +9469,8 @@ TEST(AccessControlIC) {
// Create an object that requires access-check functions to be
// called for cross-domain access.
- v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
object_template->SetAccessCheckCallbacks(NamedAccessCounter,
IndexedAccessCounter);
Local<v8::Object> object = object_template->NewInstance();
@@ -9232,7 +9618,8 @@ THREADED_TEST(AccessControlFlatten) {
// Create an object that requires access-check functions to be
// called for cross-domain access.
- v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
object_template->SetAccessCheckCallbacks(NamedAccessFlatten,
IndexedAccessFlatten);
Local<v8::Object> object = object_template->NewInstance();
@@ -9301,7 +9688,8 @@ THREADED_TEST(AccessControlInterceptorIC) {
// Create an object that requires access-check functions to be
// called for cross-domain access. The object also has interceptors.
- v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
object_template->SetAccessCheckCallbacks(NamedAccessCounter,
IndexedAccessCounter);
object_template->SetNamedPropertyHandler(AccessControlNamedGetter,
@@ -9367,14 +9755,15 @@ static void InstanceFunctionCallback(
THREADED_TEST(InstanceProperties) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
- Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance = t->InstanceTemplate();
instance->Set(v8_str("x"), v8_num(42));
instance->Set(v8_str("f"),
- v8::FunctionTemplate::New(InstanceFunctionCallback));
+ v8::FunctionTemplate::New(isolate, InstanceFunctionCallback));
Local<Value> o = t->GetFunction()->NewInstance();
@@ -9395,17 +9784,19 @@ static void GlobalObjectInstancePropertiesGet(
THREADED_TEST(GlobalObjectInstanceProperties) {
- v8::HandleScope handle_scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
Local<Value> global_object;
- Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
t->InstanceTemplate()->SetNamedPropertyHandler(
GlobalObjectInstancePropertiesGet);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->Set(v8_str("x"), v8_num(42));
instance_template->Set(v8_str("f"),
- v8::FunctionTemplate::New(InstanceFunctionCallback));
+ v8::FunctionTemplate::New(isolate,
+ InstanceFunctionCallback));
// The script to check how Crankshaft compiles missing global function
// invocations. function g is not defined and should throw on call.
@@ -9451,11 +9842,12 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
THREADED_TEST(CallKnownGlobalReceiver) {
- v8::HandleScope handle_scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
Local<Value> global_object;
- Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
// The script to check that we leave global object not
@@ -9530,19 +9922,22 @@ static void ShadowNamedGet(Local<String> key,
THREADED_TEST(ShadowObject) {
shadow_y = shadow_y_setter_call_count = shadow_y_getter_call_count = 0;
- v8::HandleScope handle_scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
- Local<ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ Local<ObjectTemplate> global_template = v8::ObjectTemplate::New(isolate);
LocalContext context(NULL, global_template);
- Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
t->InstanceTemplate()->SetNamedPropertyHandler(ShadowNamedGet);
t->InstanceTemplate()->SetIndexedPropertyHandler(ShadowIndexedGet);
Local<ObjectTemplate> proto = t->PrototypeTemplate();
Local<ObjectTemplate> instance = t->InstanceTemplate();
proto->Set(v8_str("f"),
- v8::FunctionTemplate::New(ShadowFunctionCallback, Local<Value>()));
+ v8::FunctionTemplate::New(isolate,
+ ShadowFunctionCallback,
+ Local<Value>()));
proto->Set(v8_str("x"), v8_num(12));
instance->SetAccessor(v8_str("y"), ShadowYGetter, ShadowYSetter);
@@ -9571,17 +9966,18 @@ THREADED_TEST(ShadowObject) {
THREADED_TEST(HiddenPrototype) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
- Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
t0->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
- Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->SetHiddenPrototype(true);
t1->InstanceTemplate()->Set(v8_str("y"), v8_num(1));
- Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
t2->SetHiddenPrototype(true);
t2->InstanceTemplate()->Set(v8_str("z"), v8_num(2));
- Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
t3->InstanceTemplate()->Set(v8_str("u"), v8_num(3));
Local<v8::Object> o0 = t0->GetFunction()->NewInstance();
@@ -9615,12 +10011,13 @@ THREADED_TEST(HiddenPrototype) {
THREADED_TEST(HiddenPrototypeSet) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
- Local<v8::FunctionTemplate> ot = v8::FunctionTemplate::New();
- Local<v8::FunctionTemplate> ht = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> ot = v8::FunctionTemplate::New(isolate);
+ Local<v8::FunctionTemplate> ht = v8::FunctionTemplate::New(isolate);
ht->SetHiddenPrototype(true);
- Local<v8::FunctionTemplate> pt = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> pt = v8::FunctionTemplate::New(isolate);
ht->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
Local<v8::Object> o = ot->GetFunction()->NewInstance();
@@ -9659,11 +10056,11 @@ THREADED_TEST(HiddenPrototypeIdentityHash) {
LocalContext context;
v8::HandleScope handle_scope(context->GetIsolate());
- Handle<FunctionTemplate> t = FunctionTemplate::New();
+ Handle<FunctionTemplate> t = FunctionTemplate::New(context->GetIsolate());
t->SetHiddenPrototype(true);
t->InstanceTemplate()->Set(v8_str("foo"), v8_num(75));
Handle<Object> p = t->GetFunction()->NewInstance();
- Handle<Object> o = Object::New();
+ Handle<Object> o = Object::New(context->GetIsolate());
o->SetPrototype(p);
int hash = o->GetIdentityHash();
@@ -9675,17 +10072,18 @@ THREADED_TEST(HiddenPrototypeIdentityHash) {
THREADED_TEST(SetPrototype) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
- Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
t0->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
- Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->SetHiddenPrototype(true);
t1->InstanceTemplate()->Set(v8_str("y"), v8_num(1));
- Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
t2->SetHiddenPrototype(true);
t2->InstanceTemplate()->Set(v8_str("z"), v8_num(2));
- Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
t3->InstanceTemplate()->Set(v8_str("u"), v8_num(3));
Local<v8::Object> o0 = t0->GetFunction()->NewInstance();
@@ -9736,20 +10134,21 @@ THREADED_TEST(SetPrototype) {
THREADED_TEST(Regress91517) {
i::FLAG_allow_natives_syntax = true;
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
- Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->SetHiddenPrototype(true);
t1->InstanceTemplate()->Set(v8_str("foo"), v8_num(1));
- Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
t2->SetHiddenPrototype(true);
t2->InstanceTemplate()->Set(v8_str("fuz1"), v8_num(2));
- t2->InstanceTemplate()->Set(v8_str("objects"), v8::Object::New());
+ t2->InstanceTemplate()->Set(v8_str("objects"), v8::Object::New(isolate));
t2->InstanceTemplate()->Set(v8_str("fuz2"), v8_num(2));
- Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
t3->SetHiddenPrototype(true);
t3->InstanceTemplate()->Set(v8_str("boo"), v8_num(3));
- Local<v8::FunctionTemplate> t4 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t4 = v8::FunctionTemplate::New(isolate);
t4->InstanceTemplate()->Set(v8_str("baz"), v8_num(4));
// Force dictionary-based properties.
@@ -9772,7 +10171,8 @@ THREADED_TEST(Regress91517) {
// Call the runtime version of GetLocalPropertyNames() on the natively
// created object through JavaScript.
context->Global()->Set(v8_str("obj"), o4);
- CompileRun("var names = %GetLocalPropertyNames(obj, true);");
+ // PROPERTY_ATTRIBUTES_NONE = 0
+ CompileRun("var names = %GetLocalPropertyNames(obj, 0);");
ExpectInt32("names.length", 1006);
ExpectTrue("names.indexOf(\"baz\") >= 0");
@@ -9784,12 +10184,69 @@ THREADED_TEST(Regress91517) {
}
-THREADED_TEST(FunctionReadOnlyPrototype) {
+// Getting property names of an object with a hidden and inherited
+// prototype should not duplicate the accessor properties inherited.
+THREADED_TEST(Regress269562) {
+ i::FLAG_allow_natives_syntax = true;
LocalContext context;
v8::HandleScope handle_scope(context->GetIsolate());
- Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
- t1->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(42));
+ Local<v8::FunctionTemplate> t1 =
+ v8::FunctionTemplate::New(context->GetIsolate());
+ t1->SetHiddenPrototype(true);
+
+ Local<v8::ObjectTemplate> i1 = t1->InstanceTemplate();
+ i1->SetAccessor(v8_str("foo"),
+ SimpleAccessorGetter, SimpleAccessorSetter);
+ i1->SetAccessor(v8_str("bar"),
+ SimpleAccessorGetter, SimpleAccessorSetter);
+ i1->SetAccessor(v8_str("baz"),
+ SimpleAccessorGetter, SimpleAccessorSetter);
+ i1->Set(v8_str("n1"), v8_num(1));
+ i1->Set(v8_str("n2"), v8_num(2));
+
+ Local<v8::Object> o1 = t1->GetFunction()->NewInstance();
+ Local<v8::FunctionTemplate> t2 =
+ v8::FunctionTemplate::New(context->GetIsolate());
+ t2->SetHiddenPrototype(true);
+
+ // Inherit from t1 and mark prototype as hidden.
+ t2->Inherit(t1);
+ t2->InstanceTemplate()->Set(v8_str("mine"), v8_num(4));
+
+ Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
+ CHECK(o2->SetPrototype(o1));
+
+ v8::Local<v8::Symbol> sym = v8::Symbol::New(context->GetIsolate(), "s1");
+ o1->Set(sym, v8_num(3));
+ o1->SetHiddenValue(v8_str("h1"),
+ v8::Integer::New(context->GetIsolate(), 2013));
+
+ // Call the runtime version of GetLocalPropertyNames() on
+ // the natively created object through JavaScript.
+ context->Global()->Set(v8_str("obj"), o2);
+ context->Global()->Set(v8_str("sym"), sym);
+ // PROPERTY_ATTRIBUTES_NONE = 0
+ CompileRun("var names = %GetLocalPropertyNames(obj, 0);");
+
+ ExpectInt32("names.length", 7);
+ ExpectTrue("names.indexOf(\"foo\") >= 0");
+ ExpectTrue("names.indexOf(\"bar\") >= 0");
+ ExpectTrue("names.indexOf(\"baz\") >= 0");
+ ExpectTrue("names.indexOf(\"n1\") >= 0");
+ ExpectTrue("names.indexOf(\"n2\") >= 0");
+ ExpectTrue("names.indexOf(sym) >= 0");
+ ExpectTrue("names.indexOf(\"mine\") >= 0");
+}
+
+
+THREADED_TEST(FunctionReadOnlyPrototype) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
+ t1->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(isolate, 42));
t1->ReadOnlyPrototype();
context->Global()->Set(v8_str("func1"), t1->GetFunction());
// Configured value of ReadOnly flag.
@@ -9802,8 +10259,8 @@ THREADED_TEST(FunctionReadOnlyPrototype) {
CHECK_EQ(42,
CompileRun("func1.prototype = {}; func1.prototype.x")->Int32Value());
- Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
- t2->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(42));
+ Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
+ t2->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(isolate, 42));
context->Global()->Set(v8_str("func2"), t2->GetFunction());
// Default value of ReadOnly flag.
CHECK(CompileRun(
@@ -9817,9 +10274,10 @@ THREADED_TEST(FunctionReadOnlyPrototype) {
THREADED_TEST(SetPrototypeThrows) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
- Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<v8::Object> o0 = t->GetFunction()->NewInstance();
Local<v8::Object> o1 = t->GetFunction()->NewInstance();
@@ -9838,9 +10296,10 @@ THREADED_TEST(SetPrototypeThrows) {
THREADED_TEST(FunctionRemovePrototype) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
- Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->RemovePrototype();
Local<v8::Function> fun = t1->GetFunction();
context->Global()->Set(v8_str("fun"), fun);
@@ -9858,7 +10317,8 @@ THREADED_TEST(FunctionRemovePrototype) {
THREADED_TEST(GetterSetterExceptions) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
CompileRun(
"function Foo() { };"
"function Throw() { throw 5; };"
@@ -9868,21 +10328,22 @@ THREADED_TEST(GetterSetterExceptions) {
Local<v8::Object> x =
Local<v8::Object>::Cast(context->Global()->Get(v8_str("x")));
v8::TryCatch try_catch;
- x->Set(v8_str("set"), v8::Integer::New(8));
+ x->Set(v8_str("set"), v8::Integer::New(isolate, 8));
x->Get(v8_str("get"));
- x->Set(v8_str("set"), v8::Integer::New(8));
+ x->Set(v8_str("set"), v8::Integer::New(isolate, 8));
x->Get(v8_str("get"));
- x->Set(v8_str("set"), v8::Integer::New(8));
+ x->Set(v8_str("set"), v8::Integer::New(isolate, 8));
x->Get(v8_str("get"));
- x->Set(v8_str("set"), v8::Integer::New(8));
+ x->Set(v8_str("set"), v8::Integer::New(isolate, 8));
x->Get(v8_str("get"));
}
THREADED_TEST(Constructor) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
- Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->SetClassName(v8_str("Fun"));
Local<Function> cons = templ->GetFunction();
context->Global()->Set(v8_str("Fun"), cons);
@@ -9901,7 +10362,7 @@ static void ConstructorCallback(
if (args.IsConstructCall()) {
Local<Object> Holder = args.Holder();
- This = Object::New();
+ This = Object::New(args.GetIsolate());
Local<Value> proto = Holder->GetPrototype();
if (proto->IsObject()) {
This->SetPrototype(proto);
@@ -9927,7 +10388,7 @@ THREADED_TEST(ConstructorForObject) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope handle_scope(isolate);
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+ { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(ConstructorCallback);
Local<Object> instance = instance_template->NewInstance();
context->Global()->Set(v8_str("obj"), instance);
@@ -10003,7 +10464,7 @@ THREADED_TEST(ConstructorForObject) {
}
// Check exception handling when there is no constructor set for the Object.
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+ { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
Local<Object> instance = instance_template->NewInstance();
context->Global()->Set(v8_str("obj2"), instance);
v8::TryCatch try_catch;
@@ -10025,7 +10486,7 @@ THREADED_TEST(ConstructorForObject) {
}
// Check the case when constructor throws exception.
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+ { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(ThrowValue);
Local<Object> instance = instance_template->NewInstance();
context->Global()->Set(v8_str("obj3"), instance);
@@ -10049,7 +10510,7 @@ THREADED_TEST(ConstructorForObject) {
// Check whether constructor returns with an object or non-object.
{ Local<FunctionTemplate> function_template =
- FunctionTemplate::New(FakeConstructorCallback);
+ FunctionTemplate::New(isolate, FakeConstructorCallback);
Local<Function> function = function_template->GetFunction();
Local<Object> instance1 = function;
context->Global()->Set(v8_str("obj4"), instance1);
@@ -10069,7 +10530,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK(!try_catch.HasCaught());
CHECK(value->IsObject());
- Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+ Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(FakeConstructorCallback);
Local<Object> instance2 = instance_template->NewInstance();
context->Global()->Set(v8_str("obj5"), instance2);
@@ -10092,8 +10553,9 @@ THREADED_TEST(ConstructorForObject) {
THREADED_TEST(FunctionDescriptorException) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
- Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->SetClassName(v8_str("Fun"));
Local<Function> cons = templ->GetFunction();
context->Global()->Set(v8_str("Fun"), cons);
@@ -10103,9 +10565,9 @@ THREADED_TEST(FunctionDescriptorException) {
" (new Fun()).blah()"
" } catch (e) {"
" var str = String(e);"
- " if (str.indexOf('TypeError') == -1) return 1;"
- " if (str.indexOf('[object Fun]') != -1) return 2;"
- " if (str.indexOf('#<Fun>') == -1) return 3;"
+ // " if (str.indexOf('TypeError') == -1) return 1;"
+ // " if (str.indexOf('[object Fun]') != -1) return 2;"
+ // " if (str.indexOf('#<Fun>') == -1) return 3;"
" return 0;"
" }"
" return 4;"
@@ -10310,9 +10772,10 @@ static void ReturnThis(const v8::FunctionCallbackInfo<v8::Value>& args) {
// functions.
THREADED_TEST(CallAsFunction) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
- { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetCallAsFunctionHandler(call_as_function);
Local<v8::Object> instance = t->GetFunction()->NewInstance();
@@ -10365,7 +10828,7 @@ THREADED_TEST(CallAsFunction) {
CHECK_EQ(28, value->Int32Value());
}
- { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template(t->InstanceTemplate());
USE(instance_template);
Local<v8::Object> instance = t->GetFunction()->NewInstance();
@@ -10379,7 +10842,8 @@ THREADED_TEST(CallAsFunction) {
CHECK(value.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value1(try_catch.Exception());
- CHECK_EQ("TypeError: Property 'obj2' of object #<Object> is not a function",
+ // TODO(verwaest): Better message
+ CHECK_EQ("TypeError: object is not a function",
*exception_value1);
try_catch.Reset();
@@ -10394,7 +10858,7 @@ THREADED_TEST(CallAsFunction) {
try_catch.Reset();
}
- { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetCallAsFunctionHandler(ThrowValue);
Local<v8::Object> instance = t->GetFunction()->NewInstance();
@@ -10418,8 +10882,7 @@ THREADED_TEST(CallAsFunction) {
try_catch.Reset();
}
- { v8::Isolate* isolate = context->GetIsolate();
- Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetCallAsFunctionHandler(ReturnThis);
Local<v8::Object> instance = t->GetFunction()->NewInstance();
@@ -10441,8 +10904,7 @@ THREADED_TEST(CallAsFunction) {
CHECK(a5->StrictEquals(instance));
}
- { v8::Isolate* isolate = context->GetIsolate();
- CompileRun(
+ { CompileRun(
"function ReturnThisSloppy() {"
" return this;"
"}"
@@ -10498,9 +10960,10 @@ THREADED_TEST(CallAsFunction) {
// Check whether a non-function object is callable.
THREADED_TEST(CallableObject) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+ { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(call_as_function);
Local<Object> instance = instance_template->NewInstance();
v8::TryCatch try_catch;
@@ -10509,7 +10972,7 @@ THREADED_TEST(CallableObject) {
CHECK(!try_catch.HasCaught());
}
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New();
+ { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
Local<Object> instance = instance_template->NewInstance();
v8::TryCatch try_catch;
@@ -10518,7 +10981,7 @@ THREADED_TEST(CallableObject) {
}
{ Local<FunctionTemplate> function_template =
- FunctionTemplate::New(call_as_function);
+ FunctionTemplate::New(isolate, call_as_function);
Local<Function> function = function_template->GetFunction();
Local<Object> instance = function;
v8::TryCatch try_catch;
@@ -10527,7 +10990,7 @@ THREADED_TEST(CallableObject) {
CHECK(!try_catch.HasCaught());
}
- { Local<FunctionTemplate> function_template = FunctionTemplate::New();
+ { Local<FunctionTemplate> function_template = FunctionTemplate::New(isolate);
Local<Function> function = function_template->GetFunction();
Local<Object> instance = function;
v8::TryCatch try_catch;
@@ -10538,45 +11001,44 @@ THREADED_TEST(CallableObject) {
}
-static int CountHandles() {
- return v8::HandleScope::NumberOfHandles();
-}
-
-
-static int Recurse(int depth, int iterations) {
- v8::HandleScope scope(CcTest::isolate());
- if (depth == 0) return CountHandles();
+static int Recurse(v8::Isolate* isolate, int depth, int iterations) {
+ v8::HandleScope scope(isolate);
+ if (depth == 0) return v8::HandleScope::NumberOfHandles(isolate);
for (int i = 0; i < iterations; i++) {
- Local<v8::Number> n(v8::Integer::New(42));
+ Local<v8::Number> n(v8::Integer::New(isolate, 42));
}
- return Recurse(depth - 1, iterations);
+ return Recurse(isolate, depth - 1, iterations);
}
THREADED_TEST(HandleIteration) {
static const int kIterations = 500;
static const int kNesting = 200;
- CHECK_EQ(0, CountHandles());
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope0(isolate);
+ CHECK_EQ(0, v8::HandleScope::NumberOfHandles(isolate));
{
- v8::HandleScope scope1(CcTest::isolate());
- CHECK_EQ(0, CountHandles());
+ v8::HandleScope scope1(isolate);
+ CHECK_EQ(0, v8::HandleScope::NumberOfHandles(isolate));
for (int i = 0; i < kIterations; i++) {
- Local<v8::Number> n(v8::Integer::New(42));
- CHECK_EQ(i + 1, CountHandles());
+ Local<v8::Number> n(v8::Integer::New(CcTest::isolate(), 42));
+ CHECK_EQ(i + 1, v8::HandleScope::NumberOfHandles(isolate));
}
- CHECK_EQ(kIterations, CountHandles());
+ CHECK_EQ(kIterations, v8::HandleScope::NumberOfHandles(isolate));
{
v8::HandleScope scope2(CcTest::isolate());
for (int j = 0; j < kIterations; j++) {
- Local<v8::Number> n(v8::Integer::New(42));
- CHECK_EQ(j + 1 + kIterations, CountHandles());
+ Local<v8::Number> n(v8::Integer::New(CcTest::isolate(), 42));
+ CHECK_EQ(j + 1 + kIterations,
+ v8::HandleScope::NumberOfHandles(isolate));
}
}
- CHECK_EQ(kIterations, CountHandles());
+ CHECK_EQ(kIterations, v8::HandleScope::NumberOfHandles(isolate));
}
- CHECK_EQ(0, CountHandles());
- CHECK_EQ(kNesting * kIterations, Recurse(kNesting, kIterations));
+ CHECK_EQ(0, v8::HandleScope::NumberOfHandles(isolate));
+ CHECK_EQ(kNesting * kIterations, Recurse(isolate, kNesting, kIterations));
}
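
The HandleIteration hunk above is representative of the mechanical change applied throughout this file: allocation-style factories (v8::Integer::New, v8::Object::New, the template New functions) and v8::HandleScope::NumberOfHandles now take the v8::Isolate* explicitly instead of relying on an implicit current isolate. A minimal sketch of the updated calling convention, using only calls that appear in the hunks above and assuming an already-initialized isolate with an entered context:

// Sketch: local handles are counted against an explicit isolate.
static int CountSomeHandles(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);                    // scopes are bound to an isolate
  for (int i = 0; i < 10; i++) {
    // The isolate is now the first argument to the factory.
    v8::Local<v8::Number> n(v8::Integer::New(isolate, 42));
    USE(n);
  }
  return v8::HandleScope::NumberOfHandles(isolate);  // per-isolate counter
}
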
@@ -10589,8 +11051,9 @@ static void InterceptorHasOwnPropertyGetter(
THREADED_TEST(InterceptorHasOwnProperty) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
instance_templ->SetNamedPropertyHandler(InterceptorHasOwnPropertyGetter);
Local<Function> function = fun_templ->GetFunction();
@@ -10620,8 +11083,9 @@ static void InterceptorHasOwnPropertyGetterGC(
THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
instance_templ->SetNamedPropertyHandler(InterceptorHasOwnPropertyGetterGC);
Local<Function> function = fun_templ->GetFunction();
@@ -10655,8 +11119,9 @@ typedef void (*NamedPropertyGetter)(
static void CheckInterceptorLoadIC(NamedPropertyGetter getter,
const char* source,
int expected) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(getter, 0, 0, 0, 0, v8_str("data"));
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -10673,7 +11138,7 @@ static void InterceptorLoadICGetter(
CHECK_EQ(isolate, info.GetIsolate());
CHECK_EQ(v8_str("data"), info.Data());
CHECK_EQ(v8_str("x"), name);
- info.GetReturnValue().Set(v8::Integer::New(42));
+ info.GetReturnValue().Set(v8::Integer::New(isolate, 42));
}
@@ -10698,7 +11163,7 @@ static void InterceptorLoadXICGetter(
ApiTestFuzzer::Fuzz();
info.GetReturnValue().Set(
v8_str("x")->Equals(name) ?
- v8::Handle<v8::Value>(v8::Integer::New(42)) :
+ v8::Handle<v8::Value>(v8::Integer::New(info.GetIsolate(), 42)) :
v8::Handle<v8::Value>());
}
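
Interceptor callbacks follow the same rule: the hunks above stop constructing return values with the implicit isolate and instead use the one carried by the callback info. A short illustrative sketch of that shape (the property name "x" and the constant 42 simply mirror the getters above):

// Sketch: a named-property getter building its result from info.GetIsolate().
static void SketchGetterX(v8::Local<v8::String> name,
                          const v8::PropertyCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  if (v8::String::NewFromUtf8(isolate, "x")->Equals(name)) {
    info.GetReturnValue().Set(v8::Integer::New(isolate, 42));
  }
}
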
@@ -10868,8 +11333,9 @@ static void SetOnThis(Local<String> name,
THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
templ->SetAccessor(v8_str("y"), Return239Callback);
LocalContext context;
@@ -10897,10 +11363,11 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
templ_p->SetAccessor(v8_str("y"), Return239Callback);
LocalContext context;
@@ -10930,8 +11397,9 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
templ->SetAccessor(v8_str("y"), Return239Callback);
@@ -10958,10 +11426,11 @@ THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
// Test the case when we stored callback into
// a stub, but interceptor produced value on its own.
THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
templ_p->SetAccessor(v8_str("y"), Return239Callback);
LocalContext context;
@@ -10986,10 +11455,11 @@ THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
// Test the case when we stored callback into
// a stub, but it got invalidated later on.
THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
LocalContext context;
@@ -11018,10 +11488,11 @@ THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
// a stub, but it got invalidated later on due to override on
// global object which is between interceptor and callbacks' holders.
THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
LocalContext context;
@@ -11050,7 +11521,7 @@ static void InterceptorLoadICGetter0(
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
CHECK(v8_str("x")->Equals(name));
- info.GetReturnValue().Set(v8::Integer::New(0));
+ info.GetReturnValue().Set(v8::Integer::New(info.GetIsolate(), 0));
}
@@ -11073,8 +11544,9 @@ static void InterceptorStoreICSetter(
// This test should hit the store IC for the interceptor case.
THREADED_TEST(InterceptorStoreIC) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorLoadICGetter,
InterceptorStoreICSetter,
0, 0, 0, v8_str("data"));
@@ -11088,8 +11560,9 @@ THREADED_TEST(InterceptorStoreIC) {
THREADED_TEST(InterceptorStoreICWithNoSetter) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11119,8 +11592,9 @@ static void InterceptorCallICGetter(
// This test should hit the call IC for the interceptor case.
THREADED_TEST(InterceptorCallIC) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorCallICGetter);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11138,8 +11612,9 @@ THREADED_TEST(InterceptorCallIC) {
// This test checks that if interceptor doesn't provide
// a value, we can fetch regular value.
THREADED_TEST(InterceptorCallICSeesOthers) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11167,8 +11642,9 @@ static void InterceptorCallICGetter4(
// even if we cached shadowed variant, interceptor's function
// is invoked
THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorCallICGetter4);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11187,8 +11663,9 @@ THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
// Test the case when we stored cacheable lookup into
// a stub, but it got invalidated later on
THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11214,8 +11691,9 @@ THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
// This test checks that if interceptor doesn't provide a function,
// cached constant function is used
THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11245,8 +11723,9 @@ static void InterceptorCallICGetter5(
// even if we cached constant function, interceptor's function
// is invoked
THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorCallICGetter5);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11278,8 +11757,9 @@ static void InterceptorCallICGetter6(
// to test the optimized compiler.
THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorCallICGetter6);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11308,8 +11788,9 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
// Test the case when we stored constant function into
// a stub, but it got invalidated later on
THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11338,8 +11819,9 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
// a stub, but it got invalidated later on due to override on
// global object which is between interceptor and constant function' holders.
THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11363,8 +11845,9 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
// Test the case when actual function to call sits on global object.
THREADED_TEST(InterceptorCallICCachedFromGlobal) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -11451,10 +11934,13 @@ void DirectApiCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
THREADED_TEST(CallICFastApi_DirectCall_GCMoveStub) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::ObjectTemplate> nativeobject_templ = v8::ObjectTemplate::New();
- nativeobject_templ->Set("callback",
- v8::FunctionTemplate::New(DirectApiCallback));
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> nativeobject_templ =
+ v8::ObjectTemplate::New(isolate);
+ nativeobject_templ->Set(isolate, "callback",
+ v8::FunctionTemplate::New(isolate,
+ DirectApiCallback));
v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
// call the api function multiple times to ensure direct call stub creation.
@@ -11476,10 +11962,13 @@ void ThrowingDirectApiCallback(
THREADED_TEST(CallICFastApi_DirectCall_Throw) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::ObjectTemplate> nativeobject_templ = v8::ObjectTemplate::New();
- nativeobject_templ->Set("callback",
- v8::FunctionTemplate::New(ThrowingDirectApiCallback));
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> nativeobject_templ =
+ v8::ObjectTemplate::New(isolate);
+ nativeobject_templ->Set(isolate, "callback",
+ v8::FunctionTemplate::New(isolate,
+ ThrowingDirectApiCallback));
v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
// call the api function multiple times to ensure direct call stub creation.
@@ -11514,8 +12003,9 @@ static void DirectGetterCallback(
template<typename Accessor>
static void LoadICFastApi_DirectCall_GCMoveStub(Accessor accessor) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("p1"), accessor);
context->Global()->Set(v8_str("o1"), obj->NewInstance());
p_getter_count = 0;
@@ -11544,8 +12034,9 @@ void ThrowingDirectGetterCallback(
THREADED_TEST(LoadICFastApi_DirectCall_Throw) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("p1"), ThrowingDirectGetterCallback);
context->Global()->Set(v8_str("o1"), obj->NewInstance());
v8::Handle<Value> result = CompileRun(
@@ -11560,18 +12051,21 @@ THREADED_TEST(LoadICFastApi_DirectCall_Throw) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
int interceptor_call_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_TrivialSignature,
+ v8::FunctionTemplate::New(isolate,
+ FastApiCallback_TrivialSignature,
v8_str("method_data"),
v8::Handle<v8::Signature>());
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
- NULL, NULL, NULL, NULL,
- v8::External::New(&interceptor_call_count));
+ templ->SetNamedPropertyHandler(
+ InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ v8::External::New(isolate, &interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -11588,19 +12082,20 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
int interceptor_call_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
+ v8::Signature::New(isolate, fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
- NULL, NULL, NULL, NULL,
- v8::External::New(&interceptor_call_count));
+ templ->SetNamedPropertyHandler(
+ InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ v8::External::New(isolate, &interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -11620,19 +12115,20 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
int interceptor_call_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
+ v8::Signature::New(isolate, fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
- NULL, NULL, NULL, NULL,
- v8::External::New(&interceptor_call_count));
+ templ->SetNamedPropertyHandler(
+ InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ v8::External::New(isolate, &interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -11658,19 +12154,20 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
int interceptor_call_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
+ v8::Signature::New(isolate, fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
- NULL, NULL, NULL, NULL,
- v8::External::New(&interceptor_call_count));
+ templ->SetNamedPropertyHandler(
+ InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ v8::External::New(isolate, &interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -11696,19 +12193,20 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
int interceptor_call_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
+ v8::Signature::New(isolate, fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
- NULL, NULL, NULL, NULL,
- v8::External::New(&interceptor_call_count));
+ templ->SetNamedPropertyHandler(
+ InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ v8::External::New(isolate, &interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -11728,7 +12226,8 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
" }"
"}");
CHECK(try_catch.HasCaught());
- CHECK_EQ(v8_str("TypeError: Object 333 has no method 'method'"),
+ // TODO(verwaest): Adjust message.
+ CHECK_EQ(v8_str("TypeError: undefined is not a function"),
try_catch.Exception()->ToString());
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
CHECK_GE(interceptor_call_count, 50);
@@ -11737,19 +12236,20 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
int interceptor_call_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
+ v8::Signature::New(isolate, fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
- templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
- NULL, NULL, NULL, NULL,
- v8::External::New(&interceptor_call_count));
+ templ->SetNamedPropertyHandler(
+ InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
+ v8::External::New(isolate, &interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -11777,10 +12277,13 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
THREADED_PROFILED_TEST(CallICFastApi_TrivialSignature) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_TrivialSignature,
+ v8::FunctionTemplate::New(isolate,
+ FastApiCallback_TrivialSignature,
v8_str("method_data"),
v8::Handle<v8::Signature>());
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
@@ -11802,12 +12305,13 @@ THREADED_PROFILED_TEST(CallICFastApi_TrivialSignature) {
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
+ v8::Signature::New(isolate, fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
@@ -11831,12 +12335,13 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature) {
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss1) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
+ v8::Signature::New(isolate, fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
@@ -11865,12 +12370,13 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss1) {
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
+ v8::Signature::New(isolate, fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
@@ -11895,19 +12401,21 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
" }"
"}");
CHECK(try_catch.HasCaught());
- CHECK_EQ(v8_str("TypeError: Object 333 has no method 'method'"),
+ // TODO(verwaest): Adjust message.
+ CHECK_EQ(v8_str("TypeError: undefined is not a function"),
try_catch.Exception()->ToString());
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_TypeError) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
+ v8::Signature::New(isolate, fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
@@ -11953,8 +12461,9 @@ static void InterceptorKeyedCallICGetter(
// Test the case when we stored cacheable lookup into
// a stub, but the function name changed (to another cacheable function).
THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11977,8 +12486,9 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
// a stub, but the function name changed (and the new function is present
// both before and after the interceptor in the prototype chain).
THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorKeyedCallICGetter);
LocalContext context;
context->Global()->Set(v8_str("proto1"), templ->NewInstance());
@@ -12004,8 +12514,9 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
// Same as InterceptorKeyedCallICKeyChange1 only the cacheable function sit
// on the global object.
THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -12029,8 +12540,9 @@ THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
// Test the case when actual function to call sits on global object.
THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ_o->NewInstance());
@@ -12054,8 +12566,9 @@ THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
// Test the map transition before the interceptor.
THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
@@ -12076,8 +12589,9 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
// Test the map transition after the interceptor.
THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ_o->NewInstance());
@@ -12112,8 +12626,9 @@ static void InterceptorICRefErrorGetter(
// Once in a while, the interceptor will reply that a property was not
// found in which case we should get a reference error.
THREADED_TEST(InterceptorICReferenceErrors) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorICRefErrorGetter);
LocalContext context(0, templ, v8::Handle<Value>());
call_ic_function2 = v8_compile("function h(x) { return x; }; h")->Run();
@@ -12159,8 +12674,9 @@ static void InterceptorICExceptionGetter(
// exception once in a while.
THREADED_TEST(InterceptorICGetterExceptions) {
interceptor_ic_exception_get_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(InterceptorICExceptionGetter);
LocalContext context(0, templ, v8::Handle<Value>());
call_ic_function3 = v8_compile("function h(x) { return x; }; h")->Run();
@@ -12203,8 +12719,9 @@ static void InterceptorICExceptionSetter(
// once in a while.
THREADED_TEST(InterceptorICSetterExceptions) {
interceptor_ic_exception_set_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(0, InterceptorICExceptionSetter);
LocalContext context(0, templ, v8::Handle<Value>());
v8::Handle<Value> value = CompileRun(
@@ -12221,12 +12738,13 @@ THREADED_TEST(InterceptorICSetterExceptions) {
// Test that we ignore null interceptors.
THREADED_TEST(NullNamedInterceptor) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(
static_cast<v8::NamedPropertyGetterCallback>(0));
LocalContext context;
- templ->Set("x", v8_num(42));
+ templ->Set(CcTest::isolate(), "x", v8_num(42));
v8::Handle<v8::Object> obj = templ->NewInstance();
context->Global()->Set(v8_str("obj"), obj);
v8::Handle<Value> value = CompileRun("obj.x");
@@ -12237,12 +12755,13 @@ THREADED_TEST(NullNamedInterceptor) {
// Test that we ignore null interceptors.
THREADED_TEST(NullIndexedInterceptor) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(
static_cast<v8::IndexedPropertyGetterCallback>(0));
LocalContext context;
- templ->Set("42", v8_num(42));
+ templ->Set(CcTest::isolate(), "42", v8_num(42));
v8::Handle<v8::Object> obj = templ->NewInstance();
context->Global()->Set(v8_str("obj"), obj);
v8::Handle<Value> value = CompileRun("obj[42]");
@@ -12252,8 +12771,9 @@ THREADED_TEST(NullIndexedInterceptor) {
THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetNamedPropertyHandler(InterceptorLoadXICGetter);
LocalContext env;
env->Global()->Set(v8_str("obj"),
@@ -12275,13 +12795,13 @@ THREADED_TEST(VariousGetPropertiesAndThrowingCallbacks) {
LocalContext context;
HandleScope scope(context->GetIsolate());
- Local<FunctionTemplate> templ = FunctionTemplate::New();
+ Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
Local<ObjectTemplate> instance_templ = templ->InstanceTemplate();
instance_templ->SetAccessor(v8_str("f"), ThrowingGetter);
Local<Object> instance = templ->GetFunction()->NewInstance();
- Local<Object> another = Object::New();
+ Local<Object> another = Object::New(context->GetIsolate());
another->SetPrototype(instance);
Local<Object> with_js_getter = CompileRun(
@@ -12365,10 +12885,12 @@ static void WebKitLike(Handle<Message> message, Handle<Value> data) {
THREADED_TEST(ExceptionsDoNotPropagatePastTryCatch) {
LocalContext context;
- HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ HandleScope scope(isolate);
Local<Function> func =
- FunctionTemplate::New(ThrowingCallbackWithTryCatch)->GetFunction();
+ FunctionTemplate::New(isolate,
+ ThrowingCallbackWithTryCatch)->GetFunction();
context->Global()->Set(v8_str("func"), func);
MessageCallback callbacks[] =
@@ -12409,16 +12931,17 @@ static void ChildGetter(Local<String> name,
THREADED_TEST(Overriding) {
i::FLAG_es5_readonly = true;
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
// Parent template.
- Local<v8::FunctionTemplate> parent_templ = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> parent_templ = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> parent_instance_templ =
parent_templ->InstanceTemplate();
parent_instance_templ->SetAccessor(v8_str("f"), ParentGetter);
// Template that inherits from the parent template.
- Local<v8::FunctionTemplate> child_templ = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> child_templ = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> child_instance_templ =
child_templ->InstanceTemplate();
child_templ->Inherit(parent_templ);
@@ -12472,10 +12995,11 @@ static void IsConstructHandler(
THREADED_TEST(IsConstructCall) {
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
// Function template with call handler.
- Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->SetCallHandler(IsConstructHandler);
LocalContext context;
@@ -12489,8 +13013,9 @@ THREADED_TEST(IsConstructCall) {
THREADED_TEST(ObjectProtoToString) {
- v8::HandleScope scope(CcTest::isolate());
- Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->SetClassName(v8_str("MyClass"));
LocalContext context;
@@ -12751,11 +13276,13 @@ static void ThrowInJSNoCatch(const v8::FunctionCallbackInfo<v8::Value>& args) {
// These are locking tests that don't need to be run again
// as part of the locking aggregation tests.
TEST(NestedLockers) {
- v8::Locker locker(CcTest::isolate());
- CHECK(v8::Locker::IsLocked(CcTest::isolate()));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Locker locker(isolate);
+ CHECK(v8::Locker::IsLocked(isolate));
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(ThrowInJS);
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(isolate, ThrowInJS);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("throw_in_js"), fun);
Local<Script> script = v8_compile("(function () {"
@@ -12777,7 +13304,7 @@ TEST(NestedLockersNoTryCatch) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(ThrowInJSNoCatch);
+ v8::FunctionTemplate::New(env->GetIsolate(), ThrowInJSNoCatch);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("throw_in_js"), fun);
Local<Script> script = v8_compile("(function () {"
@@ -12813,7 +13340,7 @@ THREADED_TEST(LockUnlockLock) {
v8::HandleScope scope(CcTest::isolate());
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(UnlockForAMoment);
+ v8::FunctionTemplate::New(CcTest::isolate(), UnlockForAMoment);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("unlock_for_a_moment"), fun);
Local<Script> script = v8_compile("(function () {"
@@ -12827,7 +13354,7 @@ THREADED_TEST(LockUnlockLock) {
v8::HandleScope scope(CcTest::isolate());
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(UnlockForAMoment);
+ v8::FunctionTemplate::New(CcTest::isolate(), UnlockForAMoment);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("unlock_for_a_moment"), fun);
Local<Script> script = v8_compile("(function () {"
@@ -12868,6 +13395,7 @@ static void CheckSurvivingGlobalObjectsCount(int expected) {
TEST(DontLeakGlobalObjects) {
// Regression test for issues 1139850 and 1174891.
+ i::FLAG_expose_gc = true;
v8::V8::Initialize();
for (int i = 0; i < 5; i++) {
@@ -12915,7 +13443,7 @@ TEST(CopyablePersistent) {
CopyableObject handle1;
{
v8::HandleScope scope(isolate);
- handle1.Reset(isolate, v8::Object::New());
+ handle1.Reset(isolate, v8::Object::New(isolate));
}
CHECK_EQ(initial_handles + 1, globals->global_handles_count());
CopyableObject handle2;
@@ -12948,8 +13476,8 @@ TEST(WeakCallbackApi) {
int initial_handles = globals->global_handles_count();
{
v8::HandleScope scope(isolate);
- v8::Local<v8::Object> obj = v8::Object::New();
- obj->Set(v8_str("key"), v8::Integer::New(231, isolate));
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ obj->Set(v8_str("key"), v8::Integer::New(isolate, 231));
v8::Persistent<v8::Object>* handle =
new v8::Persistent<v8::Object>(isolate, obj);
handle->SetWeak<v8::Object, v8::Persistent<v8::Object> >(handle,
@@ -12965,12 +13493,11 @@ TEST(WeakCallbackApi) {
v8::Persistent<v8::Object> some_object;
v8::Persistent<v8::Object> bad_handle;
-void NewPersistentHandleCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void*) {
- v8::HandleScope scope(isolate);
- bad_handle.Reset(isolate, some_object);
- handle->Dispose();
+void NewPersistentHandleCallback(
+ const v8::WeakCallbackData<v8::Object, v8::Persistent<v8::Object> >& data) {
+ v8::HandleScope scope(data.GetIsolate());
+ bad_handle.Reset(data.GetIsolate(), some_object);
+ data.GetParameter()->Reset();
}
@@ -12981,28 +13508,27 @@ THREADED_TEST(NewPersistentHandleFromWeakCallback) {
v8::Persistent<v8::Object> handle1, handle2;
{
v8::HandleScope scope(isolate);
- some_object.Reset(isolate, v8::Object::New());
- handle1.Reset(isolate, v8::Object::New());
- handle2.Reset(isolate, v8::Object::New());
+ some_object.Reset(isolate, v8::Object::New(isolate));
+ handle1.Reset(isolate, v8::Object::New(isolate));
+ handle2.Reset(isolate, v8::Object::New(isolate));
}
// Note: order is implementation dependent alas: currently
// global handle nodes are processed by PostGarbageCollectionProcessing
// in reverse allocation order, so if second allocated handle is deleted,
// weak callback of the first handle would be able to 'reallocate' it.
- handle1.MakeWeak<v8::Value, void>(NULL, NewPersistentHandleCallback);
- handle2.Dispose();
- CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ handle1.SetWeak(&handle1, NewPersistentHandleCallback);
+ handle2.Reset();
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
}
v8::Persistent<v8::Object> to_be_disposed;
-void DisposeAndForceGcCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void*) {
- to_be_disposed.Dispose();
+void DisposeAndForceGcCallback(
+ const v8::WeakCallbackData<v8::Object, v8::Persistent<v8::Object> >& data) {
+ to_be_disposed.Reset();
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- handle->Dispose();
+ data.GetParameter()->Reset();
}
@@ -13013,26 +13539,25 @@ THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
v8::Persistent<v8::Object> handle1, handle2;
{
v8::HandleScope scope(isolate);
- handle1.Reset(isolate, v8::Object::New());
- handle2.Reset(isolate, v8::Object::New());
+ handle1.Reset(isolate, v8::Object::New(isolate));
+ handle2.Reset(isolate, v8::Object::New(isolate));
}
- handle1.MakeWeak<v8::Value, void>(NULL, DisposeAndForceGcCallback);
+ handle1.SetWeak(&handle1, DisposeAndForceGcCallback);
to_be_disposed.Reset(isolate, handle2);
- CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
}
-void DisposingCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void*) {
- handle->Dispose();
+void DisposingCallback(
+ const v8::WeakCallbackData<v8::Object, v8::Persistent<v8::Object> >& data) {
+ data.GetParameter()->Reset();
}
-void HandleCreatingCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void*) {
- v8::HandleScope scope(isolate);
- v8::Persistent<v8::Object>(isolate, v8::Object::New());
- handle->Dispose();
+void HandleCreatingCallback(
+ const v8::WeakCallbackData<v8::Object, v8::Persistent<v8::Object> >& data) {
+ v8::HandleScope scope(data.GetIsolate());
+ v8::Persistent<v8::Object>(data.GetIsolate(),
+ v8::Object::New(data.GetIsolate()));
+ data.GetParameter()->Reset();
}
@@ -13043,13 +13568,13 @@ THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
v8::Persistent<v8::Object> handle1, handle2, handle3;
{
v8::HandleScope scope(isolate);
- handle3.Reset(isolate, v8::Object::New());
- handle2.Reset(isolate, v8::Object::New());
- handle1.Reset(isolate, v8::Object::New());
+ handle3.Reset(isolate, v8::Object::New(isolate));
+ handle2.Reset(isolate, v8::Object::New(isolate));
+ handle1.Reset(isolate, v8::Object::New(isolate));
}
- handle2.MakeWeak<v8::Value, void>(NULL, DisposingCallback);
- handle3.MakeWeak<v8::Value, void>(NULL, HandleCreatingCallback);
- CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ handle2.SetWeak(&handle2, DisposingCallback);
+ handle3.SetWeak(&handle3, HandleCreatingCallback);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
}
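
The weak-handle tests above all migrate from the old MakeWeak(NULL, callback)/Dispose() idiom to SetWeak with a typed v8::WeakCallbackData parameter and Reset(). A minimal sketch of the new shape, using only what the hunks themselves use:

// Sketch: a weak persistent handle with the WeakCallbackData-based API.
static v8::Persistent<v8::Object> g_weak_handle;

static void OnWeak(
    const v8::WeakCallbackData<v8::Object, v8::Persistent<v8::Object> >& data) {
  // The typed parameter is the persistent itself; Reset() replaces Dispose().
  data.GetParameter()->Reset();
}

static void MakeItWeak(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  g_weak_handle.Reset(isolate, v8::Object::New(isolate));
  g_weak_handle.SetWeak(&g_weak_handle, OnWeak);  // no more void* + MakeWeak
}
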
@@ -13077,10 +13602,10 @@ THREADED_TEST(CheckForCrossContextObjectLiterals) {
static v8::Handle<Value> NestedScope(v8::Local<Context> env) {
- v8::HandleScope inner(env->GetIsolate());
+ v8::EscapableHandleScope inner(env->GetIsolate());
env->Enter();
- v8::Handle<Value> three = v8_num(3);
- v8::Handle<Value> value = inner.Close(three);
+ v8::Local<Value> three = v8_num(3);
+ v8::Local<Value> value = inner.Escape(three);
env->Exit();
return value;
}
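
NestedScope above shows the replacement for the HandleScope::Close idiom: an EscapableHandleScope whose Escape() promotes one local into the enclosing scope. A sketch of the same shape outside the test harness (v8_num is the test's own helper, so a plain Integer::New stands in for it here):

// Sketch: returning a local out of a nested scope via Escape().
static v8::Local<v8::Value> MakeThree(v8::Isolate* isolate) {
  v8::EscapableHandleScope inner(isolate);
  v8::Local<v8::Value> three = v8::Integer::New(isolate, 3);
  return inner.Escape(three);  // Close() on a plain HandleScope is no longer used
}
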
@@ -13337,8 +13862,8 @@ void SetFunctionEntryHookTest::RunLoopInNewEnv(v8::Isolate* isolate) {
v8::Local<Context> env = Context::New(isolate);
env->Enter();
- Local<ObjectTemplate> t = ObjectTemplate::New();
- t->Set(v8_str("asdf"), v8::FunctionTemplate::New(RuntimeCallback));
+ Local<ObjectTemplate> t = ObjectTemplate::New(isolate);
+ t->Set(v8_str("asdf"), v8::FunctionTemplate::New(isolate, RuntimeCallback));
env->Global()->Set(v8_str("obj"), t->NewInstance());
const char* script =
@@ -13683,20 +14208,17 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
}
-static int64_t cast(intptr_t x) { return static_cast<int64_t>(x); }
-
-
THREADED_TEST(ExternalAllocatedMemory) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer(isolate);
v8::Local<Context> env(Context::New(isolate));
CHECK(!env.IsEmpty());
- const intptr_t kSize = 1024*1024;
- int64_t baseline = cast(isolate->AdjustAmountOfExternalAllocatedMemory(0));
- CHECK_EQ(baseline + cast(kSize),
- cast(isolate->AdjustAmountOfExternalAllocatedMemory(kSize)));
+ const int64_t kSize = 1024*1024;
+ int64_t baseline = isolate->AdjustAmountOfExternalAllocatedMemory(0);
+ CHECK_EQ(baseline + kSize,
+ isolate->AdjustAmountOfExternalAllocatedMemory(kSize));
CHECK_EQ(baseline,
- cast(isolate->AdjustAmountOfExternalAllocatedMemory(-kSize)));
+ isolate->AdjustAmountOfExternalAllocatedMemory(-kSize));
}
@@ -13709,10 +14231,10 @@ THREADED_TEST(Regress54) {
v8::HandleScope outer(isolate);
static v8::Persistent<v8::ObjectTemplate> templ;
if (templ.IsEmpty()) {
- v8::HandleScope inner(isolate);
- v8::Handle<v8::ObjectTemplate> local = v8::ObjectTemplate::New();
+ v8::EscapableHandleScope inner(isolate);
+ v8::Local<v8::ObjectTemplate> local = v8::ObjectTemplate::New(isolate);
local->SetInternalFieldCount(1);
- templ.Reset(isolate, inner.Close(local));
+ templ.Reset(isolate, inner.Escape(local));
}
v8::Handle<v8::Object> result =
v8::Local<v8::ObjectTemplate>::New(isolate, templ)->NewInstance();
@@ -13726,7 +14248,8 @@ TEST(CatchStackOverflow) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch;
- v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(
+ v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::NewFromUtf8(
+ context->GetIsolate(),
"function f() {"
" return f();"
"}"
@@ -13762,7 +14285,8 @@ static void CheckTryCatchSourceInfo(v8::Handle<v8::Script> script,
THREADED_TEST(TryCatchSourceInfo) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::String> source = v8::String::New(
+ v8::Handle<v8::String> source = v8::String::NewFromUtf8(
+ context->GetIsolate(),
"function Foo() {\n"
" return Bar();\n"
"}\n"
@@ -13780,16 +14304,20 @@ THREADED_TEST(TryCatchSourceInfo) {
const char* resource_name;
v8::Handle<v8::Script> script;
resource_name = "test.js";
- script = v8::Script::Compile(source, v8::String::New(resource_name));
+ script = v8::Script::Compile(
+ source, v8::String::NewFromUtf8(context->GetIsolate(), resource_name));
CheckTryCatchSourceInfo(script, resource_name, 0);
resource_name = "test1.js";
- v8::ScriptOrigin origin1(v8::String::New(resource_name));
+ v8::ScriptOrigin origin1(
+ v8::String::NewFromUtf8(context->GetIsolate(), resource_name));
script = v8::Script::Compile(source, &origin1);
CheckTryCatchSourceInfo(script, resource_name, 0);
resource_name = "test2.js";
- v8::ScriptOrigin origin2(v8::String::New(resource_name), v8::Integer::New(7));
+ v8::ScriptOrigin origin2(
+ v8::String::NewFromUtf8(context->GetIsolate(), resource_name),
+ v8::Integer::New(context->GetIsolate(), 7));
script = v8::Script::Compile(source, &origin2);
CheckTryCatchSourceInfo(script, resource_name, 7);
}
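The recurring change in these hunks is that the String, Integer, and related factories now take an explicit isolate. A minimal sketch of compiling a script with a named origin under the new signatures, with an illustrative helper name:

// Assumes <v8.h> and an open HandleScope in the caller; compiles source with
// a resource name and line offset using the isolate-taking factories.
v8::Handle<v8::Script> CompileWithOrigin(v8::Isolate* isolate,
                                         const char* source,
                                         const char* name,
                                         int line_offset) {
  v8::Handle<v8::String> src = v8::String::NewFromUtf8(isolate, source);
  v8::ScriptOrigin origin(v8::String::NewFromUtf8(isolate, name),
                          v8::Integer::New(isolate, line_offset));
  return v8::Script::Compile(src, &origin);
}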
@@ -13798,12 +14326,14 @@ THREADED_TEST(TryCatchSourceInfo) {
THREADED_TEST(CompilationCache) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::String> source0 = v8::String::New("1234");
- v8::Handle<v8::String> source1 = v8::String::New("1234");
- v8::Handle<v8::Script> script0 =
- v8::Script::Compile(source0, v8::String::New("test.js"));
- v8::Handle<v8::Script> script1 =
- v8::Script::Compile(source1, v8::String::New("test.js"));
+ v8::Handle<v8::String> source0 =
+ v8::String::NewFromUtf8(context->GetIsolate(), "1234");
+ v8::Handle<v8::String> source1 =
+ v8::String::NewFromUtf8(context->GetIsolate(), "1234");
+ v8::Handle<v8::Script> script0 = v8::Script::Compile(
+ source0, v8::String::NewFromUtf8(context->GetIsolate(), "test.js"));
+ v8::Handle<v8::Script> script1 = v8::Script::Compile(
+ source1, v8::String::NewFromUtf8(context->GetIsolate(), "test.js"));
v8::Handle<v8::Script> script2 =
v8::Script::Compile(source0); // different origin
CHECK_EQ(1234, script0->Run()->Int32Value());
@@ -13821,9 +14351,11 @@ static void FunctionNameCallback(
THREADED_TEST(CallbackFunctionName) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<ObjectTemplate> t = ObjectTemplate::New();
- t->Set(v8_str("asdf"), v8::FunctionTemplate::New(FunctionNameCallback));
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> t = ObjectTemplate::New(isolate);
+ t->Set(v8_str("asdf"),
+ v8::FunctionTemplate::New(isolate, FunctionNameCallback));
context->Global()->Set(v8_str("obj"), t->NewInstance());
v8::Handle<v8::Value> value = CompileRun("obj.asdf.name");
CHECK(value->IsString());
@@ -13835,31 +14367,36 @@ THREADED_TEST(CallbackFunctionName) {
THREADED_TEST(DateAccess) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::Value> date = v8::Date::New(1224744689038.0);
+ v8::Handle<v8::Value> date =
+ v8::Date::New(context->GetIsolate(), 1224744689038.0);
CHECK(date->IsDate());
CHECK_EQ(1224744689038.0, date.As<v8::Date>()->ValueOf());
}
-void CheckProperties(v8::Handle<v8::Value> val, int elmc, const char* elmv[]) {
+void CheckProperties(v8::Isolate* isolate,
+ v8::Handle<v8::Value> val,
+ int elmc,
+ const char* elmv[]) {
v8::Handle<v8::Object> obj = val.As<v8::Object>();
v8::Handle<v8::Array> props = obj->GetPropertyNames();
CHECK_EQ(elmc, props->Length());
for (int i = 0; i < elmc; i++) {
- v8::String::Utf8Value elm(props->Get(v8::Integer::New(i)));
+ v8::String::Utf8Value elm(props->Get(v8::Integer::New(isolate, i)));
CHECK_EQ(elmv[i], *elm);
}
}
-void CheckOwnProperties(v8::Handle<v8::Value> val,
+void CheckOwnProperties(v8::Isolate* isolate,
+ v8::Handle<v8::Value> val,
int elmc,
const char* elmv[]) {
v8::Handle<v8::Object> obj = val.As<v8::Object>();
v8::Handle<v8::Array> props = obj->GetOwnPropertyNames();
CHECK_EQ(elmc, props->Length());
for (int i = 0; i < elmc; i++) {
- v8::String::Utf8Value elm(props->Get(v8::Integer::New(i)));
+ v8::String::Utf8Value elm(props->Get(v8::Integer::New(isolate, i)));
CHECK_EQ(elmv[i], *elm);
}
}
@@ -13867,8 +14404,10 @@ void CheckOwnProperties(v8::Handle<v8::Value> val,
THREADED_TEST(PropertyEnumeration) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::New(
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::NewFromUtf8(
+ context->GetIsolate(),
"var result = [];"
"result[0] = {};"
"result[1] = {a: 1, b: 2};"
@@ -13881,29 +14420,39 @@ THREADED_TEST(PropertyEnumeration) {
CHECK_EQ(4, elms->Length());
int elmc0 = 0;
const char** elmv0 = NULL;
- CheckProperties(elms->Get(v8::Integer::New(0)), elmc0, elmv0);
- CheckOwnProperties(elms->Get(v8::Integer::New(0)), elmc0, elmv0);
+ CheckProperties(
+ isolate, elms->Get(v8::Integer::New(isolate, 0)), elmc0, elmv0);
+ CheckOwnProperties(
+ isolate, elms->Get(v8::Integer::New(isolate, 0)), elmc0, elmv0);
int elmc1 = 2;
const char* elmv1[] = {"a", "b"};
- CheckProperties(elms->Get(v8::Integer::New(1)), elmc1, elmv1);
- CheckOwnProperties(elms->Get(v8::Integer::New(1)), elmc1, elmv1);
+ CheckProperties(
+ isolate, elms->Get(v8::Integer::New(isolate, 1)), elmc1, elmv1);
+ CheckOwnProperties(
+ isolate, elms->Get(v8::Integer::New(isolate, 1)), elmc1, elmv1);
int elmc2 = 3;
const char* elmv2[] = {"0", "1", "2"};
- CheckProperties(elms->Get(v8::Integer::New(2)), elmc2, elmv2);
- CheckOwnProperties(elms->Get(v8::Integer::New(2)), elmc2, elmv2);
+ CheckProperties(
+ isolate, elms->Get(v8::Integer::New(isolate, 2)), elmc2, elmv2);
+ CheckOwnProperties(
+ isolate, elms->Get(v8::Integer::New(isolate, 2)), elmc2, elmv2);
int elmc3 = 4;
const char* elmv3[] = {"w", "z", "x", "y"};
- CheckProperties(elms->Get(v8::Integer::New(3)), elmc3, elmv3);
+ CheckProperties(
+ isolate, elms->Get(v8::Integer::New(isolate, 3)), elmc3, elmv3);
int elmc4 = 2;
const char* elmv4[] = {"w", "z"};
- CheckOwnProperties(elms->Get(v8::Integer::New(3)), elmc4, elmv4);
+ CheckOwnProperties(
+ isolate, elms->Get(v8::Integer::New(isolate, 3)), elmc4, elmv4);
}
THREADED_TEST(PropertyEnumeration2) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::New(
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::NewFromUtf8(
+ context->GetIsolate(),
"var result = [];"
"result[0] = {};"
"result[1] = {a: 1, b: 2};"
@@ -13916,9 +14465,10 @@ THREADED_TEST(PropertyEnumeration2) {
CHECK_EQ(4, elms->Length());
int elmc0 = 0;
const char** elmv0 = NULL;
- CheckProperties(elms->Get(v8::Integer::New(0)), elmc0, elmv0);
+ CheckProperties(isolate,
+ elms->Get(v8::Integer::New(isolate, 0)), elmc0, elmv0);
- v8::Handle<v8::Value> val = elms->Get(v8::Integer::New(0));
+ v8::Handle<v8::Value> val = elms->Get(v8::Integer::New(isolate, 0));
v8::Handle<v8::Array> props = val.As<v8::Object>()->GetPropertyNames();
CHECK_EQ(0, props->Length());
for (uint32_t i = 0; i < props->Length(); i++) {
@@ -13946,7 +14496,7 @@ THREADED_TEST(DisableAccessChecksWhileConfiguring) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessCheckCallbacks(NamedSetAccessBlocker,
IndexedSetAccessBlocker);
templ->Set(v8_str("x"), v8::True(isolate));
@@ -13976,8 +14526,9 @@ static bool IndexedGetAccessBlocker(Local<v8::Object> obj,
THREADED_TEST(AccessChecksReenabledCorrectly) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessCheckCallbacks(NamedGetAccessBlocker,
IndexedGetAccessBlocker);
templ->Set(v8_str("a"), v8_str("a"));
@@ -13992,7 +14543,7 @@ THREADED_TEST(AccessChecksReenabledCorrectly) {
for (char k = '0'; k <= '9'; k++) {
buf[2] = k;
buf[3] = 0;
- templ->Set(v8_str(buf), v8::Number::New(k));
+ templ->Set(v8_str(buf), v8::Number::New(isolate, k));
}
}
}
@@ -14016,7 +14567,8 @@ THREADED_TEST(AccessChecksReenabledCorrectly) {
THREADED_TEST(AccessControlRepeatedContextCreation) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallbacks(NamedSetAccessBlocker,
IndexedSetAccessBlocker);
i::Handle<i::ObjectTemplateInfo> internal_template =
@@ -14037,7 +14589,8 @@ THREADED_TEST(TurnOnAccessCheck) {
// Create an environment with access check to the global object disabled by
// default.
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallbacks(NamedGetAccessBlocker,
IndexedGetAccessBlocker,
v8::Handle<v8::Value>(),
@@ -14081,8 +14634,10 @@ THREADED_TEST(TurnOnAccessCheck) {
}
// Detach the global and turn on access check.
+ Local<Object> hidden_global = Local<Object>::Cast(
+ context->Global()->GetPrototype());
context->DetachGlobal();
- context->Global()->TurnOnAccessCheck();
+ hidden_global->TurnOnAccessCheck();
// Failing access check to property get results in undefined.
CHECK(f1->Call(global, 0, NULL)->IsUndefined());
@@ -14119,7 +14674,8 @@ THREADED_TEST(TurnOnAccessCheckAndRecompile) {
// Create an environment with access check to the global object disabled by
// default. When the registered access checker will block access to properties
// a and h.
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallbacks(NamedGetAccessBlockAandH,
IndexedGetAccessBlocker,
v8::Handle<v8::Value>(),
@@ -14166,8 +14722,10 @@ THREADED_TEST(TurnOnAccessCheckAndRecompile) {
// Detach the global and turn on access check now blocking access to property
// a and function h.
+ Local<Object> hidden_global = Local<Object>::Cast(
+ context->Global()->GetPrototype());
context->DetachGlobal();
- context->Global()->TurnOnAccessCheck();
+ hidden_global->TurnOnAccessCheck();
// Failing access check to property get results in undefined.
CHECK(f1->Call(global, 0, NULL)->IsUndefined());
@@ -14183,11 +14741,11 @@ THREADED_TEST(TurnOnAccessCheckAndRecompile) {
// Now compile the source again. And get the newly compiled functions, except
// for h for which access is blocked.
CompileRun(source);
- f1 = Local<Function>::Cast(context->Global()->Get(v8_str("f1")));
- f2 = Local<Function>::Cast(context->Global()->Get(v8_str("f2")));
- g1 = Local<Function>::Cast(context->Global()->Get(v8_str("g1")));
- g2 = Local<Function>::Cast(context->Global()->Get(v8_str("g2")));
- CHECK(context->Global()->Get(v8_str("h"))->IsUndefined());
+ f1 = Local<Function>::Cast(hidden_global->Get(v8_str("f1")));
+ f2 = Local<Function>::Cast(hidden_global->Get(v8_str("f2")));
+ g1 = Local<Function>::Cast(hidden_global->Get(v8_str("g1")));
+ g2 = Local<Function>::Cast(hidden_global->Get(v8_str("g2")));
+ CHECK(hidden_global->Get(v8_str("h"))->IsUndefined());
// Failing access check to property get results in undefined.
CHECK(f1->Call(global, 0, NULL)->IsUndefined());
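Both TurnOnAccessCheck tests now fetch the hidden global (the prototype of the global proxy) before detaching, because after DetachGlobal the proxy no longer reaches the real global object. A sketch of that sequence, assuming an entered context whose global template installed access-check callbacks:

// Assumes <v8.h>; detaches the proxy, then turns access checks on for the
// real global object behind it.
void DetachAndEnableChecks(v8::Local<v8::Context> context) {
  v8::Local<v8::Object> proxy = context->Global();
  v8::Local<v8::Object> hidden_global =
      v8::Local<v8::Object>::Cast(proxy->GetPrototype());
  context->DetachGlobal();
  hidden_global->TurnOnAccessCheck();
  // Blocked properties must now be looked up through hidden_global.
}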
@@ -14207,9 +14765,10 @@ TEST(PreCompile) {
// a workaround for now to make this test not fail.
v8::V8::Initialize();
v8::Isolate* isolate = CcTest::isolate();
+ HandleScope handle_scope(isolate);
const char* script = "function foo(a) { return a+1; }";
- v8::ScriptData* sd =
- v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
+ v8::ScriptData* sd = v8::ScriptData::PreCompile(v8::String::NewFromUtf8(
+ isolate, script, v8::String::kNormalString, i::StrLength(script)));
CHECK_NE(sd->Length(), 0);
CHECK_NE(sd->Data(), NULL);
CHECK(!sd->HasError());
@@ -14220,9 +14779,10 @@ TEST(PreCompile) {
TEST(PreCompileWithError) {
v8::V8::Initialize();
v8::Isolate* isolate = CcTest::isolate();
+ HandleScope handle_scope(isolate);
const char* script = "function foo(a) { return 1 * * 2; }";
- v8::ScriptData* sd =
- v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
+ v8::ScriptData* sd = v8::ScriptData::PreCompile(v8::String::NewFromUtf8(
+ isolate, script, v8::String::kNormalString, i::StrLength(script)));
CHECK(sd->HasError());
delete sd;
}
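ScriptData::PreCompile now takes a Handle<String> instead of (isolate, const char*, length). A minimal sketch using the same kNormalString form as the tests, with an illustrative helper name:

// Assumes <v8.h> and the cctest helpers; produces preparse data for a source
// string. The caller owns and must delete the returned ScriptData.
v8::ScriptData* Preparse(v8::Isolate* isolate, const char* script) {
  v8::HandleScope scope(isolate);
  v8::Handle<v8::String> src = v8::String::NewFromUtf8(
      isolate, script, v8::String::kNormalString, i::StrLength(script));
  return v8::ScriptData::PreCompile(src);
}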
@@ -14231,9 +14791,10 @@ TEST(PreCompileWithError) {
TEST(Regress31661) {
v8::V8::Initialize();
v8::Isolate* isolate = CcTest::isolate();
+ HandleScope handle_scope(isolate);
const char* script = " The Definintive Guide";
- v8::ScriptData* sd =
- v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
+ v8::ScriptData* sd = v8::ScriptData::PreCompile(v8::String::NewFromUtf8(
+ isolate, script, v8::String::kNormalString, i::StrLength(script)));
CHECK(sd->HasError());
delete sd;
}
@@ -14243,9 +14804,10 @@ TEST(Regress31661) {
TEST(PreCompileSerialization) {
v8::V8::Initialize();
v8::Isolate* isolate = CcTest::isolate();
+ HandleScope handle_scope(isolate);
const char* script = "function foo(a) { return a+1; }";
- v8::ScriptData* sd =
- v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
+ v8::ScriptData* sd = v8::ScriptData::PreCompile(v8::String::NewFromUtf8(
+ isolate, script, v8::String::kNormalString, i::StrLength(script)));
// Serialize.
int serialized_data_length = sd->Length();
@@ -14263,6 +14825,7 @@ TEST(PreCompileSerialization) {
delete sd;
delete deserialized_sd;
+ i::DeleteArray(serialized_data);
}
@@ -14288,8 +14851,8 @@ TEST(PreCompileInvalidPreparseDataError) {
const char* script = "function foo(){ return 5;}\n"
"function bar(){ return 6 + 7;} foo();";
- v8::ScriptData* sd =
- v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
+ v8::ScriptData* sd = v8::ScriptData::PreCompile(v8::String::NewFromUtf8(
+ isolate, script, v8::String::kNormalString, i::StrLength(script)));
CHECK(!sd->HasError());
// ScriptDataImpl private implementation details
const int kHeaderSize = i::PreparseDataConstants::kHeaderSize;
@@ -14303,7 +14866,7 @@ TEST(PreCompileInvalidPreparseDataError) {
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryEndOffset] = 0;
v8::TryCatch try_catch;
- Local<String> source = String::New(script);
+ Local<String> source = String::NewFromUtf8(isolate, script);
Local<Script> compiled_script = Script::New(source, NULL, sd);
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Message()->Get());
@@ -14311,11 +14874,13 @@ TEST(PreCompileInvalidPreparseDataError) {
*exception_value);
try_catch.Reset();
+ delete sd;
// Overwrite function bar's start position with 200. The function entry
// will not be found when searching for it by position and we should fall
// back on eager compilation.
- sd = v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
+ sd = v8::ScriptData::PreCompile(v8::String::NewFromUtf8(
+ isolate, script, v8::String::kNormalString, i::StrLength(script)));
sd_data = reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
200;
@@ -14326,42 +14891,6 @@ TEST(PreCompileInvalidPreparseDataError) {
}
-// Verifies that the Handle<String> and const char* versions of the API produce
-// the same results (at least for one trivial case).
-TEST(PreCompileAPIVariationsAreSame) {
- v8::V8::Initialize();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
-
- const char* cstring = "function foo(a) { return a+1; }";
-
- v8::ScriptData* sd_from_cstring =
- v8::ScriptData::PreCompile(isolate, cstring, i::StrLength(cstring));
-
- TestAsciiResource* resource = new TestAsciiResource(cstring);
- v8::ScriptData* sd_from_external_string = v8::ScriptData::PreCompile(
- v8::String::NewExternal(resource));
-
- v8::ScriptData* sd_from_string = v8::ScriptData::PreCompile(
- v8::String::New(cstring));
-
- CHECK_EQ(sd_from_cstring->Length(), sd_from_external_string->Length());
- CHECK_EQ(0, memcmp(sd_from_cstring->Data(),
- sd_from_external_string->Data(),
- sd_from_cstring->Length()));
-
- CHECK_EQ(sd_from_cstring->Length(), sd_from_string->Length());
- CHECK_EQ(0, memcmp(sd_from_cstring->Data(),
- sd_from_string->Data(),
- sd_from_cstring->Length()));
-
-
- delete sd_from_cstring;
- delete sd_from_external_string;
- delete sd_from_string;
-}
-
-
// This tests that we do not allow dictionary load/call inline caches
// to use functions that have not yet been compiled. The potential
// problem of loading a function that has not yet been compiled can
@@ -14416,120 +14945,11 @@ THREADED_TEST(CrossContextNew) {
}
-class ApplyInterruptTest {
- public:
- ApplyInterruptTest() : block_(0) {}
- ~ApplyInterruptTest() {}
- void RunTest() {
- gc_count_ = 0;
- gc_during_apply_ = 0;
- apply_success_ = false;
- gc_success_ = false;
- GCThread gc_thread(this);
- gc_thread.Start();
- v8::Isolate* isolate = CcTest::isolate();
- v8::Locker::StartPreemption(isolate, 1);
-
- LongRunningApply();
- {
- v8::Unlocker unlock(isolate);
- gc_thread.Join();
- }
- v8::Locker::StopPreemption(isolate);
- CHECK(apply_success_);
- CHECK(gc_success_);
- }
-
- private:
- // Number of garbage collections required.
- static const int kRequiredGCs = 2;
-
- class GCThread : public i::Thread {
- public:
- explicit GCThread(ApplyInterruptTest* test)
- : Thread("GCThread"), test_(test) {}
- virtual void Run() {
- test_->CollectGarbage();
- }
- private:
- ApplyInterruptTest* test_;
- };
-
- void CollectGarbage() {
- block_.Wait();
- while (gc_during_apply_ < kRequiredGCs) {
- {
- v8::Locker lock(CcTest::isolate());
- v8::Isolate::Scope isolate_scope(CcTest::isolate());
- CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- gc_count_++;
- }
- i::OS::Sleep(1);
- }
- gc_success_ = true;
- }
-
- void LongRunningApply() {
- block_.Signal();
- int rounds = 0;
- while (gc_during_apply_ < kRequiredGCs) {
- int gc_before = gc_count_;
- {
- const char* c_source =
- "function do_very_little(bar) {"
- " this.foo = bar;"
- "}"
- "for (var i = 0; i < 100000; i++) {"
- " do_very_little.apply(this, ['bar']);"
- "}";
- Local<String> source = String::New(c_source);
- Local<Script> script = Script::Compile(source);
- Local<Value> result = script->Run();
- // Check that no exception was thrown.
- CHECK(!result.IsEmpty());
- }
- int gc_after = gc_count_;
- gc_during_apply_ += gc_after - gc_before;
- rounds++;
- }
- apply_success_ = true;
- }
-
- i::Semaphore block_;
- int gc_count_;
- int gc_during_apply_;
- bool apply_success_;
- bool gc_success_;
-};
-
-
-// Test that nothing bad happens if we get a preemption just when we were
-// about to do an apply().
-TEST(ApplyInterruption) {
- v8::Locker lock(CcTest::isolate());
- v8::V8::Initialize();
- v8::HandleScope scope(CcTest::isolate());
- Local<Context> local_env;
- {
- LocalContext env;
- local_env = env.local();
- }
-
- // Local context should still be live.
- CHECK(!local_env.IsEmpty());
- local_env->Enter();
-
- // Should complete without problems.
- ApplyInterruptTest().RunTest();
-
- local_env->Exit();
-}
-
-
// Verify that we can clone an object
TEST(ObjectClone) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
const char* sample =
"var rv = {};" \
@@ -14544,19 +14964,19 @@ TEST(ObjectClone) {
obj->Set(v8_str("gamma"), v8_str("cloneme"));
CHECK_EQ(v8_str("hello"), obj->Get(v8_str("alpha")));
- CHECK_EQ(v8::Integer::New(123), obj->Get(v8_str("beta")));
+ CHECK_EQ(v8::Integer::New(isolate, 123), obj->Get(v8_str("beta")));
CHECK_EQ(v8_str("cloneme"), obj->Get(v8_str("gamma")));
// Clone it.
Local<v8::Object> clone = obj->Clone();
CHECK_EQ(v8_str("hello"), clone->Get(v8_str("alpha")));
- CHECK_EQ(v8::Integer::New(123), clone->Get(v8_str("beta")));
+ CHECK_EQ(v8::Integer::New(isolate, 123), clone->Get(v8_str("beta")));
CHECK_EQ(v8_str("cloneme"), clone->Get(v8_str("gamma")));
// Set a property on the clone, verify each object.
- clone->Set(v8_str("beta"), v8::Integer::New(456));
- CHECK_EQ(v8::Integer::New(123), obj->Get(v8_str("beta")));
- CHECK_EQ(v8::Integer::New(456), clone->Get(v8_str("beta")));
+ clone->Set(v8_str("beta"), v8::Integer::New(isolate, 456));
+ CHECK_EQ(v8::Integer::New(isolate, 123), obj->Get(v8_str("beta")));
+ CHECK_EQ(v8::Integer::New(isolate, 456), clone->Get(v8_str("beta")));
}
@@ -14668,11 +15088,11 @@ THREADED_TEST(MorphCompositeStringTest) {
const char* expected_slice_on_cons =
"ow is the time for all good men to come to the aid of the party"
"Now is the time for all good men to come to the aid of the part";
- CHECK_EQ(String::New(expected_cons),
+ CHECK_EQ(String::NewFromUtf8(env->GetIsolate(), expected_cons),
env->Global()->Get(v8_str("cons")));
- CHECK_EQ(String::New(expected_slice),
+ CHECK_EQ(String::NewFromUtf8(env->GetIsolate(), expected_slice),
env->Global()->Get(v8_str("slice")));
- CHECK_EQ(String::New(expected_slice_on_cons),
+ CHECK_EQ(String::NewFromUtf8(env->GetIsolate(), expected_slice_on_cons),
env->Global()->Get(v8_str("slice_on_cons")));
}
i::DeleteArray(two_byte_string);
@@ -14695,12 +15115,10 @@ TEST(CompileExternalTwoByteSource) {
// Compile the sources as external two byte strings.
for (int i = 0; ascii_sources[i] != NULL; i++) {
uint16_t* two_byte_string = AsciiToTwoByteString(ascii_sources[i]);
- UC16VectorResource uc16_resource(
- i::Vector<const uint16_t>(two_byte_string,
- i::StrLength(ascii_sources[i])));
- v8::Local<v8::String> source = v8::String::NewExternal(&uc16_resource);
+ TestResource* uc16_resource = new TestResource(two_byte_string);
+ v8::Local<v8::String> source =
+ v8::String::NewExternal(context->GetIsolate(), uc16_resource);
v8::Script::Compile(source);
- i::DeleteArray(two_byte_string);
}
}
@@ -14772,8 +15190,8 @@ TEST(RegExpInterruption) {
timeout_thread.Join();
- delete regexp_interruption_data.string_resource;
- regexp_interruption_data.string.Dispose();
+ regexp_interruption_data.string.Reset();
+ i::DeleteArray(uc16_content);
}
#endif // V8_INTERPRETED_REGEXP
@@ -14783,24 +15201,25 @@ TEST(RegExpInterruption) {
// is a read-only property in the prototype chain.
TEST(ReadOnlyPropertyInGlobalProto) {
i::FLAG_es5_readonly = true;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
LocalContext context(0, templ);
v8::Handle<v8::Object> global = context->Global();
v8::Handle<v8::Object> global_proto =
v8::Handle<v8::Object>::Cast(global->Get(v8_str("__proto__")));
- global_proto->Set(v8_str("x"), v8::Integer::New(0), v8::ReadOnly);
- global_proto->Set(v8_str("y"), v8::Integer::New(0), v8::ReadOnly);
+ global_proto->Set(v8_str("x"), v8::Integer::New(isolate, 0), v8::ReadOnly);
+ global_proto->Set(v8_str("y"), v8::Integer::New(isolate, 0), v8::ReadOnly);
// Check without 'eval' or 'with'.
v8::Handle<v8::Value> res =
CompileRun("function f() { x = 42; return x; }; f()");
- CHECK_EQ(v8::Integer::New(0), res);
+ CHECK_EQ(v8::Integer::New(isolate, 0), res);
// Check with 'eval'.
res = CompileRun("function f() { eval('1'); y = 43; return y; }; f()");
- CHECK_EQ(v8::Integer::New(0), res);
+ CHECK_EQ(v8::Integer::New(isolate, 0), res);
// Check with 'with'.
res = CompileRun("function f() { with (this) { y = 44 }; return y; }; f()");
- CHECK_EQ(v8::Integer::New(0), res);
+ CHECK_EQ(v8::Integer::New(isolate, 0), res);
}
static int force_set_set_count = 0;
@@ -14836,22 +15255,25 @@ TEST(ForceSet) {
force_set_set_count = 0;
pass_on_get = false;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
- v8::Handle<v8::String> access_property = v8::String::New("a");
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ v8::Handle<v8::String> access_property =
+ v8::String::NewFromUtf8(isolate, "a");
templ->SetAccessor(access_property, ForceSetGetter, ForceSetSetter);
LocalContext context(NULL, templ);
v8::Handle<v8::Object> global = context->Global();
// Ordinary properties
- v8::Handle<v8::String> simple_property = v8::String::New("p");
- global->Set(simple_property, v8::Int32::New(4), v8::ReadOnly);
+ v8::Handle<v8::String> simple_property =
+ v8::String::NewFromUtf8(isolate, "p");
+ global->Set(simple_property, v8::Int32::New(isolate, 4), v8::ReadOnly);
CHECK_EQ(4, global->Get(simple_property)->Int32Value());
// This should fail because the property is read-only
- global->Set(simple_property, v8::Int32::New(5));
+ global->Set(simple_property, v8::Int32::New(isolate, 5));
CHECK_EQ(4, global->Get(simple_property)->Int32Value());
// This should succeed even though the property is read-only
- global->ForceSet(simple_property, v8::Int32::New(6));
+ global->ForceSet(simple_property, v8::Int32::New(isolate, 6));
CHECK_EQ(6, global->Get(simple_property)->Int32Value());
// Accessors
@@ -14860,13 +15282,13 @@ TEST(ForceSet) {
CHECK_EQ(3, global->Get(access_property)->Int32Value());
// Setting the property shouldn't override it, just call the setter
// which in this case does nothing.
- global->Set(access_property, v8::Int32::New(7));
+ global->Set(access_property, v8::Int32::New(isolate, 7));
CHECK_EQ(3, global->Get(access_property)->Int32Value());
CHECK_EQ(1, force_set_set_count);
CHECK_EQ(2, force_set_get_count);
// Forcing the property to be set should override the accessor without
// calling it
- global->ForceSet(access_property, v8::Int32::New(8));
+ global->ForceSet(access_property, v8::Int32::New(isolate, 8));
CHECK_EQ(8, global->Get(access_property)->Int32Value());
CHECK_EQ(1, force_set_set_count);
CHECK_EQ(2, force_set_get_count);
@@ -14878,19 +15300,21 @@ TEST(ForceSetWithInterceptor) {
force_set_set_count = 0;
pass_on_get = false;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(ForceSetGetter, ForceSetInterceptSetter);
LocalContext context(NULL, templ);
v8::Handle<v8::Object> global = context->Global();
- v8::Handle<v8::String> some_property = v8::String::New("a");
+ v8::Handle<v8::String> some_property =
+ v8::String::NewFromUtf8(isolate, "a");
CHECK_EQ(0, force_set_set_count);
CHECK_EQ(0, force_set_get_count);
CHECK_EQ(3, global->Get(some_property)->Int32Value());
// Setting the property shouldn't override it, just call the setter
// which in this case does nothing.
- global->Set(some_property, v8::Int32::New(7));
+ global->Set(some_property, v8::Int32::New(isolate, 7));
CHECK_EQ(3, global->Get(some_property)->Int32Value());
CHECK_EQ(1, force_set_set_count);
CHECK_EQ(2, force_set_get_count);
@@ -14903,7 +15327,7 @@ TEST(ForceSetWithInterceptor) {
CHECK_EQ(3, force_set_get_count);
// Forcing the property to be set should cause the value to be
// set locally without calling the interceptor.
- global->ForceSet(some_property, v8::Int32::New(8));
+ global->ForceSet(some_property, v8::Int32::New(isolate, 8));
CHECK_EQ(8, global->Get(some_property)->Int32Value());
CHECK_EQ(1, force_set_set_count);
CHECK_EQ(4, force_set_get_count);
@@ -14914,21 +15338,24 @@ TEST(ForceSetWithInterceptor) {
CHECK_EQ(1, force_set_set_count);
CHECK_EQ(5, force_set_get_count);
// The interceptor should also work for other properties
- CHECK_EQ(3, global->Get(v8::String::New("b"))->Int32Value());
+ CHECK_EQ(3, global->Get(v8::String::NewFromUtf8(isolate, "b"))
+ ->Int32Value());
CHECK_EQ(1, force_set_set_count);
CHECK_EQ(6, force_set_get_count);
}
THREADED_TEST(ForceDelete) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
LocalContext context(NULL, templ);
v8::Handle<v8::Object> global = context->Global();
// Ordinary properties
- v8::Handle<v8::String> simple_property = v8::String::New("p");
- global->Set(simple_property, v8::Int32::New(4), v8::DontDelete);
+ v8::Handle<v8::String> simple_property =
+ v8::String::NewFromUtf8(isolate, "p");
+ global->Set(simple_property, v8::Int32::New(isolate, 4), v8::DontDelete);
CHECK_EQ(4, global->Get(simple_property)->Int32Value());
// This should fail because the property is dont-delete.
CHECK(!global->Delete(simple_property));
@@ -14956,14 +15383,16 @@ THREADED_TEST(ForceDeleteWithInterceptor) {
force_delete_interceptor_count = 0;
pass_on_delete = false;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(0, 0, 0, ForceDeleteDeleter);
LocalContext context(NULL, templ);
v8::Handle<v8::Object> global = context->Global();
- v8::Handle<v8::String> some_property = v8::String::New("a");
- global->Set(some_property, v8::Integer::New(42), v8::DontDelete);
+ v8::Handle<v8::String> some_property =
+ v8::String::NewFromUtf8(isolate, "a");
+ global->Set(some_property, v8::Integer::New(isolate, 42), v8::DontDelete);
// Deleting a property should get intercepted and nothing should
// happen.
@@ -15028,7 +15457,8 @@ TEST(InlinedFunctionAcrossContexts) {
"f();");
CHECK_EQ(42, res->Int32Value());
ctx2->Exit();
- v8::Handle<v8::String> G_property = v8::String::New("G");
+ v8::Handle<v8::String> G_property =
+ v8::String::NewFromUtf8(CcTest::isolate(), "G");
CHECK(ctx1->Global()->ForceDelete(G_property));
ctx2->Enter();
ExpectString(
@@ -15096,7 +15526,7 @@ THREADED_TEST(GetCallingContext) {
// Create an object with a C++ callback in context0.
calling_context0->Enter();
Local<v8::FunctionTemplate> callback_templ =
- v8::FunctionTemplate::New(GetCallingContextCallback);
+ v8::FunctionTemplate::New(isolate, GetCallingContextCallback);
calling_context0->Global()->Set(v8_str("callback"),
callback_templ->GetFunction());
calling_context0->Exit();
@@ -15143,34 +15573,21 @@ THREADED_TEST(InitGlobalVarInProtoChain) {
// See http://code.google.com/p/v8/issues/detail?id=398
THREADED_TEST(ReplaceConstantFunction) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::Object> obj = v8::Object::New();
- v8::Handle<v8::FunctionTemplate> func_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::String> foo_string = v8::String::New("foo");
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Object> obj = v8::Object::New(isolate);
+ v8::Handle<v8::FunctionTemplate> func_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Handle<v8::String> foo_string =
+ v8::String::NewFromUtf8(isolate, "foo");
obj->Set(foo_string, func_templ->GetFunction());
v8::Handle<v8::Object> obj_clone = obj->Clone();
- obj_clone->Set(foo_string, v8::String::New("Hello"));
+ obj_clone->Set(foo_string,
+ v8::String::NewFromUtf8(isolate, "Hello"));
CHECK(!obj->Get(foo_string)->IsUndefined());
}
-// Regression test for http://crbug.com/16276.
-THREADED_TEST(Regress16276) {
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- // Force the IC in f to be a dictionary load IC.
- CompileRun("function f(obj) { return obj.x; }\n"
- "var obj = { x: { foo: 42 }, y: 87 };\n"
- "var x = obj.x;\n"
- "delete obj.y;\n"
- "for (var i = 0; i < 5; i++) f(obj);");
- // Detach the global object to make 'this' refer directly to the
- // global object (not the proxy), and make sure that the dictionary
- // load IC doesn't mess up loading directly from the global object.
- context->DetachGlobal();
- CHECK_EQ(42, CompileRun("f(this).foo")->Int32Value());
-}
-
static void CheckElementValue(i::Isolate* isolate,
int expected,
i::Handle<i::Object> obj,
@@ -15187,10 +15604,10 @@ THREADED_TEST(PixelArray) {
v8::HandleScope scope(context->GetIsolate());
const int kElementCount = 260;
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
- i::Handle<i::ExternalPixelArray> pixels =
- i::Handle<i::ExternalPixelArray>::cast(
+ i::Handle<i::ExternalUint8ClampedArray> pixels =
+ i::Handle<i::ExternalUint8ClampedArray>::cast(
factory->NewExternalArray(kElementCount,
- v8::kExternalPixelArray,
+ v8::kExternalUint8ClampedArray,
pixel_data));
// Force GC to trigger verification.
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -15204,13 +15621,13 @@ THREADED_TEST(PixelArray) {
CHECK_EQ(i % 256, pixel_data[i]);
}
- v8::Handle<v8::Object> obj = v8::Object::New();
+ v8::Handle<v8::Object> obj = v8::Object::New(context->GetIsolate());
i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
// Set the elements to be the pixels.
// jsobj->set_elements(*pixels);
obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount);
CheckElementValue(isolate, 1, jsobj, 1);
- obj->Set(v8_str("field"), v8::Int32::New(1503));
+ obj->Set(v8_str("field"), v8::Int32::New(CcTest::isolate(), 1503));
context->Global()->Set(v8_str("pixels"), obj);
v8::Handle<v8::Value> result = CompileRun("pixels.field");
CHECK_EQ(1503, result->Int32Value());
@@ -15569,7 +15986,7 @@ THREADED_TEST(PixelArrayInfo) {
v8::HandleScope scope(context->GetIsolate());
for (int size = 0; size < 100; size += 10) {
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(size));
- v8::Handle<v8::Object> obj = v8::Object::New();
+ v8::Handle<v8::Object> obj = v8::Object::New(context->GetIsolate());
obj->SetIndexedPropertiesToPixelData(pixel_data, size);
CHECK(obj->HasIndexedPropertiesInPixelData());
CHECK_EQ(pixel_data, obj->GetIndexedPropertiesPixelData());
@@ -15597,18 +16014,20 @@ static void NotHandledIndexedPropertySetter(
THREADED_TEST(PixelArrayWithInterceptor) {
LocalContext context;
i::Factory* factory = CcTest::i_isolate()->factory();
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
const int kElementCount = 260;
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
- i::Handle<i::ExternalPixelArray> pixels =
- i::Handle<i::ExternalPixelArray>::cast(
+ i::Handle<i::ExternalUint8ClampedArray> pixels =
+ i::Handle<i::ExternalUint8ClampedArray>::cast(
factory->NewExternalArray(kElementCount,
- v8::kExternalPixelArray,
+ v8::kExternalUint8ClampedArray,
pixel_data));
for (int i = 0; i < kElementCount; i++) {
pixels->set(i, i % 256);
}
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> templ =
+ v8::ObjectTemplate::New(context->GetIsolate());
templ->SetIndexedPropertyHandler(NotHandledIndexedPropertyGetter,
NotHandledIndexedPropertySetter);
v8::Handle<v8::Object> obj = templ->NewInstance();
@@ -15630,21 +16049,21 @@ THREADED_TEST(PixelArrayWithInterceptor) {
static int ExternalArrayElementSize(v8::ExternalArrayType array_type) {
switch (array_type) {
- case v8::kExternalByteArray:
- case v8::kExternalUnsignedByteArray:
- case v8::kExternalPixelArray:
+ case v8::kExternalInt8Array:
+ case v8::kExternalUint8Array:
+ case v8::kExternalUint8ClampedArray:
return 1;
break;
- case v8::kExternalShortArray:
- case v8::kExternalUnsignedShortArray:
+ case v8::kExternalInt16Array:
+ case v8::kExternalUint16Array:
return 2;
break;
- case v8::kExternalIntArray:
- case v8::kExternalUnsignedIntArray:
- case v8::kExternalFloatArray:
+ case v8::kExternalInt32Array:
+ case v8::kExternalUint32Array:
+ case v8::kExternalFloat32Array:
return 4;
break;
- case v8::kExternalDoubleArray:
+ case v8::kExternalFloat64Array:
return 8;
break;
default:
@@ -15665,21 +16084,14 @@ static void ObjectWithExternalArrayTestHelper(
int64_t low, int64_t high) {
i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
i::Isolate* isolate = jsobj->GetIsolate();
- obj->Set(v8_str("field"), v8::Int32::New(1503));
+ obj->Set(v8_str("field"),
+ v8::Int32::New(reinterpret_cast<v8::Isolate*>(isolate), 1503));
context->Global()->Set(v8_str("ext_array"), obj);
v8::Handle<v8::Value> result = CompileRun("ext_array.field");
CHECK_EQ(1503, result->Int32Value());
result = CompileRun("ext_array[1]");
CHECK_EQ(1, result->Int32Value());
- // Check pass through of assigned smis
- result = CompileRun("var sum = 0;"
- "for (var i = 0; i < 8; i++) {"
- " sum += ext_array[i] = ext_array[i] = -i;"
- "}"
- "sum;");
- CHECK_EQ(-28, result->Int32Value());
-
// Check assigned smis
result = CompileRun("for (var i = 0; i < 8; i++) {"
" ext_array[i] = i;"
@@ -15689,7 +16101,16 @@ static void ObjectWithExternalArrayTestHelper(
" sum += ext_array[i];"
"}"
"sum;");
+
CHECK_EQ(28, result->Int32Value());
+ // Check pass through of assigned smis
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += ext_array[i] = ext_array[i] = -i;"
+ "}"
+ "sum;");
+ CHECK_EQ(-28, result->Int32Value());
+
// Check assigned smis in reverse order
result = CompileRun("for (var i = 8; --i >= 0; ) {"
@@ -15804,8 +16225,8 @@ static void ObjectWithExternalArrayTestHelper(
"}"
"ext_array[7];");
CHECK_EQ(0, result->Int32Value());
- if (array_type == v8::kExternalDoubleArray ||
- array_type == v8::kExternalFloatArray) {
+ if (array_type == v8::kExternalFloat64Array ||
+ array_type == v8::kExternalFloat32Array) {
CHECK_EQ(static_cast<int>(i::OS::nan_value()),
static_cast<int>(
jsobj->GetElement(isolate, 7)->ToObjectChecked()->Number()));
@@ -15822,8 +16243,8 @@ static void ObjectWithExternalArrayTestHelper(
static_cast<int>(
jsobj->GetElement(isolate, 6)->ToObjectChecked()->Number()));
- if (array_type != v8::kExternalFloatArray &&
- array_type != v8::kExternalDoubleArray) {
+ if (array_type != v8::kExternalFloat32Array &&
+ array_type != v8::kExternalFloat64Array) {
// Though the specification doesn't state it, be explicit about
// converting NaNs and +/-Infinity to zero.
result = CompileRun("for (var i = 0; i < 8; i++) {"
@@ -15844,7 +16265,7 @@ static void ObjectWithExternalArrayTestHelper(
"}"
"ext_array[5];");
int expected_value =
- (array_type == v8::kExternalPixelArray) ? 255 : 0;
+ (array_type == v8::kExternalUint8ClampedArray) ? 255 : 0;
CHECK_EQ(expected_value, result->Int32Value());
CheckElementValue(isolate, expected_value, jsobj, 5);
@@ -15869,10 +16290,10 @@ static void ObjectWithExternalArrayTestHelper(
"var source_data = [0.6, 10.6];"
"var expected_results = [1, 11];";
bool is_unsigned =
- (array_type == v8::kExternalUnsignedByteArray ||
- array_type == v8::kExternalUnsignedShortArray ||
- array_type == v8::kExternalUnsignedIntArray);
- bool is_pixel_data = array_type == v8::kExternalPixelArray;
+ (array_type == v8::kExternalUint8Array ||
+ array_type == v8::kExternalUint16Array ||
+ array_type == v8::kExternalUint32Array);
+ bool is_pixel_data = array_type == v8::kExternalUint8ClampedArray;
i::OS::SNPrintF(test_buf,
"%s"
@@ -15957,6 +16378,113 @@ static void ObjectWithExternalArrayTestHelper(
}
+template <class FixedTypedArrayClass,
+ i::ElementsKind elements_kind,
+ class ElementType>
+static void FixedTypedArrayTestHelper(
+ v8::ExternalArrayType array_type,
+ ElementType low,
+ ElementType high) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext context;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+ v8::HandleScope scope(context->GetIsolate());
+ const int kElementCount = 260;
+ i::Handle<FixedTypedArrayClass> fixed_array =
+ i::Handle<FixedTypedArrayClass>::cast(
+ factory->NewFixedTypedArray(kElementCount, array_type));
+ CHECK_EQ(FixedTypedArrayClass::kInstanceType,
+ fixed_array->map()->instance_type());
+ CHECK_EQ(kElementCount, fixed_array->length());
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ for (int i = 0; i < kElementCount; i++) {
+ fixed_array->set(i, static_cast<ElementType>(i));
+ }
+ // Force GC to trigger verification.
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ for (int i = 0; i < kElementCount; i++) {
+ CHECK_EQ(static_cast<int64_t>(static_cast<ElementType>(i)),
+ static_cast<int64_t>(fixed_array->get_scalar(i)));
+ }
+ v8::Handle<v8::Object> obj = v8::Object::New(CcTest::isolate());
+ i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
+ i::Handle<i::Map> fixed_array_map =
+ isolate->factory()->GetElementsTransitionMap(jsobj, elements_kind);
+ jsobj->set_map(*fixed_array_map);
+ jsobj->set_elements(*fixed_array);
+
+ ObjectWithExternalArrayTestHelper<FixedTypedArrayClass, ElementType>(
+ context.local(), obj, kElementCount, array_type,
+ static_cast<int64_t>(low),
+ static_cast<int64_t>(high));
+}
+
+
+THREADED_TEST(FixedUint8Array) {
+ FixedTypedArrayTestHelper<i::FixedUint8Array, i::UINT8_ELEMENTS, uint8_t>(
+ v8::kExternalUint8Array,
+ 0x0, 0xFF);
+}
+
+
+THREADED_TEST(FixedUint8ClampedArray) {
+ FixedTypedArrayTestHelper<i::FixedUint8ClampedArray,
+ i::UINT8_CLAMPED_ELEMENTS, uint8_t>(
+ v8::kExternalUint8ClampedArray,
+ 0x0, 0xFF);
+}
+
+
+THREADED_TEST(FixedInt8Array) {
+ FixedTypedArrayTestHelper<i::FixedInt8Array, i::INT8_ELEMENTS, int8_t>(
+ v8::kExternalInt8Array,
+ -0x80, 0x7F);
+}
+
+
+THREADED_TEST(FixedUint16Array) {
+ FixedTypedArrayTestHelper<i::FixedUint16Array, i::UINT16_ELEMENTS, uint16_t>(
+ v8::kExternalUint16Array,
+ 0x0, 0xFFFF);
+}
+
+
+THREADED_TEST(FixedInt16Array) {
+ FixedTypedArrayTestHelper<i::FixedInt16Array, i::INT16_ELEMENTS, int16_t>(
+ v8::kExternalInt16Array,
+ -0x8000, 0x7FFF);
+}
+
+
+THREADED_TEST(FixedUint32Array) {
+ FixedTypedArrayTestHelper<i::FixedUint32Array, i::UINT32_ELEMENTS, uint32_t>(
+ v8::kExternalUint32Array,
+ 0x0, UINT_MAX);
+}
+
+
+THREADED_TEST(FixedInt32Array) {
+ FixedTypedArrayTestHelper<i::FixedInt32Array, i::INT32_ELEMENTS, int32_t>(
+ v8::kExternalInt32Array,
+ INT_MIN, INT_MAX);
+}
+
+
+THREADED_TEST(FixedFloat32Array) {
+ FixedTypedArrayTestHelper<i::FixedFloat32Array, i::FLOAT32_ELEMENTS, float>(
+ v8::kExternalFloat32Array,
+ -500, 500);
+}
+
+
+THREADED_TEST(FixedFloat64Array) {
+ FixedTypedArrayTestHelper<i::FixedFloat64Array, i::FLOAT64_ELEMENTS, float>(
+ v8::kExternalFloat64Array,
+ -500, 500);
+}
+
+
template <class ExternalArrayClass, class ElementType>
static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
int64_t low,
@@ -15985,7 +16513,7 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array_data[i]));
}
- v8::Handle<v8::Object> obj = v8::Object::New();
+ v8::Handle<v8::Object> obj = v8::Object::New(context->GetIsolate());
i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
// Set the elements to be the external array.
obj->SetIndexedPropertiesToExternalArrayData(array_data,
@@ -16010,7 +16538,7 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
const int kLargeElementCount = kXSize * kYSize * 4;
ElementType* large_array_data =
static_cast<ElementType*>(malloc(kLargeElementCount * element_size));
- v8::Handle<v8::Object> large_obj = v8::Object::New();
+ v8::Handle<v8::Object> large_obj = v8::Object::New(context->GetIsolate());
// Set the elements to be the external array.
large_obj->SetIndexedPropertiesToExternalArrayData(large_array_data,
array_type,
@@ -16083,9 +16611,10 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
// Property "" set after the external array is associated with the object.
{
- v8::Handle<v8::Object> obj2 = v8::Object::New();
- obj2->Set(v8_str("ee_test_field"), v8::Int32::New(256));
- obj2->Set(v8_str(""), v8::Int32::New(1503));
+ v8::Handle<v8::Object> obj2 = v8::Object::New(context->GetIsolate());
+ obj2->Set(v8_str("ee_test_field"),
+ v8::Int32::New(context->GetIsolate(), 256));
+ obj2->Set(v8_str(""), v8::Int32::New(context->GetIsolate(), 1503));
// Set the elements to be the external array.
obj2->SetIndexedPropertiesToExternalArrayData(array_data,
array_type,
@@ -16097,13 +16626,14 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
// Property "" set after the external array is associated with the object.
{
- v8::Handle<v8::Object> obj2 = v8::Object::New();
- obj2->Set(v8_str("ee_test_field_2"), v8::Int32::New(256));
+ v8::Handle<v8::Object> obj2 = v8::Object::New(context->GetIsolate());
+ obj2->Set(v8_str("ee_test_field_2"),
+ v8::Int32::New(context->GetIsolate(), 256));
// Set the elements to be the external array.
obj2->SetIndexedPropertiesToExternalArrayData(array_data,
array_type,
kElementCount);
- obj2->Set(v8_str(""), v8::Int32::New(1503));
+ obj2->Set(v8_str(""), v8::Int32::New(context->GetIsolate(), 1503));
context->Global()->Set(v8_str("ext_array"), obj2);
result = CompileRun("ext_array['']");
CHECK_EQ(1503, result->Int32Value());
@@ -16111,8 +16641,9 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
// Should reuse the map from previous test.
{
- v8::Handle<v8::Object> obj2 = v8::Object::New();
- obj2->Set(v8_str("ee_test_field_2"), v8::Int32::New(256));
+ v8::Handle<v8::Object> obj2 = v8::Object::New(context->GetIsolate());
+ obj2->Set(v8_str("ee_test_field_2"),
+ v8::Int32::New(context->GetIsolate(), 256));
// Set the elements to be the external array. Should re-use the map
// from previous test.
obj2->SetIndexedPropertiesToExternalArrayData(array_data,
@@ -16125,9 +16656,10 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
// Property "" is a constant function that shouldn't not be interfered with
// when an external array is set.
{
- v8::Handle<v8::Object> obj2 = v8::Object::New();
+ v8::Handle<v8::Object> obj2 = v8::Object::New(context->GetIsolate());
// Start
- obj2->Set(v8_str("ee_test_field3"), v8::Int32::New(256));
+ obj2->Set(v8_str("ee_test_field3"),
+ v8::Int32::New(context->GetIsolate(), 256));
// Add a constant function to an object.
context->Global()->Set(v8_str("ext_array"), obj2);
@@ -16136,8 +16668,9 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
// Add an external array transition to the same map that
// has the constant transition.
- v8::Handle<v8::Object> obj3 = v8::Object::New();
- obj3->Set(v8_str("ee_test_field3"), v8::Int32::New(256));
+ v8::Handle<v8::Object> obj3 = v8::Object::New(context->GetIsolate());
+ obj3->Set(v8_str("ee_test_field3"),
+ v8::Int32::New(context->GetIsolate(), 256));
obj3->SetIndexedPropertiesToExternalArrayData(array_data,
array_type,
kElementCount);
@@ -16148,16 +16681,18 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
// by a constant function.
{
// Add an external array transition.
- v8::Handle<v8::Object> obj3 = v8::Object::New();
- obj3->Set(v8_str("ee_test_field4"), v8::Int32::New(256));
+ v8::Handle<v8::Object> obj3 = v8::Object::New(context->GetIsolate());
+ obj3->Set(v8_str("ee_test_field4"),
+ v8::Int32::New(context->GetIsolate(), 256));
obj3->SetIndexedPropertiesToExternalArrayData(array_data,
array_type,
kElementCount);
// Add a constant function to the same map that just got an external array
// transition.
- v8::Handle<v8::Object> obj2 = v8::Object::New();
- obj2->Set(v8_str("ee_test_field4"), v8::Int32::New(256));
+ v8::Handle<v8::Object> obj2 = v8::Object::New(context->GetIsolate());
+ obj2->Set(v8_str("ee_test_field4"),
+ v8::Int32::New(context->GetIsolate(), 256));
context->Global()->Set(v8_str("ext_array"), obj2);
result = CompileRun("ext_array[''] = function() {return 1503;};"
"ext_array['']();");
@@ -16167,86 +16702,86 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
}
-THREADED_TEST(ExternalByteArray) {
- ExternalArrayTestHelper<i::ExternalByteArray, int8_t>(
- v8::kExternalByteArray,
+THREADED_TEST(ExternalInt8Array) {
+ ExternalArrayTestHelper<i::ExternalInt8Array, int8_t>(
+ v8::kExternalInt8Array,
-128,
127);
}
-THREADED_TEST(ExternalUnsignedByteArray) {
- ExternalArrayTestHelper<i::ExternalUnsignedByteArray, uint8_t>(
- v8::kExternalUnsignedByteArray,
+THREADED_TEST(ExternalUint8Array) {
+ ExternalArrayTestHelper<i::ExternalUint8Array, uint8_t>(
+ v8::kExternalUint8Array,
0,
255);
}
-THREADED_TEST(ExternalPixelArray) {
- ExternalArrayTestHelper<i::ExternalPixelArray, uint8_t>(
- v8::kExternalPixelArray,
+THREADED_TEST(ExternalUint8ClampedArray) {
+ ExternalArrayTestHelper<i::ExternalUint8ClampedArray, uint8_t>(
+ v8::kExternalUint8ClampedArray,
0,
255);
}
-THREADED_TEST(ExternalShortArray) {
- ExternalArrayTestHelper<i::ExternalShortArray, int16_t>(
- v8::kExternalShortArray,
+THREADED_TEST(ExternalInt16Array) {
+ ExternalArrayTestHelper<i::ExternalInt16Array, int16_t>(
+ v8::kExternalInt16Array,
-32768,
32767);
}
-THREADED_TEST(ExternalUnsignedShortArray) {
- ExternalArrayTestHelper<i::ExternalUnsignedShortArray, uint16_t>(
- v8::kExternalUnsignedShortArray,
+THREADED_TEST(ExternalUint16Array) {
+ ExternalArrayTestHelper<i::ExternalUint16Array, uint16_t>(
+ v8::kExternalUint16Array,
0,
65535);
}
-THREADED_TEST(ExternalIntArray) {
- ExternalArrayTestHelper<i::ExternalIntArray, int32_t>(
- v8::kExternalIntArray,
+THREADED_TEST(ExternalInt32Array) {
+ ExternalArrayTestHelper<i::ExternalInt32Array, int32_t>(
+ v8::kExternalInt32Array,
INT_MIN, // -2147483648
INT_MAX); // 2147483647
}
-THREADED_TEST(ExternalUnsignedIntArray) {
- ExternalArrayTestHelper<i::ExternalUnsignedIntArray, uint32_t>(
- v8::kExternalUnsignedIntArray,
+THREADED_TEST(ExternalUint32Array) {
+ ExternalArrayTestHelper<i::ExternalUint32Array, uint32_t>(
+ v8::kExternalUint32Array,
0,
UINT_MAX); // 4294967295
}
-THREADED_TEST(ExternalFloatArray) {
- ExternalArrayTestHelper<i::ExternalFloatArray, float>(
- v8::kExternalFloatArray,
+THREADED_TEST(ExternalFloat32Array) {
+ ExternalArrayTestHelper<i::ExternalFloat32Array, float>(
+ v8::kExternalFloat32Array,
-500,
500);
}
-THREADED_TEST(ExternalDoubleArray) {
- ExternalArrayTestHelper<i::ExternalDoubleArray, double>(
- v8::kExternalDoubleArray,
+THREADED_TEST(ExternalFloat64Array) {
+ ExternalArrayTestHelper<i::ExternalFloat64Array, double>(
+ v8::kExternalFloat64Array,
-500,
500);
}
THREADED_TEST(ExternalArrays) {
- TestExternalByteArray();
- TestExternalUnsignedByteArray();
- TestExternalShortArray();
- TestExternalUnsignedShortArray();
- TestExternalIntArray();
- TestExternalUnsignedIntArray();
- TestExternalFloatArray();
+ TestExternalInt8Array();
+ TestExternalUint8Array();
+ TestExternalInt16Array();
+ TestExternalUint16Array();
+ TestExternalInt32Array();
+ TestExternalUint32Array();
+ TestExternalFloat32Array();
}
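The renames in this block follow typed-array spelling, signedness plus bit width, with the pixel array becoming the clamped Uint8 variant. A compact sketch mirroring the ExternalArrayElementSize helper above, with the old enumerator names noted in comments:

// Assumes <v8.h>; element size per external array type, new names only.
static int ElementSize(v8::ExternalArrayType type) {
  switch (type) {
    case v8::kExternalInt8Array:          // was kExternalByteArray
    case v8::kExternalUint8Array:         // was kExternalUnsignedByteArray
    case v8::kExternalUint8ClampedArray:  // was kExternalPixelArray
      return 1;
    case v8::kExternalInt16Array:         // was kExternalShortArray
    case v8::kExternalUint16Array:        // was kExternalUnsignedShortArray
      return 2;
    case v8::kExternalInt32Array:         // was kExternalIntArray
    case v8::kExternalUint32Array:        // was kExternalUnsignedIntArray
    case v8::kExternalFloat32Array:       // was kExternalFloatArray
      return 4;
    case v8::kExternalFloat64Array:       // was kExternalDoubleArray
      return 8;
  }
  return 0;
}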
@@ -16256,7 +16791,7 @@ void ExternalArrayInfoTestHelper(v8::ExternalArrayType array_type) {
for (int size = 0; size < 100; size += 10) {
int element_size = ExternalArrayElementSize(array_type);
void* external_data = malloc(size * element_size);
- v8::Handle<v8::Object> obj = v8::Object::New();
+ v8::Handle<v8::Object> obj = v8::Object::New(context->GetIsolate());
obj->SetIndexedPropertiesToExternalArrayData(
external_data, array_type, size);
CHECK(obj->HasIndexedPropertiesInExternalArrayData());
@@ -16269,20 +16804,22 @@ void ExternalArrayInfoTestHelper(v8::ExternalArrayType array_type) {
THREADED_TEST(ExternalArrayInfo) {
- ExternalArrayInfoTestHelper(v8::kExternalByteArray);
- ExternalArrayInfoTestHelper(v8::kExternalUnsignedByteArray);
- ExternalArrayInfoTestHelper(v8::kExternalShortArray);
- ExternalArrayInfoTestHelper(v8::kExternalUnsignedShortArray);
- ExternalArrayInfoTestHelper(v8::kExternalIntArray);
- ExternalArrayInfoTestHelper(v8::kExternalUnsignedIntArray);
- ExternalArrayInfoTestHelper(v8::kExternalFloatArray);
- ExternalArrayInfoTestHelper(v8::kExternalDoubleArray);
- ExternalArrayInfoTestHelper(v8::kExternalPixelArray);
-}
-
-
-void ExternalArrayLimitTestHelper(v8::ExternalArrayType array_type, int size) {
- v8::Handle<v8::Object> obj = v8::Object::New();
+ ExternalArrayInfoTestHelper(v8::kExternalInt8Array);
+ ExternalArrayInfoTestHelper(v8::kExternalUint8Array);
+ ExternalArrayInfoTestHelper(v8::kExternalInt16Array);
+ ExternalArrayInfoTestHelper(v8::kExternalUint16Array);
+ ExternalArrayInfoTestHelper(v8::kExternalInt32Array);
+ ExternalArrayInfoTestHelper(v8::kExternalUint32Array);
+ ExternalArrayInfoTestHelper(v8::kExternalFloat32Array);
+ ExternalArrayInfoTestHelper(v8::kExternalFloat64Array);
+ ExternalArrayInfoTestHelper(v8::kExternalUint8ClampedArray);
+}
+
+
+void ExtArrayLimitsHelper(v8::Isolate* isolate,
+ v8::ExternalArrayType array_type,
+ int size) {
+ v8::Handle<v8::Object> obj = v8::Object::New(isolate);
v8::V8::SetFatalErrorHandler(StoringErrorCallback);
last_location = last_message = NULL;
obj->SetIndexedPropertiesToExternalArrayData(NULL, array_type, size);
@@ -16294,25 +16831,26 @@ void ExternalArrayLimitTestHelper(v8::ExternalArrayType array_type, int size) {
TEST(ExternalArrayLimits) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- ExternalArrayLimitTestHelper(v8::kExternalByteArray, 0x40000000);
- ExternalArrayLimitTestHelper(v8::kExternalByteArray, 0xffffffff);
- ExternalArrayLimitTestHelper(v8::kExternalUnsignedByteArray, 0x40000000);
- ExternalArrayLimitTestHelper(v8::kExternalUnsignedByteArray, 0xffffffff);
- ExternalArrayLimitTestHelper(v8::kExternalShortArray, 0x40000000);
- ExternalArrayLimitTestHelper(v8::kExternalShortArray, 0xffffffff);
- ExternalArrayLimitTestHelper(v8::kExternalUnsignedShortArray, 0x40000000);
- ExternalArrayLimitTestHelper(v8::kExternalUnsignedShortArray, 0xffffffff);
- ExternalArrayLimitTestHelper(v8::kExternalIntArray, 0x40000000);
- ExternalArrayLimitTestHelper(v8::kExternalIntArray, 0xffffffff);
- ExternalArrayLimitTestHelper(v8::kExternalUnsignedIntArray, 0x40000000);
- ExternalArrayLimitTestHelper(v8::kExternalUnsignedIntArray, 0xffffffff);
- ExternalArrayLimitTestHelper(v8::kExternalFloatArray, 0x40000000);
- ExternalArrayLimitTestHelper(v8::kExternalFloatArray, 0xffffffff);
- ExternalArrayLimitTestHelper(v8::kExternalDoubleArray, 0x40000000);
- ExternalArrayLimitTestHelper(v8::kExternalDoubleArray, 0xffffffff);
- ExternalArrayLimitTestHelper(v8::kExternalPixelArray, 0x40000000);
- ExternalArrayLimitTestHelper(v8::kExternalPixelArray, 0xffffffff);
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ ExtArrayLimitsHelper(isolate, v8::kExternalInt8Array, 0x40000000);
+ ExtArrayLimitsHelper(isolate, v8::kExternalInt8Array, 0xffffffff);
+ ExtArrayLimitsHelper(isolate, v8::kExternalUint8Array, 0x40000000);
+ ExtArrayLimitsHelper(isolate, v8::kExternalUint8Array, 0xffffffff);
+ ExtArrayLimitsHelper(isolate, v8::kExternalInt16Array, 0x40000000);
+ ExtArrayLimitsHelper(isolate, v8::kExternalInt16Array, 0xffffffff);
+ ExtArrayLimitsHelper(isolate, v8::kExternalUint16Array, 0x40000000);
+ ExtArrayLimitsHelper(isolate, v8::kExternalUint16Array, 0xffffffff);
+ ExtArrayLimitsHelper(isolate, v8::kExternalInt32Array, 0x40000000);
+ ExtArrayLimitsHelper(isolate, v8::kExternalInt32Array, 0xffffffff);
+ ExtArrayLimitsHelper(isolate, v8::kExternalUint32Array, 0x40000000);
+ ExtArrayLimitsHelper(isolate, v8::kExternalUint32Array, 0xffffffff);
+ ExtArrayLimitsHelper(isolate, v8::kExternalFloat32Array, 0x40000000);
+ ExtArrayLimitsHelper(isolate, v8::kExternalFloat32Array, 0xffffffff);
+ ExtArrayLimitsHelper(isolate, v8::kExternalFloat64Array, 0x40000000);
+ ExtArrayLimitsHelper(isolate, v8::kExternalFloat64Array, 0xffffffff);
+ ExtArrayLimitsHelper(isolate, v8::kExternalUint8ClampedArray, 0x40000000);
+ ExtArrayLimitsHelper(isolate, v8::kExternalUint8ClampedArray, 0xffffffff);
}
@@ -16328,8 +16866,9 @@ void TypedArrayTestHelper(v8::ExternalArrayType array_type,
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(
- backing_store.start(), (kElementCount+2)*sizeof(ElementType));
+ Local<v8::ArrayBuffer> ab =
+ v8::ArrayBuffer::New(isolate, backing_store.start(),
+ (kElementCount + 2) * sizeof(ElementType));
Local<TypedArray> ta =
TypedArray::New(ab, 2*sizeof(ElementType), kElementCount);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
@@ -16350,58 +16889,59 @@ void TypedArrayTestHelper(v8::ExternalArrayType array_type,
THREADED_TEST(Uint8Array) {
- TypedArrayTestHelper<uint8_t, v8::Uint8Array, i::ExternalUnsignedByteArray>(
- v8::kExternalUnsignedByteArray, 0, 0xFF);
+ TypedArrayTestHelper<uint8_t, v8::Uint8Array, i::ExternalUint8Array>(
+ v8::kExternalUint8Array, 0, 0xFF);
}
THREADED_TEST(Int8Array) {
- TypedArrayTestHelper<int8_t, v8::Int8Array, i::ExternalByteArray>(
- v8::kExternalByteArray, -0x80, 0x7F);
+ TypedArrayTestHelper<int8_t, v8::Int8Array, i::ExternalInt8Array>(
+ v8::kExternalInt8Array, -0x80, 0x7F);
}
THREADED_TEST(Uint16Array) {
TypedArrayTestHelper<uint16_t,
v8::Uint16Array,
- i::ExternalUnsignedShortArray>(
- v8::kExternalUnsignedShortArray, 0, 0xFFFF);
+ i::ExternalUint16Array>(
+ v8::kExternalUint16Array, 0, 0xFFFF);
}
THREADED_TEST(Int16Array) {
- TypedArrayTestHelper<int16_t, v8::Int16Array, i::ExternalShortArray>(
- v8::kExternalShortArray, -0x8000, 0x7FFF);
+ TypedArrayTestHelper<int16_t, v8::Int16Array, i::ExternalInt16Array>(
+ v8::kExternalInt16Array, -0x8000, 0x7FFF);
}
THREADED_TEST(Uint32Array) {
- TypedArrayTestHelper<uint32_t, v8::Uint32Array, i::ExternalUnsignedIntArray>(
- v8::kExternalUnsignedIntArray, 0, UINT_MAX);
+ TypedArrayTestHelper<uint32_t, v8::Uint32Array, i::ExternalUint32Array>(
+ v8::kExternalUint32Array, 0, UINT_MAX);
}
THREADED_TEST(Int32Array) {
- TypedArrayTestHelper<int32_t, v8::Int32Array, i::ExternalIntArray>(
- v8::kExternalIntArray, INT_MIN, INT_MAX);
+ TypedArrayTestHelper<int32_t, v8::Int32Array, i::ExternalInt32Array>(
+ v8::kExternalInt32Array, INT_MIN, INT_MAX);
}
THREADED_TEST(Float32Array) {
- TypedArrayTestHelper<float, v8::Float32Array, i::ExternalFloatArray>(
- v8::kExternalFloatArray, -500, 500);
+ TypedArrayTestHelper<float, v8::Float32Array, i::ExternalFloat32Array>(
+ v8::kExternalFloat32Array, -500, 500);
}
THREADED_TEST(Float64Array) {
- TypedArrayTestHelper<double, v8::Float64Array, i::ExternalDoubleArray>(
- v8::kExternalDoubleArray, -500, 500);
+ TypedArrayTestHelper<double, v8::Float64Array, i::ExternalFloat64Array>(
+ v8::kExternalFloat64Array, -500, 500);
}
THREADED_TEST(Uint8ClampedArray) {
- TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray, i::ExternalPixelArray>(
- v8::kExternalPixelArray, 0, 0xFF);
+ TypedArrayTestHelper<uint8_t,
+ v8::Uint8ClampedArray, i::ExternalUint8ClampedArray>(
+ v8::kExternalUint8ClampedArray, 0, 0xFF);
}
@@ -16414,8 +16954,8 @@ THREADED_TEST(DataView) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(
- backing_store.start(), 2 + kSize);
+ Local<v8::ArrayBuffer> ab =
+ v8::ArrayBuffer::New(isolate, backing_store.start(), 2 + kSize);
Local<v8::DataView> dv =
v8::DataView::New(ab, 2, kSize);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
@@ -16458,13 +16998,17 @@ THREADED_TEST(ScriptContextDependence) {
LocalContext c1;
v8::HandleScope scope(c1->GetIsolate());
const char *source = "foo";
- v8::Handle<v8::Script> dep = v8::Script::Compile(v8::String::New(source));
- v8::Handle<v8::Script> indep = v8::Script::New(v8::String::New(source));
- c1->Global()->Set(v8::String::New("foo"), v8::Integer::New(100));
+ v8::Handle<v8::Script> dep =
+ v8::Script::Compile(v8::String::NewFromUtf8(c1->GetIsolate(), source));
+ v8::Handle<v8::Script> indep =
+ v8::Script::New(v8::String::NewFromUtf8(c1->GetIsolate(), source));
+ c1->Global()->Set(v8::String::NewFromUtf8(c1->GetIsolate(), "foo"),
+ v8::Integer::New(c1->GetIsolate(), 100));
CHECK_EQ(dep->Run()->Int32Value(), 100);
CHECK_EQ(indep->Run()->Int32Value(), 100);
LocalContext c2;
- c2->Global()->Set(v8::String::New("foo"), v8::Integer::New(101));
+ c2->Global()->Set(v8::String::NewFromUtf8(c2->GetIsolate(), "foo"),
+ v8::Integer::New(c2->GetIsolate(), 101));
CHECK_EQ(dep->Run()->Int32Value(), 100);
CHECK_EQ(indep->Run()->Int32Value(), 101);
}
@@ -16475,8 +17019,10 @@ THREADED_TEST(StackTrace) {
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch;
const char *source = "function foo() { FAIL.FAIL; }; foo();";
- v8::Handle<v8::String> src = v8::String::New(source);
- v8::Handle<v8::String> origin = v8::String::New("stack-trace-test");
+ v8::Handle<v8::String> src =
+ v8::String::NewFromUtf8(context->GetIsolate(), source);
+ v8::Handle<v8::String> origin =
+ v8::String::NewFromUtf8(context->GetIsolate(), "stack-trace-test");
v8::Script::New(src, origin)->Run();
CHECK(try_catch.HasCaught());
v8::String::Utf8Value stack(try_catch.StackTrace());
@@ -16516,8 +17062,8 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
int testGroup = args[0]->Int32Value();
if (testGroup == kOverviewTest) {
- v8::Handle<v8::StackTrace> stackTrace =
- v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kOverview);
+ v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kOverview);
CHECK_EQ(4, stackTrace->GetFrameCount());
checkStackFrame(origin, "bar", 2, 10, false, false,
stackTrace->GetFrame(0));
@@ -16532,8 +17078,8 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(stackTrace->AsArray()->IsArray());
} else if (testGroup == kDetailedTest) {
- v8::Handle<v8::StackTrace> stackTrace =
- v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
+ v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
checkStackFrame(origin, "bat", 4, 22, false, false,
stackTrace->GetFrame(0));
@@ -16561,11 +17107,13 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
// TODO(3074796): Reenable this as a THREADED_TEST once it passes.
// THREADED_TEST(CaptureStackTrace) {
TEST(CaptureStackTrace) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::String> origin = v8::String::New("capture-stack-trace-test");
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::String> origin =
+ v8::String::NewFromUtf8(isolate, "capture-stack-trace-test");
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("AnalyzeStackInNativeCode"),
- v8::FunctionTemplate::New(AnalyzeStackInNativeCode));
+ v8::FunctionTemplate::New(isolate, AnalyzeStackInNativeCode));
LocalContext context(0, templ);
// Test getting OVERVIEW information. Should ignore information that is not
@@ -16579,7 +17127,8 @@ TEST(CaptureStackTrace) {
" bar();\n"
"}\n"
"var x;eval('new foo();');";
- v8::Handle<v8::String> overview_src = v8::String::New(overview_source);
+ v8::Handle<v8::String> overview_src =
+ v8::String::NewFromUtf8(isolate, overview_source);
v8::Handle<Value> overview_result(
v8::Script::New(overview_src, origin)->Run());
CHECK(!overview_result.IsEmpty());
@@ -16594,10 +17143,11 @@ TEST(CaptureStackTrace) {
" bat();\n"
"}\n"
"eval('new baz();');";
- v8::Handle<v8::String> detailed_src = v8::String::New(detailed_source);
+ v8::Handle<v8::String> detailed_src =
+ v8::String::NewFromUtf8(isolate, detailed_source);
// Make the script using a non-zero line and column offset.
- v8::Handle<v8::Integer> line_offset = v8::Integer::New(3);
- v8::Handle<v8::Integer> column_offset = v8::Integer::New(5);
+ v8::Handle<v8::Integer> line_offset = v8::Integer::New(isolate, 3);
+ v8::Handle<v8::Integer> column_offset = v8::Integer::New(isolate, 5);
v8::ScriptOrigin detailed_origin(origin, line_offset, column_offset);
v8::Handle<v8::Script> detailed_script(
v8::Script::New(detailed_src, &detailed_origin));
@@ -16796,8 +17346,8 @@ TEST(RethrowBogusErrorStackTrace) {
void AnalyzeStackOfEvalWithSourceURL(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
- v8::Handle<v8::StackTrace> stackTrace =
- v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
+ v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(5, stackTrace->GetFrameCount());
v8::Handle<v8::String> url = v8_str("eval_url");
for (int i = 0; i < 3; i++) {
@@ -16810,10 +17360,12 @@ void AnalyzeStackOfEvalWithSourceURL(
TEST(SourceURLInStackTrace) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("AnalyzeStackOfEvalWithSourceURL"),
- v8::FunctionTemplate::New(AnalyzeStackOfEvalWithSourceURL));
+ v8::FunctionTemplate::New(isolate,
+ AnalyzeStackOfEvalWithSourceURL));
LocalContext context(0, templ);
const char *source =
@@ -16842,8 +17394,8 @@ static int scriptIdInStack[2];
void AnalyzeScriptIdInStack(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
- v8::Handle<v8::StackTrace> stackTrace =
- v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kScriptId);
+ v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kScriptId);
CHECK_EQ(2, stackTrace->GetFrameCount());
for (int i = 0; i < 2; i++) {
scriptIdInStack[i] = stackTrace->GetFrame(i)->GetScriptId();
@@ -16852,18 +17404,21 @@ void AnalyzeScriptIdInStack(
TEST(ScriptIdInStackTrace) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("AnalyzeScriptIdInStack"),
- v8::FunctionTemplate::New(AnalyzeScriptIdInStack));
+ v8::FunctionTemplate::New(isolate, AnalyzeScriptIdInStack));
LocalContext context(0, templ);
- v8::Handle<v8::String> scriptSource = v8::String::New(
+ v8::Handle<v8::String> scriptSource = v8::String::NewFromUtf8(
+ isolate,
"function foo() {\n"
" AnalyzeScriptIdInStack();"
"}\n"
"foo();\n");
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(isolate, "test"));
v8::Local<v8::Script> script(v8::Script::Compile(scriptSource, &origin));
script->Run();
for (int i = 0; i < 2; i++) {
@@ -16876,8 +17431,8 @@ TEST(ScriptIdInStackTrace) {
void AnalyzeStackOfInlineScriptWithSourceURL(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
- v8::Handle<v8::StackTrace> stackTrace =
- v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
+ v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
v8::Handle<v8::String> url = v8_str("url");
for (int i = 0; i < 3; i++) {
@@ -16890,11 +17445,12 @@ void AnalyzeStackOfInlineScriptWithSourceURL(
TEST(InlineScriptWithSourceURLInStackTrace) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("AnalyzeStackOfInlineScriptWithSourceURL"),
v8::FunctionTemplate::New(
- AnalyzeStackOfInlineScriptWithSourceURL));
+ isolate, AnalyzeStackOfInlineScriptWithSourceURL));
LocalContext context(0, templ);
const char *source =
@@ -16921,8 +17477,8 @@ TEST(InlineScriptWithSourceURLInStackTrace) {
void AnalyzeStackOfDynamicScriptWithSourceURL(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
- v8::Handle<v8::StackTrace> stackTrace =
- v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
+ v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
v8::Handle<v8::String> url = v8_str("source_url");
for (int i = 0; i < 3; i++) {
@@ -16935,11 +17491,12 @@ void AnalyzeStackOfDynamicScriptWithSourceURL(
TEST(DynamicWithSourceURLInStackTrace) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("AnalyzeStackOfDynamicScriptWithSourceURL"),
v8::FunctionTemplate::New(
- AnalyzeStackOfDynamicScriptWithSourceURL));
+ isolate, AnalyzeStackOfDynamicScriptWithSourceURL));
LocalContext context(0, templ);
const char *source =
@@ -16963,6 +17520,29 @@ TEST(DynamicWithSourceURLInStackTrace) {
}
+TEST(DynamicWithSourceURLInStackTraceString) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+
+ const char *source =
+ "function outer() {\n"
+ " function foo() {\n"
+ " FAIL.FAIL;\n"
+ " }\n"
+ " foo();\n"
+ "}\n"
+ "outer()\n%s";
+
+ i::ScopedVector<char> code(1024);
+ i::OS::SNPrintF(code, source, "//# sourceURL=source_url");
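+ // The %s placeholder receives the sourceURL comment, so the stack trace
+ // below should attribute the frames to source_url.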
+ v8::TryCatch try_catch;
+ CompileRunWithOrigin(code.start(), "", 0, 0);
+ CHECK(try_catch.HasCaught());
+ v8::String::Utf8Value stack(try_catch.StackTrace());
+ CHECK(strstr(*stack, "at foo (source_url:3:5)") != NULL);
+}
+
+
static void CreateGarbageInOldSpace() {
i::Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -17066,6 +17646,14 @@ TEST(Regress2107) {
CHECK_LT(final_size, initial_size + 1);
}
+
+TEST(Regress2333) {
+ LocalContext env;
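+ // Regression test: a handful of new-space collections with a live context
+ // should complete without crashing.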
+ for (int i = 0; i < 3; i++) {
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ }
+}
+
static uint32_t* stack_limit;
static void GetStackLimitCallback(
@@ -17099,13 +17687,13 @@ TEST(SetResourceConstraints) {
// Set stack limit.
v8::ResourceConstraints constraints;
constraints.set_stack_limit(set_limit);
- CHECK(v8::SetResourceConstraints(&constraints));
+ CHECK(v8::SetResourceConstraints(CcTest::isolate(), &constraints));
// Execute a script.
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(GetStackLimitCallback);
+ v8::FunctionTemplate::New(env->GetIsolate(), GetStackLimitCallback);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("get_stack_limit"), fun);
CompileRun("get_stack_limit();");
@@ -17123,13 +17711,13 @@ TEST(SetResourceConstraintsInThread) {
// Set stack limit.
v8::ResourceConstraints constraints;
constraints.set_stack_limit(set_limit);
- CHECK(v8::SetResourceConstraints(&constraints));
+ CHECK(v8::SetResourceConstraints(CcTest::isolate(), &constraints));
// Execute a script.
v8::HandleScope scope(CcTest::isolate());
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(GetStackLimitCallback);
+ v8::FunctionTemplate::New(CcTest::isolate(), GetStackLimitCallback);
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("get_stack_limit"), fun);
CompileRun("get_stack_limit();");
@@ -17191,6 +17779,50 @@ class VisitorImpl : public v8::ExternalResourceVisitor {
};
+TEST(ExternalizeOldSpaceTwoByteCons) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::String> cons =
+ CompileRun("'Romeo Montague ' + 'Juliet Capulet'")->ToString();
+ CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
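+ // Force the cons string into old space before externalizing it.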
+ CcTest::heap()->CollectAllAvailableGarbage();
+ CHECK(CcTest::heap()->old_pointer_space()->Contains(
+ *v8::Utils::OpenHandle(*cons)));
+
+ TestResource* resource = new TestResource(
+ AsciiToTwoByteString("Romeo Montague Juliet Capulet"));
+ cons->MakeExternal(resource);
+
+ CHECK(cons->IsExternal());
+ CHECK_EQ(resource, cons->GetExternalStringResource());
+ String::Encoding encoding;
+ CHECK_EQ(resource, cons->GetExternalStringResourceBase(&encoding));
+ CHECK_EQ(String::TWO_BYTE_ENCODING, encoding);
+}
+
+
+TEST(ExternalizeOldSpaceOneByteCons) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::String> cons =
+ CompileRun("'Romeo Montague ' + 'Juliet Capulet'")->ToString();
+ CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
+ CcTest::heap()->CollectAllAvailableGarbage();
+ CHECK(CcTest::heap()->old_pointer_space()->Contains(
+ *v8::Utils::OpenHandle(*cons)));
+
+ TestAsciiResource* resource =
+ new TestAsciiResource(i::StrDup("Romeo Montague Juliet Capulet"));
+ cons->MakeExternal(resource);
+
+ CHECK(cons->IsExternalAscii());
+ CHECK_EQ(resource, cons->GetExternalAsciiStringResource());
+ String::Encoding encoding;
+ CHECK_EQ(resource, cons->GetExternalStringResourceBase(&encoding));
+ CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
+}
+
+
TEST(VisitExternalStrings) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -17198,18 +17830,22 @@ TEST(VisitExternalStrings) {
uint16_t* two_byte_string = AsciiToTwoByteString(string);
TestResource* resource[4];
resource[0] = new TestResource(two_byte_string);
- v8::Local<v8::String> string0 = v8::String::NewExternal(resource[0]);
- resource[1] = new TestResource(two_byte_string);
- v8::Local<v8::String> string1 = v8::String::NewExternal(resource[1]);
+ v8::Local<v8::String> string0 =
+ v8::String::NewExternal(env->GetIsolate(), resource[0]);
+ resource[1] = new TestResource(two_byte_string, NULL, false);
+ v8::Local<v8::String> string1 =
+ v8::String::NewExternal(env->GetIsolate(), resource[1]);
// Externalized symbol.
- resource[2] = new TestResource(two_byte_string);
- v8::Local<v8::String> string2 = v8::String::NewSymbol(string);
+ resource[2] = new TestResource(two_byte_string, NULL, false);
+ v8::Local<v8::String> string2 = v8::String::NewFromUtf8(
+ env->GetIsolate(), string, v8::String::kInternalizedString);
CHECK(string2->MakeExternal(resource[2]));
// Symbolized External.
resource[3] = new TestResource(AsciiToTwoByteString("Some other string"));
- v8::Local<v8::String> string3 = v8::String::NewExternal(resource[3]);
+ v8::Local<v8::String> string3 =
+ v8::String::NewExternal(env->GetIsolate(), resource[3]);
CcTest::heap()->CollectAllAvailableGarbage(); // Tenure string.
// Turn into a symbol.
i::Handle<i::String> string3_i = v8::Utils::OpenHandle(*string3);
@@ -17228,6 +17864,75 @@ TEST(VisitExternalStrings) {
}
+TEST(ExternalStringCollectedAtTearDown) {
+ int destroyed = 0;
+ v8::Isolate* isolate = v8::Isolate::New();
+ { v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ const char* s = "One string to test them all, one string to find them.";
+ TestAsciiResource* inscription =
+ new TestAsciiResource(i::StrDup(s), &destroyed);
+ v8::Local<v8::String> ring = v8::String::NewExternal(isolate, inscription);
+ // Ring is still alive. Orcs are roaming freely across our lands.
+ CHECK_EQ(0, destroyed);
+ USE(ring);
+ }
+
+ isolate->Dispose();
+ // Ring has been destroyed. Free Peoples of Middle-earth Rejoice.
+ CHECK_EQ(1, destroyed);
+}
+
+
+TEST(ExternalInternalizedStringCollectedAtTearDown) {
+ int destroyed = 0;
+ v8::Isolate* isolate = v8::Isolate::New();
+ { v8::Isolate::Scope isolate_scope(isolate);
+ LocalContext env(isolate);
+ v8::HandleScope handle_scope(isolate);
+ CompileRun("var ring = 'One string to test them all';");
+ const char* s = "One string to test them all";
+ TestAsciiResource* inscription =
+ new TestAsciiResource(i::StrDup(s), &destroyed);
+ v8::Local<v8::String> ring = CompileRun("ring")->ToString();
+ CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
+ ring->MakeExternal(inscription);
+ // Ring is still alive. Orcs are roaming freely across our lands.
+ CHECK_EQ(0, destroyed);
+ USE(ring);
+ }
+
+ isolate->Dispose();
+ // Ring has been destroyed. Free Peoples of Middle-earth Rejoice.
+ CHECK_EQ(1, destroyed);
+}
+
+
+TEST(ExternalInternalizedStringCollectedAtGC) {
+ int destroyed = 0;
+ { LocalContext env;
+ v8::HandleScope handle_scope(env->GetIsolate());
+ CompileRun("var ring = 'One string to test them all';");
+ const char* s = "One string to test them all";
+ TestAsciiResource* inscription =
+ new TestAsciiResource(i::StrDup(s), &destroyed);
+ v8::Local<v8::String> ring = CompileRun("ring")->ToString();
+ CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
+ ring->MakeExternal(inscription);
+ // Ring is still alive. Orcs are roaming freely across our lands.
+ CHECK_EQ(0, destroyed);
+ USE(ring);
+ }
+
+ // Garbage collector deals swift blows to evil.
+ CcTest::i_isolate()->compilation_cache()->Clear();
+ CcTest::heap()->CollectAllAvailableGarbage();
+
+ // Ring has been destroyed. Free Peoples of Middle-earth Rejoice.
+ CHECK_EQ(1, destroyed);
+}
+
+
static double DoubleFromBits(uint64_t value) {
double target;
i::OS::MemCopy(&target, &value, sizeof(target));
@@ -17247,7 +17952,7 @@ static double DoubleToDateTime(double input) {
if (std::isnan(input) || input < -date_limit || input > date_limit) {
return i::OS::nan_value();
}
- return (input < 0) ? -(floor(-input)) : floor(input);
+ return (input < 0) ? -(std::floor(-input)) : std::floor(input);
}
@@ -17260,7 +17965,8 @@ static double DoubleFromBits(uint32_t high_bits, uint32_t low_bits) {
THREADED_TEST(QuietSignalingNaNs) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::TryCatch try_catch;
// Special double values.
@@ -17304,7 +18010,7 @@ THREADED_TEST(QuietSignalingNaNs) {
double test_value = test_values[i];
// Check that Number::New preserves non-NaNs and quiets SNaNs.
- v8::Handle<v8::Value> number = v8::Number::New(test_value);
+ v8::Handle<v8::Value> number = v8::Number::New(isolate, test_value);
double stored_number = number->NumberValue();
if (!std::isnan(test_value)) {
CHECK_EQ(test_value, stored_number);
@@ -17322,7 +18028,8 @@ THREADED_TEST(QuietSignalingNaNs) {
// Check that Date::New preserves non-NaNs in the date range and
// quiets SNaNs.
- v8::Handle<v8::Value> date = v8::Date::New(test_value);
+ v8::Handle<v8::Value> date =
+ v8::Date::New(isolate, test_value);
double expected_stored_date = DoubleToDateTime(test_value);
double stored_date = date->NumberValue();
if (!std::isnan(expected_stored_date)) {
@@ -17356,11 +18063,12 @@ static void SpaghettiIncident(
// Test that an exception can be propagated down through a spaghetti
// stack using ReThrow.
THREADED_TEST(SpaghettiStackReThrow) {
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
LocalContext context;
context->Global()->Set(
- v8::String::New("s"),
- v8::FunctionTemplate::New(SpaghettiIncident)->GetFunction());
+ v8::String::NewFromUtf8(isolate, "s"),
+ v8::FunctionTemplate::New(isolate, SpaghettiIncident)->GetFunction());
v8::TryCatch try_catch;
CompileRun(
"var i = 0;"
@@ -17400,7 +18108,7 @@ TEST(Regress528) {
v8::Local<Context> context = Context::New(isolate);
context->Enter();
- Local<v8::String> obj = v8::String::New("");
+ Local<v8::String> obj = v8::String::NewFromUtf8(isolate, "");
context->SetEmbedderData(0, obj);
CompileRun(source_simple);
context->Exit();
@@ -17472,14 +18180,15 @@ TEST(Regress528) {
THREADED_TEST(ScriptOrigin) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
- v8::Handle<v8::String> script = v8::String::New(
- "function f() {}\n\nfunction g() {}");
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
+ v8::Handle<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(), "function f() {}\n\nfunction g() {}");
v8::Script::Compile(script, &origin)->Run();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("f")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("g")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
v8::ScriptOrigin script_origin_f = f->GetScriptOrigin();
CHECK_EQ("test", *v8::String::Utf8Value(script_origin_f.ResourceName()));
@@ -17494,12 +18203,14 @@ THREADED_TEST(ScriptOrigin) {
THREADED_TEST(FunctionGetInferredName) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
- v8::Handle<v8::String> script = v8::String::New(
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
+ v8::Handle<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(),
"var foo = { bar : { baz : function() {}}}; var f = foo.bar.baz;");
v8::Script::Compile(script, &origin)->Run();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("f")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
CHECK_EQ("foo.bar.baz", *v8::String::Utf8Value(f->GetInferredName()));
}
@@ -17540,23 +18251,26 @@ THREADED_TEST(FunctionGetDisplayName) {
" arguments.callee.displayName = 'set_in_runtime';"
"}; g();"
;
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
- v8::Script::Compile(v8::String::New(code), &origin)->Run();
- v8::Local<v8::Value> error = env->Global()->Get(v8::String::New("error"));
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), code), &origin)
+ ->Run();
+ v8::Local<v8::Value> error =
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "error"));
v8::Local<v8::Function> a = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("a")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "a")));
v8::Local<v8::Function> b = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("b")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "b")));
v8::Local<v8::Function> c = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("c")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "c")));
v8::Local<v8::Function> d = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("d")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "d")));
v8::Local<v8::Function> e = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("e")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "e")));
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("f")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("g")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
CHECK_EQ(false, error->BooleanValue());
CHECK_EQ("display_a", *v8::String::Utf8Value(a->GetDisplayName()));
CHECK_EQ("display_b", *v8::String::Utf8Value(b->GetDisplayName()));
@@ -17571,14 +18285,15 @@ THREADED_TEST(FunctionGetDisplayName) {
THREADED_TEST(ScriptLineNumber) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
- v8::Handle<v8::String> script = v8::String::New(
- "function f() {}\n\nfunction g() {}");
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
+ v8::Handle<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(), "function f() {}\n\nfunction g() {}");
v8::Script::Compile(script, &origin)->Run();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("f")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("g")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
CHECK_EQ(0, f->GetScriptLineNumber());
CHECK_EQ(2, g->GetScriptLineNumber());
}
@@ -17586,16 +18301,19 @@ THREADED_TEST(ScriptLineNumber) {
THREADED_TEST(ScriptColumnNumber) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"),
- v8::Integer::New(3), v8::Integer::New(2));
- v8::Handle<v8::String> script = v8::String::New(
- "function foo() {}\n\n function bar() {}");
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(isolate, "test"),
+ v8::Integer::New(isolate, 3),
+ v8::Integer::New(isolate, 2));
+ v8::Handle<v8::String> script = v8::String::NewFromUtf8(
+ isolate, "function foo() {}\n\n function bar() {}");
v8::Script::Compile(script, &origin)->Run();
v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("foo")));
+ env->Global()->Get(v8::String::NewFromUtf8(isolate, "foo")));
v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("bar")));
+ env->Global()->Get(v8::String::NewFromUtf8(isolate, "bar")));
CHECK_EQ(14, foo->GetScriptColumnNumber());
CHECK_EQ(17, bar->GetScriptColumnNumber());
}
@@ -17603,7 +18321,8 @@ THREADED_TEST(ScriptColumnNumber) {
THREADED_TEST(FunctionIsBuiltin) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Local<v8::Function> f;
f = v8::Local<v8::Function>::Cast(CompileRun("Math.floor"));
CHECK(f->IsBuiltin());
@@ -17620,19 +18339,49 @@ THREADED_TEST(FunctionIsBuiltin) {
THREADED_TEST(FunctionGetScriptId) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"),
- v8::Integer::New(3), v8::Integer::New(2));
- v8::Handle<v8::String> scriptSource = v8::String::New(
- "function foo() {}\n\n function bar() {}");
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(isolate, "test"),
+ v8::Integer::New(isolate, 3),
+ v8::Integer::New(isolate, 2));
+ v8::Handle<v8::String> scriptSource = v8::String::NewFromUtf8(
+ isolate, "function foo() {}\n\n function bar() {}");
v8::Local<v8::Script> script(v8::Script::Compile(scriptSource, &origin));
script->Run();
v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("foo")));
+ env->Global()->Get(v8::String::NewFromUtf8(isolate, "foo")));
v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("bar")));
- CHECK_EQ(script->Id(), foo->GetScriptId());
- CHECK_EQ(script->Id(), bar->GetScriptId());
+ env->Global()->Get(v8::String::NewFromUtf8(isolate, "bar")));
+ CHECK_EQ(script->GetId(), foo->ScriptId());
+ CHECK_EQ(script->GetId(), bar->ScriptId());
+}
+
+
+THREADED_TEST(FunctionGetBoundFunction) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::NewFromUtf8(
+ env->GetIsolate(), "test"));
+ v8::Handle<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(),
+ "var a = new Object();\n"
+ "a.x = 1;\n"
+ "function f () { return this.x };\n"
+ "var g = f.bind(a);\n"
+ "var b = g();");
+ v8::Script::Compile(script, &origin)->Run();
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
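+ // GetBoundFunction should expose the original target of the bind, keeping
+ // its name and script position.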
+ CHECK(g->GetBoundFunction()->IsFunction());
+ Local<v8::Function> original_function = Local<v8::Function>::Cast(
+ g->GetBoundFunction());
+ CHECK_EQ(f->GetName(), original_function->GetName());
+ CHECK_EQ(f->GetScriptLineNumber(), original_function->GetScriptLineNumber());
+ CHECK_EQ(f->GetScriptColumnNumber(),
+ original_function->GetScriptColumnNumber());
}
@@ -17676,8 +18425,9 @@ void FooSetInterceptor(Local<String> name,
TEST(SetterOnConstructorPrototype) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"),
GetterWhichReturns42,
SetterWhichSetsYOnThisTo23);
@@ -17728,8 +18478,9 @@ static void NamedPropertySetterWhichSetsYOnThisTo23(
THREADED_TEST(InterceptorOnConstructorPrototype) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(NamedPropertyGetterWhichReturns42,
NamedPropertySetterWhichSetsYOnThisTo23);
LocalContext context;
@@ -17768,11 +18519,12 @@ TEST(Regress618) {
"C1.prototype = P;";
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Local<v8::Script> script;
// Use a simple object as prototype.
- v8::Local<v8::Object> prototype = v8::Object::New();
+ v8::Local<v8::Object> prototype = v8::Object::New(isolate);
prototype->Set(v8_str("y"), v8_num(42));
context->Global()->Set(v8_str("P"), prototype);
@@ -17789,7 +18541,7 @@ TEST(Regress618) {
}
// Use an API object with accessors as prototype.
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"),
GetterWhichReturns42,
SetterWhichSetsYOnThisTo23);
@@ -17967,8 +18719,6 @@ THREADED_TEST(AddToJSFunctionResultCache) {
}
-static const int k0CacheSize = 16;
-
THREADED_TEST(FillJSFunctionResultCache) {
i::FLAG_allow_natives_syntax = true;
LocalContext context;
@@ -18155,15 +18905,17 @@ TEST(ContainsOnlyOneByte) {
const int aligned_length = length*sizeof(uintptr_t)/sizeof(uint16_t);
i::SmartArrayPointer<uintptr_t>
aligned_contents(new uintptr_t[aligned_length]);
- uint16_t* string_contents = reinterpret_cast<uint16_t*>(*aligned_contents);
+ uint16_t* string_contents =
+ reinterpret_cast<uint16_t*>(aligned_contents.get());
// Set to contain only one byte.
for (int i = 0; i < length-1; i++) {
string_contents[i] = 0x41;
}
string_contents[length-1] = 0;
// Simple case.
- Handle<String> string;
- string = String::NewExternal(new TestResource(string_contents));
+ Handle<String> string =
+ String::NewExternal(isolate,
+ new TestResource(string_contents, NULL, false));
CHECK(!string->IsOneByte() && string->ContainsOnlyOneByte());
// Counter example.
string = String::NewFromTwoByte(isolate, string_contents);
@@ -18180,7 +18932,9 @@ TEST(ContainsOnlyOneByte) {
balanced = String::Concat(balanced, right);
Handle<String> cons_strings[] = {left, balanced, right};
Handle<String> two_byte =
- String::NewExternal(new TestResource(string_contents));
+ String::NewExternal(isolate,
+ new TestResource(string_contents, NULL, false));
+ USE(two_byte);
+ USE(cons_strings);
for (size_t i = 0; i < ARRAY_SIZE(cons_strings); i++) {
// Base assumptions.
string = cons_strings[i];
@@ -18200,8 +18954,9 @@ TEST(ContainsOnlyOneByte) {
for (int i = 0; i < size; i++) {
int shift = 8 + (i % 7);
string_contents[alignment + i] = 1 << shift;
- string =
- String::NewExternal(new TestResource(string_contents + alignment));
+ string = String::NewExternal(
+ isolate,
+ new TestResource(string_contents + alignment, NULL, false));
CHECK_EQ(size, string->Length());
CHECK(!string->ContainsOnlyOneByte());
string_contents[alignment + i] = 0x41;
@@ -18227,11 +18982,13 @@ TEST(GCInFailedAccessCheckCallback) {
v8::V8::Initialize();
v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckCallbackGC);
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallbacks(NamedGetAccessBlocker,
IndexedGetAccessBlocker,
v8::Handle<v8::Value>(),
@@ -18316,18 +19073,20 @@ TEST(IsolateNewDispose) {
UNINITIALIZED_TEST(DisposeIsolateWhenInUse) {
v8::Isolate* isolate = v8::Isolate::New();
- CHECK(isolate);
- isolate->Enter();
- v8::HandleScope scope(isolate);
- LocalContext context(isolate);
- // Run something in this isolate.
- ExpectTrue("true");
- v8::V8::SetFatalErrorHandler(StoringErrorCallback);
- last_location = last_message = NULL;
- // Still entered, should fail.
+ {
+ v8::Isolate::Scope i_scope(isolate);
+ v8::HandleScope scope(isolate);
+ LocalContext context(isolate);
+ // Run something in this isolate.
+ ExpectTrue("true");
+ v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ last_location = last_message = NULL;
+ // Still entered, should fail.
+ isolate->Dispose();
+ CHECK_NE(last_location, NULL);
+ CHECK_NE(last_message, NULL);
+ }
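+ // After leaving the scope the isolate is no longer entered, so this second
+ // Dispose is expected to succeed.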
isolate->Dispose();
- CHECK_NE(last_location, NULL);
- CHECK_NE(last_message, NULL);
}
@@ -18428,10 +19187,10 @@ TEST(RunTwoIsolatesOnSingleThread) {
{
v8::Isolate::Scope iscope(isolate2);
- context2.Dispose();
+ context2.Reset();
}
- context1.Dispose();
+ context1.Reset();
isolate1->Exit();
v8::V8::SetFatalErrorHandler(StoringErrorCallback);
@@ -18542,6 +19301,7 @@ TEST(IsolateDifferentContexts) {
CHECK(v->IsNumber());
CHECK_EQ(22, static_cast<int>(v->NumberValue()));
}
+ isolate->Dispose();
}
class InitDefaultIsolateThread : public v8::internal::Thread {
@@ -18573,7 +19333,7 @@ class InitDefaultIsolateThread : public v8::internal::Thread {
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
break;
}
@@ -18837,7 +19597,7 @@ TEST(PersistentHandleVisitor) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Persistent<v8::Object> object(isolate, v8::Object::New());
+ v8::Persistent<v8::Object> object(isolate, v8::Object::New(isolate));
CHECK_EQ(0, object.WrapperClassId());
object.SetWrapperClassId(42);
CHECK_EQ(42, object.WrapperClassId());
@@ -18846,7 +19606,7 @@ TEST(PersistentHandleVisitor) {
v8::V8::VisitHandlesWithClassIds(&visitor);
CHECK_EQ(1, visitor.counter_);
- object.Dispose();
+ object.Reset();
}
@@ -18854,11 +19614,11 @@ TEST(WrapperClassId) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Persistent<v8::Object> object(isolate, v8::Object::New());
+ v8::Persistent<v8::Object> object(isolate, v8::Object::New(isolate));
CHECK_EQ(0, object.WrapperClassId());
object.SetWrapperClassId(65535);
CHECK_EQ(65535, object.WrapperClassId());
- object.Dispose();
+ object.Reset();
}
@@ -18866,14 +19626,14 @@ TEST(PersistentHandleInNewSpaceVisitor) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Persistent<v8::Object> object1(isolate, v8::Object::New());
+ v8::Persistent<v8::Object> object1(isolate, v8::Object::New(isolate));
CHECK_EQ(0, object1.WrapperClassId());
object1.SetWrapperClassId(42);
CHECK_EQ(42, object1.WrapperClassId());
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- v8::Persistent<v8::Object> object2(isolate, v8::Object::New());
+ v8::Persistent<v8::Object> object2(isolate, v8::Object::New(isolate));
CHECK_EQ(0, object2.WrapperClassId());
object2.SetWrapperClassId(42);
CHECK_EQ(42, object2.WrapperClassId());
@@ -18882,8 +19642,8 @@ TEST(PersistentHandleInNewSpaceVisitor) {
v8::V8::VisitHandlesForPartialDependence(isolate, &visitor);
CHECK_EQ(1, visitor.counter_);
- object1.Dispose();
- object2.Dispose();
+ object1.Reset();
+ object2.Reset();
}
@@ -18944,7 +19704,7 @@ TEST(RegExp) {
ExpectTrue("re.test('FoobarbaZ')");
// RegExps are objects on which you can set properties.
- re->Set(v8_str("property"), v8::Integer::New(32));
+ re->Set(v8_str("property"), v8::Integer::New(context->GetIsolate(), 32));
v8::Handle<v8::Value> value(CompileRun("re.property"));
CHECK_EQ(32, value->Int32Value());
@@ -18983,7 +19743,7 @@ static void Getter(v8::Local<v8::String> property,
static void Enumerator(const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New();
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate());
result->Set(0, v8_str("universalAnswer"));
info.GetReturnValue().Set(result);
}
@@ -18991,10 +19751,11 @@ static void Enumerator(const v8::PropertyCallbackInfo<v8::Array>& info) {
TEST(NamedEnumeratorAndForIn) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(context.local());
- v8::Handle<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
tmpl->SetNamedPropertyHandler(Getter, NULL, NULL, NULL, Enumerator);
context->Global()->Set(v8_str("o"), tmpl->NewInstance());
v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
@@ -19023,7 +19784,7 @@ TEST(DefinePropertyPostDetach) {
static void InstallContextId(v8::Handle<Context> context, int id) {
Context::Scope scope(context);
CompileRun("Object.prototype").As<Object>()->
- Set(v8_str("context_id"), v8::Integer::New(id));
+ Set(v8_str("context_id"), v8::Integer::New(context->GetIsolate(), id));
}
@@ -19033,21 +19794,22 @@ static void CheckContextId(v8::Handle<Object> object, int expected) {
THREADED_TEST(CreationContext) {
- HandleScope handle_scope(CcTest::isolate());
- Handle<Context> context1 = Context::New(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ HandleScope handle_scope(isolate);
+ Handle<Context> context1 = Context::New(isolate);
InstallContextId(context1, 1);
- Handle<Context> context2 = Context::New(CcTest::isolate());
+ Handle<Context> context2 = Context::New(isolate);
InstallContextId(context2, 2);
- Handle<Context> context3 = Context::New(CcTest::isolate());
+ Handle<Context> context3 = Context::New(isolate);
InstallContextId(context3, 3);
- Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New(isolate);
Local<Object> object1;
Local<Function> func1;
{
Context::Scope scope(context1);
- object1 = Object::New();
+ object1 = Object::New(isolate);
func1 = tmpl->GetFunction();
}
@@ -19055,7 +19817,7 @@ THREADED_TEST(CreationContext) {
Local<Function> func2;
{
Context::Scope scope(context2);
- object2 = Object::New();
+ object2 = Object::New(isolate);
func2 = tmpl->GetFunction();
}
@@ -19174,7 +19936,8 @@ void HasOwnPropertyAccessorGetter(
TEST(HasOwnProperty) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
{ // Check normal properties and defined getters.
Handle<Value> value = CompileRun(
"function Foo() {"
@@ -19197,7 +19960,7 @@ TEST(HasOwnProperty) {
CHECK(object->HasOwnProperty(v8_str("bla")));
}
{ // Check named getter interceptors.
- Handle<ObjectTemplate> templ = ObjectTemplate::New();
+ Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(HasOwnPropertyNamedPropertyGetter);
Handle<Object> instance = templ->NewInstance();
CHECK(!instance->HasOwnProperty(v8_str("42")));
@@ -19205,7 +19968,7 @@ TEST(HasOwnProperty) {
CHECK(!instance->HasOwnProperty(v8_str("bar")));
}
{ // Check indexed getter interceptors.
- Handle<ObjectTemplate> templ = ObjectTemplate::New();
+ Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(HasOwnPropertyIndexedPropertyGetter);
Handle<Object> instance = templ->NewInstance();
CHECK(instance->HasOwnProperty(v8_str("42")));
@@ -19213,28 +19976,28 @@ TEST(HasOwnProperty) {
CHECK(!instance->HasOwnProperty(v8_str("foo")));
}
{ // Check named query interceptors.
- Handle<ObjectTemplate> templ = ObjectTemplate::New();
+ Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(0, 0, HasOwnPropertyNamedPropertyQuery);
Handle<Object> instance = templ->NewInstance();
CHECK(instance->HasOwnProperty(v8_str("foo")));
CHECK(!instance->HasOwnProperty(v8_str("bar")));
}
{ // Check indexed query interceptors.
- Handle<ObjectTemplate> templ = ObjectTemplate::New();
+ Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(0, 0, HasOwnPropertyIndexedPropertyQuery);
Handle<Object> instance = templ->NewInstance();
CHECK(instance->HasOwnProperty(v8_str("42")));
CHECK(!instance->HasOwnProperty(v8_str("41")));
}
{ // Check callbacks.
- Handle<ObjectTemplate> templ = ObjectTemplate::New();
+ Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("foo"), HasOwnPropertyAccessorGetter);
Handle<Object> instance = templ->NewInstance();
CHECK(instance->HasOwnProperty(v8_str("foo")));
CHECK(!instance->HasOwnProperty(v8_str("bar")));
}
{ // Check that query wins on disagreement.
- Handle<ObjectTemplate> templ = ObjectTemplate::New();
+ Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetNamedPropertyHandler(HasOwnPropertyNamedPropertyGetter,
0,
HasOwnPropertyNamedPropertyQuery2);
@@ -19246,8 +20009,9 @@ TEST(HasOwnProperty) {
TEST(IndexedInterceptorWithStringProto) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetIndexedPropertyHandler(NULL,
NULL,
HasOwnPropertyIndexedPropertyQuery);
@@ -19361,8 +20125,10 @@ static void NonObjectThis(const v8::FunctionCallbackInfo<v8::Value>& args) {
THREADED_TEST(CallAPIFunctionOnNonObject) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Handle<FunctionTemplate> templ = v8::FunctionTemplate::New(NonObjectThis);
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Handle<FunctionTemplate> templ =
+ v8::FunctionTemplate::New(isolate, NonObjectThis);
Handle<Function> function = templ->GetFunction();
context->Global()->Set(v8_str("f"), function);
TryCatch try_catch;
@@ -19372,8 +20138,9 @@ THREADED_TEST(CallAPIFunctionOnNonObject) {
// Regression test for issue 1470.
THREADED_TEST(ReadOnlyIndexedProperties) {
- v8::HandleScope scope(CcTest::isolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
LocalContext context;
Local<v8::Object> obj = templ->NewInstance();
@@ -19441,18 +20208,19 @@ THREADED_TEST(Regress93759) {
HandleScope scope(isolate);
// Template for object with security check.
- Local<ObjectTemplate> no_proto_template = v8::ObjectTemplate::New();
+ Local<ObjectTemplate> no_proto_template = v8::ObjectTemplate::New(isolate);
// We don't do indexing, so any callback can be used for that.
no_proto_template->SetAccessCheckCallbacks(
BlockProtoNamedSecurityTestCallback,
IndexedSecurityTestCallback);
// Templates for objects with hidden prototypes and possibly security check.
- Local<FunctionTemplate> hidden_proto_template = v8::FunctionTemplate::New();
+ Local<FunctionTemplate> hidden_proto_template =
+ v8::FunctionTemplate::New(isolate);
hidden_proto_template->SetHiddenPrototype(true);
Local<FunctionTemplate> protected_hidden_proto_template =
- v8::FunctionTemplate::New();
+ v8::FunctionTemplate::New(isolate);
protected_hidden_proto_template->InstanceTemplate()->SetAccessCheckCallbacks(
BlockProtoNamedSecurityTestCallback,
IndexedSecurityTestCallback);
@@ -19463,7 +20231,7 @@ THREADED_TEST(Regress93759) {
context->Enter();
// Plain object, no security check.
- Local<Object> simple_object = Object::New();
+ Local<Object> simple_object = Object::New(isolate);
// Object with explicit security check.
Local<Object> protected_object =
@@ -19481,21 +20249,21 @@ THREADED_TEST(Regress93759) {
Local<Object> hidden_prototype =
hidden_proto_template->GetFunction()->NewInstance();
Local<Object> object_with_hidden =
- Object::New();
+ Object::New(isolate);
object_with_hidden->SetPrototype(hidden_prototype);
// Hidden prototype with security check on the hidden prototype.
Local<Object> protected_hidden_prototype =
protected_hidden_proto_template->GetFunction()->NewInstance();
Local<Object> object_with_protected_hidden =
- Object::New();
+ Object::New(isolate);
object_with_protected_hidden->SetPrototype(protected_hidden_prototype);
context->Exit();
// Template for object for second context. Values to test are put on it as
// properties.
- Local<ObjectTemplate> global_template = ObjectTemplate::New();
+ Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
global_template->Set(v8_str("simple"), simple_object);
global_template->Set(v8_str("protected"), protected_object);
global_template->Set(v8_str("global"), global_object);
@@ -19528,7 +20296,7 @@ THREADED_TEST(Regress93759) {
THREADED_TEST(Regress125988) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> intercept = FunctionTemplate::New();
+ Handle<FunctionTemplate> intercept = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(intercept, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
env->Global()->Set(v8_str("Intercept"), intercept->GetFunction());
@@ -19645,11 +20413,10 @@ THREADED_TEST(ForeignFunctionReceiver) {
CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/g,func)[1]")));
CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/g,func)[3]")));
- // TODO(1547): Make the following also return "i".
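+ // Issue 1547 is fixed: the following calls now return "i" as well.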
// Calling with environment record as base.
- TestReceiver(o, context->Global(), "func()");
+ TestReceiver(i, foreign_context->Global(), "func()");
// Calling with no base.
- TestReceiver(o, context->Global(), "(1,func)()");
+ TestReceiver(i, foreign_context->Global(), "(1,func)()");
}
@@ -19690,7 +20457,7 @@ TEST(CallCompletedCallback) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Handle<v8::FunctionTemplate> recursive_runtime =
- v8::FunctionTemplate::New(RecursiveCall);
+ v8::FunctionTemplate::New(env->GetIsolate(), RecursiveCall);
env->Global()->Set(v8_str("recursion"),
recursive_runtime->GetFunction());
// Adding the same callback a second time has no effect.
@@ -19698,8 +20465,8 @@ TEST(CallCompletedCallback) {
v8::V8::AddCallCompletedCallback(CallCompletedCallback1);
v8::V8::AddCallCompletedCallback(CallCompletedCallback2);
i::OS::Print("--- Script (1) ---\n");
- Local<Script> script =
- v8::Script::Compile(v8::String::New("recursion(0)"));
+ Local<Script> script = v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "recursion(0)"));
script->Run();
CHECK_EQ(3, callback_fired);
@@ -19747,6 +20514,102 @@ TEST(CallCompletedCallbackTwoExceptions) {
}
+static void MicrotaskOne(const v8::FunctionCallbackInfo<Value>& info) {
+ v8::HandleScope scope(info.GetIsolate());
+ CompileRun("ext1Calls++;");
+}
+
+
+static void MicrotaskTwo(const v8::FunctionCallbackInfo<Value>& info) {
+ v8::HandleScope scope(info.GetIsolate());
+ CompileRun("ext2Calls++;");
+}
+
+
+TEST(EnqueueMicrotask) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun(
+ "var ext1Calls = 0;"
+ "var ext2Calls = 0;");
+ CompileRun("1+1;");
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+}
+
+
+TEST(SetAutorunMicrotasks) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun(
+ "var ext1Calls = 0;"
+ "var ext2Calls = 0;");
+ CompileRun("1+1;");
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ V8::SetAutorunMicrotasks(env->GetIsolate(), false);
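+ // With autorun disabled, newly queued microtasks stay pending until
+ // RunMicrotasks is called explicitly.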
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ V8::RunMicrotasks(env->GetIsolate());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+
+ V8::RunMicrotasks(env->GetIsolate());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+
+ V8::SetAutorunMicrotasks(env->GetIsolate(), true);
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value());
+}
+
+
static int probes_counter = 0;
static int misses_counter = 0;
static int updates_counter = 0;
@@ -19800,7 +20663,11 @@ static void StubCacheHelper(bool primary) {
int updates = updates_counter - initial_updates;
CHECK_LT(updates, 10);
CHECK_LT(misses, 10);
- CHECK_GE(probes, 10000);
+ // TODO(verwaest): Update this test to overflow the degree of polymorphism
+ // before megamorphism. The number of probes will only work once we teach the
+ // serializer to embed references to counters in the stubs, given that the
+ // megamorphic_stub_cache_probes is updated in a snapshot-generated stub.
+ CHECK_GE(probes, 0);
#endif
}
@@ -19815,6 +20682,34 @@ TEST(PrimaryStubCache) {
}
+static int cow_arrays_created_runtime = 0;
+
+
+static int* LookupCounterCOWArrays(const char* name) {
+ if (strcmp(name, "c:V8.COWArraysCreatedRuntime") == 0) {
+ return &cow_arrays_created_runtime;
+ }
+ return NULL;
+}
+
+
+TEST(CheckCOWArraysCreatedRuntimeCounter) {
+ V8::SetCounterFunction(LookupCounterCOWArrays);
+#ifdef DEBUG
+ i::FLAG_native_code_counters = true;
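+ // The COW-array counter is only wired up in debug builds with native code
+ // counters enabled, hence the #ifdef DEBUG guard.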
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ int initial_cow_arrays = cow_arrays_created_runtime;
+ CompileRun("var o = [1, 2, 3];");
+ CHECK_EQ(1, cow_arrays_created_runtime - initial_cow_arrays);
+ CompileRun("var o = {foo: [4, 5, 6], bar: [3, 0]};");
+ CHECK_EQ(3, cow_arrays_created_runtime - initial_cow_arrays);
+ CompileRun("var o = {foo: [1, 2, 3, [4, 5, 6]], bar: 'hi'};");
+ CHECK_EQ(4, cow_arrays_created_runtime - initial_cow_arrays);
+#endif
+}
+
+
TEST(StaticGetters) {
LocalContext context;
i::Factory* factory = CcTest::i_isolate()->factory();
@@ -19836,16 +20731,28 @@ UNINITIALIZED_TEST(IsolateEmbedderData) {
v8::Isolate* isolate = v8::Isolate::New();
isolate->Enter();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- CHECK_EQ(NULL, isolate->GetData());
- CHECK_EQ(NULL, i_isolate->GetData());
- static void* data1 = reinterpret_cast<void*>(0xacce55ed);
- isolate->SetData(data1);
- CHECK_EQ(data1, isolate->GetData());
- CHECK_EQ(data1, i_isolate->GetData());
- static void* data2 = reinterpret_cast<void*>(0xdecea5ed);
- i_isolate->SetData(data2);
- CHECK_EQ(data2, isolate->GetData());
- CHECK_EQ(data2, i_isolate->GetData());
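+ // Embedder data now lives in numbered slots; each slot should start out
+ // NULL and round-trip values through both the external and internal API.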
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ CHECK_EQ(NULL, isolate->GetData(slot));
+ CHECK_EQ(NULL, i_isolate->GetData(slot));
+ }
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ void* data = reinterpret_cast<void*>(0xacce55ed + slot);
+ isolate->SetData(slot, data);
+ }
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ void* data = reinterpret_cast<void*>(0xacce55ed + slot);
+ CHECK_EQ(data, isolate->GetData(slot));
+ CHECK_EQ(data, i_isolate->GetData(slot));
+ }
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ void* data = reinterpret_cast<void*>(0xdecea5ed + slot);
+ isolate->SetData(slot, data);
+ }
+ for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
+ void* data = reinterpret_cast<void*>(0xdecea5ed + slot);
+ CHECK_EQ(data, isolate->GetData(slot));
+ CHECK_EQ(data, i_isolate->GetData(slot));
+ }
isolate->Exit();
isolate->Dispose();
}
@@ -19857,7 +20764,6 @@ TEST(StringEmpty) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
i::Handle<i::Object> empty_string = factory->empty_string();
- CHECK(*v8::Utils::OpenHandle(*v8::String::Empty()) == *empty_string);
CHECK(*v8::Utils::OpenHandle(*v8::String::Empty(isolate)) == *empty_string);
}
@@ -19947,14 +20853,14 @@ THREADED_TEST(InstanceCheckOnInstanceAccessor) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Local<FunctionTemplate> templ = FunctionTemplate::New();
+ Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
Local<ObjectTemplate> inst = templ->InstanceTemplate();
inst->SetAccessor(v8_str("foo"),
InstanceCheckedGetter, InstanceCheckedSetter,
Handle<Value>(),
v8::DEFAULT,
v8::None,
- v8::AccessorSignature::New(templ));
+ v8::AccessorSignature::New(context->GetIsolate(), templ));
context->Global()->Set(v8_str("f"), templ->GetFunction());
printf("Testing positive ...\n");
@@ -19975,7 +20881,7 @@ THREADED_TEST(InstanceCheckOnInstanceAccessorWithInterceptor) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Local<FunctionTemplate> templ = FunctionTemplate::New();
+ Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
Local<ObjectTemplate> inst = templ->InstanceTemplate();
AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
inst->SetAccessor(v8_str("foo"),
@@ -19983,7 +20889,7 @@ THREADED_TEST(InstanceCheckOnInstanceAccessorWithInterceptor) {
Handle<Value>(),
v8::DEFAULT,
v8::None,
- v8::AccessorSignature::New(templ));
+ v8::AccessorSignature::New(context->GetIsolate(), templ));
context->Global()->Set(v8_str("f"), templ->GetFunction());
printf("Testing positive ...\n");
@@ -20004,14 +20910,14 @@ THREADED_TEST(InstanceCheckOnPrototypeAccessor) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Local<FunctionTemplate> templ = FunctionTemplate::New();
+ Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
Local<ObjectTemplate> proto = templ->PrototypeTemplate();
proto->SetAccessor(v8_str("foo"),
InstanceCheckedGetter, InstanceCheckedSetter,
Handle<Value>(),
v8::DEFAULT,
v8::None,
- v8::AccessorSignature::New(templ));
+ v8::AccessorSignature::New(context->GetIsolate(), templ));
context->Global()->Set(v8_str("f"), templ->GetFunction());
printf("Testing positive ...\n");
@@ -20083,7 +20989,7 @@ static void Helper137002(bool do_store,
bool remove_accessor,
bool interceptor) {
LocalContext context;
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ Local<ObjectTemplate> templ = ObjectTemplate::New(context->GetIsolate());
if (interceptor) {
templ->SetNamedPropertyHandler(FooGetInterceptor, FooSetInterceptor);
} else {
@@ -20138,8 +21044,9 @@ THREADED_TEST(Regress137002a) {
THREADED_TEST(Regress137002b) {
i::FLAG_allow_natives_syntax = true;
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("foo"),
GetterWhichReturns42,
SetterWhichSetsYOnThisTo23);
@@ -20206,8 +21113,9 @@ THREADED_TEST(Regress137002b) {
THREADED_TEST(Regress142088) {
i::FLAG_allow_natives_syntax = true;
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("foo"),
GetterWhichReturns42,
SetterWhichSetsYOnThisTo23);
@@ -20237,7 +21145,7 @@ THREADED_TEST(Regress137496) {
THREADED_TEST(Regress149912) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
context->Global()->Set(v8_str("Bug"), templ->GetFunction());
CompileRun("Number.prototype.__proto__ = new Bug; var x = 0; x.foo();");
@@ -20246,8 +21154,9 @@ THREADED_TEST(Regress149912) {
THREADED_TEST(Regress157124) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<ObjectTemplate> templ = ObjectTemplate::New();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
Local<Object> obj = templ->NewInstance();
obj->GetIdentityHash();
obj->DeleteHiddenValue(v8_str("Bug"));
@@ -20271,8 +21180,8 @@ THREADED_TEST(Regress2746) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- Local<Object> obj = Object::New();
- Local<String> key = String::New("key");
+ Local<Object> obj = Object::New(isolate);
+ Local<String> key = String::NewFromUtf8(context->GetIsolate(), "key");
obj->SetHiddenValue(key, v8::Undefined(isolate));
Local<Value> value = obj->GetHiddenValue(key);
CHECK(!value.IsEmpty());
@@ -20282,8 +21191,10 @@ THREADED_TEST(Regress2746) {
THREADED_TEST(Regress260106) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<FunctionTemplate> templ = FunctionTemplate::New(DummyCallHandler);
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<FunctionTemplate> templ = FunctionTemplate::New(isolate,
+ DummyCallHandler);
CompileRun("for (var i = 0; i < 128; i++) Object.prototype[i] = 0;");
Local<Function> function = templ->GetFunction();
CHECK(!function.IsEmpty());
@@ -20372,6 +21283,9 @@ THREADED_TEST(SemaphoreInterruption) {
}
+#endif // V8_OS_POSIX
+
+
static bool NamedAccessAlwaysBlocked(Local<v8::Object> global,
Local<Value> name,
v8::AccessType type,
@@ -20397,11 +21311,13 @@ void UnreachableCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(JSONStringifyAccessCheck) {
v8::V8::Initialize();
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallbacks(NamedAccessAlwaysBlocked,
IndexAccessAlwaysBlocked);
@@ -20415,7 +21331,7 @@ TEST(JSONStringifyAccessCheck) {
if (i == 1) {
// Install a toJSON function on the second run.
v8::Handle<v8::FunctionTemplate> toJSON =
- v8::FunctionTemplate::New(UnreachableCallback);
+ v8::FunctionTemplate::New(isolate, UnreachableCallback);
global0->Set(v8_str("toJSON"), toJSON->GetFunction());
}
@@ -20430,7 +21346,7 @@ TEST(JSONStringifyAccessCheck) {
ExpectString("JSON.stringify([other, 'b', 'c'])",
"[{},\"b\",\"c\"]");
- v8::Handle<v8::Array> array = v8::Array::New(2);
+ v8::Handle<v8::Array> array = v8::Array::New(isolate, 2);
array->Set(0, v8_str("a"));
array->Set(1, v8_str("b"));
context1->Global()->Set(v8_str("array"), array);
@@ -20495,11 +21411,13 @@ TEST(AccessCheckThrows) {
i::FLAG_allow_natives_syntax = true;
v8::V8::Initialize();
v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckThrows);
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallbacks(NamedAccessAlwaysBlocked,
IndexAccessAlwaysBlocked);
@@ -20514,11 +21432,11 @@ TEST(AccessCheckThrows) {
context1->Global()->Set(v8_str("other"), global0);
v8::Handle<v8::FunctionTemplate> catcher_fun =
- v8::FunctionTemplate::New(CatcherCallback);
+ v8::FunctionTemplate::New(isolate, CatcherCallback);
context1->Global()->Set(v8_str("catcher"), catcher_fun->GetFunction());
v8::Handle<v8::FunctionTemplate> has_own_property_fun =
- v8::FunctionTemplate::New(HasOwnPropertyCallback);
+ v8::FunctionTemplate::New(isolate, HasOwnPropertyCallback);
context1->Global()->Set(v8_str("has_own_property"),
has_own_property_fun->GetFunction());
@@ -20543,7 +21461,8 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("%HasElement(other, 1)");
CheckCorrectThrow("%IsPropertyEnumerable(other, 'x')");
CheckCorrectThrow("%GetPropertyNames(other)");
- CheckCorrectThrow("%GetLocalPropertyNames(other, true)");
+ // PROPERTY_ATTRIBUTES_NONE = 0
+ CheckCorrectThrow("%GetLocalPropertyNames(other, 0)");
CheckCorrectThrow("%DefineOrRedefineAccessorProperty("
"other, 'x', null, null, 1)");
@@ -20557,7 +21476,7 @@ THREADED_TEST(Regress256330) {
i::FLAG_allow_natives_syntax = true;
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
context->Global()->Set(v8_str("Bug"), templ->GetFunction());
CompileRun("\"use strict\"; var o = new Bug;"
@@ -20572,7 +21491,7 @@ THREADED_TEST(Regress256330) {
THREADED_TEST(CrankshaftInterceptorSetter) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
env->Global()->Set(v8_str("Obj"), templ->GetFunction());
@@ -20598,7 +21517,7 @@ THREADED_TEST(CrankshaftInterceptorSetter) {
THREADED_TEST(CrankshaftInterceptorGetter) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
env->Global()->Set(v8_str("Obj"), templ->GetFunction());
@@ -20621,7 +21540,7 @@ THREADED_TEST(CrankshaftInterceptorGetter) {
THREADED_TEST(CrankshaftInterceptorFieldRead) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
env->Global()->Set(v8_str("Obj"), templ->GetFunction());
@@ -20641,7 +21560,7 @@ THREADED_TEST(CrankshaftInterceptorFieldRead) {
THREADED_TEST(CrankshaftInterceptorFieldWrite) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New();
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
env->Global()->Set(v8_str("Obj"), templ->GetFunction());
@@ -20658,7 +21577,259 @@ THREADED_TEST(CrankshaftInterceptorFieldWrite) {
}
-#endif // V8_OS_POSIX
+class RequestInterruptTestBase {
+ public:
+ RequestInterruptTestBase()
+ : env_(),
+ isolate_(env_->GetIsolate()),
+ sem_(0),
+ warmup_(20000),
+ should_continue_(true) {
+ }
+
+ virtual ~RequestInterruptTestBase() { }
+
+ virtual void TestBody() = 0;
+
+ void RunTest() {
+ InterruptThread i_thread(this);
+ i_thread.Start();
+
+ v8::HandleScope handle_scope(isolate_);
+
+ TestBody();
+
+ isolate_->ClearInterrupt();
+
+ // Verify that we arrived here because the interruptor was called,
+ // not because a bug caused us to exit the loop too early.
+ CHECK(!should_continue());
+ }
+
+ void WakeUpInterruptor() {
+ sem_.Signal();
+ }
+
+ bool should_continue() const { return should_continue_; }
+
+ bool ShouldContinue() {
+ if (warmup_ > 0) {
+ if (--warmup_ == 0) {
+ WakeUpInterruptor();
+ }
+ }
+
+ return should_continue_;
+ }
+
+ protected:
+ static void ShouldContinueCallback(
+ const v8::FunctionCallbackInfo<Value>& info) {
+ RequestInterruptTestBase* test =
+ reinterpret_cast<RequestInterruptTestBase*>(
+ info.Data().As<v8::External>()->Value());
+ info.GetReturnValue().Set(test->ShouldContinue());
+ }
+
+ class InterruptThread : public i::Thread {
+ public:
+ explicit InterruptThread(RequestInterruptTestBase* test)
+ : Thread("RequestInterruptTest"), test_(test) {}
+
+ virtual void Run() {
+ test_->sem_.Wait();
+ test_->isolate_->RequestInterrupt(&OnInterrupt, test_);
+ }
+
+ static void OnInterrupt(v8::Isolate* isolate, void* data) {
+ reinterpret_cast<RequestInterruptTestBase*>(data)->
+ should_continue_ = false;
+ }
+
+ private:
+ RequestInterruptTestBase* test_;
+ };
+
+ LocalContext env_;
+ v8::Isolate* isolate_;
+ i::Semaphore sem_;
+ int warmup_;
+ bool should_continue_;
+};
+
+
+class RequestInterruptTestWithFunctionCall : public RequestInterruptTestBase {
+ public:
+ virtual void TestBody() {
+ Local<Function> func = Function::New(
+ isolate_, ShouldContinueCallback, v8::External::New(isolate_, this));
+ env_->Global()->Set(v8_str("ShouldContinue"), func);
+
+ CompileRun("while (ShouldContinue()) { }");
+ }
+};
+
+
+class RequestInterruptTestWithMethodCall : public RequestInterruptTestBase {
+ public:
+ virtual void TestBody() {
+ v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
+ v8::Local<v8::Template> proto = t->PrototypeTemplate();
+ proto->Set(v8_str("shouldContinue"), Function::New(
+ isolate_, ShouldContinueCallback, v8::External::New(isolate_, this)));
+ env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+
+ CompileRun("var obj = new Klass; while (obj.shouldContinue()) { }");
+ }
+};
+
+
+class RequestInterruptTestWithAccessor : public RequestInterruptTestBase {
+ public:
+ virtual void TestBody() {
+ v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
+ v8::Local<v8::Template> proto = t->PrototypeTemplate();
+ proto->SetAccessorProperty(v8_str("shouldContinue"), FunctionTemplate::New(
+ isolate_, ShouldContinueCallback, v8::External::New(isolate_, this)));
+ env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+
+ CompileRun("var obj = new Klass; while (obj.shouldContinue) { }");
+ }
+};
+
+
+class RequestInterruptTestWithNativeAccessor : public RequestInterruptTestBase {
+ public:
+ virtual void TestBody() {
+ v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
+ t->InstanceTemplate()->SetNativeDataProperty(
+ v8_str("shouldContinue"),
+ &ShouldContinueNativeGetter,
+ NULL,
+ v8::External::New(isolate_, this));
+ env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+
+ CompileRun("var obj = new Klass; while (obj.shouldContinue) { }");
+ }
+
+ private:
+ static void ShouldContinueNativeGetter(
+ Local<String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ RequestInterruptTestBase* test =
+ reinterpret_cast<RequestInterruptTestBase*>(
+ info.Data().As<v8::External>()->Value());
+ info.GetReturnValue().Set(test->ShouldContinue());
+ }
+};
+
+
+class RequestInterruptTestWithMethodCallAndInterceptor
+ : public RequestInterruptTestBase {
+ public:
+ virtual void TestBody() {
+ v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
+ v8::Local<v8::Template> proto = t->PrototypeTemplate();
+ proto->Set(v8_str("shouldContinue"), Function::New(
+ isolate_, ShouldContinueCallback, v8::External::New(isolate_, this)));
+ v8::Local<v8::ObjectTemplate> instance_template = t->InstanceTemplate();
+ instance_template->SetNamedPropertyHandler(EmptyInterceptor);
+
+ env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+
+ CompileRun("var obj = new Klass; while (obj.shouldContinue()) { }");
+ }
+
+ private:
+ static void EmptyInterceptor(
+ Local<String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ }
+};
+
+
+class RequestInterruptTestWithMathAbs : public RequestInterruptTestBase {
+ public:
+ virtual void TestBody() {
+ env_->Global()->Set(v8_str("WakeUpInterruptor"), Function::New(
+ isolate_,
+ WakeUpInterruptorCallback,
+ v8::External::New(isolate_, this)));
+
+ env_->Global()->Set(v8_str("ShouldContinue"), Function::New(
+ isolate_,
+ ShouldContinueCallback,
+ v8::External::New(isolate_, this)));
+
+ i::FLAG_allow_natives_syntax = true;
+ CompileRun("function loopish(o) {"
+ " var pre = 10;"
+ " while (o.abs(1) > 0) {"
+ " if (o.abs(1) >= 0 && !ShouldContinue()) break;"
+ " if (pre > 0) {"
+ " if (--pre === 0) WakeUpInterruptor(o === Math);"
+ " }"
+ " }"
+ "}"
+ "var i = 50;"
+ "var obj = {abs: function () { return i-- }, x: null};"
+ "delete obj.x;"
+ "loopish(obj);"
+ "%OptimizeFunctionOnNextCall(loopish);"
+ "loopish(Math);");
+
+ i::FLAG_allow_natives_syntax = false;
+ }
+
+ private:
+ static void WakeUpInterruptorCallback(
+ const v8::FunctionCallbackInfo<Value>& info) {
+ if (!info[0]->BooleanValue()) return;
+
+ RequestInterruptTestBase* test =
+ reinterpret_cast<RequestInterruptTestBase*>(
+ info.Data().As<v8::External>()->Value());
+ test->WakeUpInterruptor();
+ }
+
+ static void ShouldContinueCallback(
+ const v8::FunctionCallbackInfo<Value>& info) {
+ RequestInterruptTestBase* test =
+ reinterpret_cast<RequestInterruptTestBase*>(
+ info.Data().As<v8::External>()->Value());
+ info.GetReturnValue().Set(test->should_continue());
+ }
+};
+
+
+TEST(RequestInterruptTestWithFunctionCall) {
+ RequestInterruptTestWithFunctionCall().RunTest();
+}
+
+
+TEST(RequestInterruptTestWithMethodCall) {
+ RequestInterruptTestWithMethodCall().RunTest();
+}
+
+
+TEST(RequestInterruptTestWithAccessor) {
+ RequestInterruptTestWithAccessor().RunTest();
+}
+
+
+TEST(RequestInterruptTestWithNativeAccessor) {
+ RequestInterruptTestWithNativeAccessor().RunTest();
+}
+
+
+TEST(RequestInterruptTestWithMethodCallAndInterceptor) {
+ RequestInterruptTestWithMethodCallAndInterceptor().RunTest();
+}
+
+
+TEST(RequestInterruptTestWithMathAbs) {
+ RequestInterruptTestWithMathAbs().RunTest();
+}
static Local<Value> function_new_expected_env;
@@ -20672,12 +21843,12 @@ THREADED_TEST(FunctionNew) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- Local<Object> data = v8::Object::New();
+ Local<Object> data = v8::Object::New(isolate);
function_new_expected_env = data;
Local<Function> func = Function::New(isolate, FunctionNewCallback, data);
env->Global()->Set(v8_str("func"), func);
Local<Value> result = CompileRun("func();");
- CHECK_EQ(v8::Integer::New(17, isolate), result);
+ CHECK_EQ(v8::Integer::New(isolate, 17), result);
// Verify function not cached
int serial_number =
i::Smi::cast(v8::Utils::OpenHandle(*func)
@@ -20687,14 +21858,14 @@ THREADED_TEST(FunctionNew) {
->GetElementNoExceptionThrown(i_isolate, serial_number);
CHECK(elm->IsUndefined());
// Verify that each Function::New creates a new function instance
- Local<Object> data2 = v8::Object::New();
+ Local<Object> data2 = v8::Object::New(isolate);
function_new_expected_env = data2;
Local<Function> func2 = Function::New(isolate, FunctionNewCallback, data2);
CHECK(!func2->IsNull());
CHECK_NE(func, func2);
env->Global()->Set(v8_str("func2"), func2);
Local<Value> result2 = CompileRun("func2();");
- CHECK_EQ(v8::Integer::New(17, isolate), result2);
+ CHECK_EQ(v8::Integer::New(isolate, 17), result2);
}
@@ -20718,3 +21889,230 @@ TEST(EscapeableHandleScope) {
}
}
}
+
+
+static void SetterWhichExpectsThisAndHolderToDiffer(
+ Local<String>, Local<Value>, const v8::PropertyCallbackInfo<void>& info) {
+ CHECK(info.Holder() != info.This());
+}
+
+
+TEST(Regress239669) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetAccessor(v8_str("x"), 0, SetterWhichExpectsThisAndHolderToDiffer);
+ context->Global()->Set(v8_str("P"), templ->NewInstance());
+ CompileRun(
+ "function C1() {"
+ " this.x = 23;"
+ "};"
+ "C1.prototype = P;"
+ "for (var i = 0; i < 4; i++ ) {"
+ " new C1();"
+ "}");
+}
+
+
+class ApiCallOptimizationChecker {
+ private:
+ static Local<Object> data;
+ static Local<Object> receiver;
+ static Local<Object> holder;
+ static Local<Object> callee;
+ static int count;
+
+ static void OptimizationCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CHECK(callee == info.Callee());
+ CHECK(data == info.Data());
+ CHECK(receiver == info.This());
+ if (info.Length() == 1) {
+ CHECK_EQ(v8_num(1), info[0]);
+ }
+ CHECK(holder == info.Holder());
+ count++;
+ info.GetReturnValue().Set(v8_str("returned"));
+ }
+
+ // TODO(dcarney): move this to v8.h
+ static void SetAccessorProperty(Local<Object> object,
+ Local<String> name,
+ Local<Function> getter,
+ Local<Function> setter = Local<Function>()) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ v8::AccessControl settings = v8::DEFAULT;
+ v8::PropertyAttribute attribute = v8::None;
+ i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
+ i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
+ if (setter_i.is_null()) setter_i = isolate->factory()->null_value();
+ i::JSObject::DefineAccessor(v8::Utils::OpenHandle(*object),
+ v8::Utils::OpenHandle(*name),
+ getter_i,
+ setter_i,
+ static_cast<PropertyAttributes>(attribute),
+ settings);
+ }
+
+ public:
+ enum SignatureType {
+ kNoSignature,
+ kSignatureOnReceiver,
+ kSignatureOnPrototype
+ };
+
+ void RunAll() {
+ SignatureType signature_types[] =
+ {kNoSignature, kSignatureOnReceiver, kSignatureOnPrototype};
+ for (unsigned i = 0; i < ARRAY_SIZE(signature_types); i++) {
+ SignatureType signature_type = signature_types[i];
+ for (int j = 0; j < 2; j++) {
+ bool global = j == 0;
+ int key = signature_type +
+ ARRAY_SIZE(signature_types) * (global ? 1 : 0);
+ Run(signature_type, global, key);
+ }
+ }
+ }
+
+ void Run(SignatureType signature_type, bool global, int key) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ // Build a template for signature checks.
+ Local<v8::ObjectTemplate> signature_template;
+ Local<v8::Signature> signature;
+ {
+ Local<v8::FunctionTemplate> parent_template =
+ FunctionTemplate::New(isolate);
+ parent_template->SetHiddenPrototype(true);
+ Local<v8::FunctionTemplate> function_template
+ = FunctionTemplate::New(isolate);
+ function_template->Inherit(parent_template);
+ switch (signature_type) {
+ case kNoSignature:
+ break;
+ case kSignatureOnReceiver:
+ signature = v8::Signature::New(isolate, function_template);
+ break;
+ case kSignatureOnPrototype:
+ signature = v8::Signature::New(isolate, parent_template);
+ break;
+ }
+ signature_template = function_template->InstanceTemplate();
+ }
+ // Global object must pass checks.
+ Local<v8::Context> context =
+ v8::Context::New(isolate, NULL, signature_template);
+ v8::Context::Scope context_scope(context);
+ // Install regular object that can pass signature checks.
+ Local<Object> function_receiver = signature_template->NewInstance();
+ context->Global()->Set(v8_str("function_receiver"), function_receiver);
+ // Get the holder objects.
+ Local<Object> inner_global =
+ Local<Object>::Cast(context->Global()->GetPrototype());
+ // Install functions on hidden prototype object if there is one.
+ data = Object::New(isolate);
+ Local<FunctionTemplate> function_template = FunctionTemplate::New(
+ isolate, OptimizationCallback, data, signature);
+ Local<Function> function = function_template->GetFunction();
+ Local<Object> global_holder = inner_global;
+ Local<Object> function_holder = function_receiver;
+ if (signature_type == kSignatureOnPrototype) {
+ function_holder = Local<Object>::Cast(function_holder->GetPrototype());
+ global_holder = Local<Object>::Cast(global_holder->GetPrototype());
+ }
+ global_holder->Set(v8_str("g_f"), function);
+ SetAccessorProperty(global_holder, v8_str("g_acc"), function, function);
+ function_holder->Set(v8_str("f"), function);
+ SetAccessorProperty(function_holder, v8_str("acc"), function, function);
+ // Initialize expected values.
+ callee = function;
+ count = 0;
+ if (global) {
+ receiver = context->Global();
+ holder = inner_global;
+ } else {
+ holder = function_receiver;
+ // If not using a signature, add something else to the prototype chain
+ // to test the case where holder != receiver.
+ if (signature_type == kNoSignature) {
+ receiver = Local<Object>::Cast(CompileRun(
+ "var receiver_subclass = {};\n"
+ "receiver_subclass.__proto__ = function_receiver;\n"
+ "receiver_subclass"));
+ } else {
+ receiver = Local<Object>::Cast(CompileRun(
+ "var receiver_subclass = function_receiver;\n"
+ "receiver_subclass"));
+ }
+ }
+ // With no signature, the holder is not set.
+ if (signature_type == kNoSignature) holder = receiver;
+ // build wrap_function
+ i::ScopedVector<char> wrap_function(200);
+ if (global) {
+ i::OS::SNPrintF(
+ wrap_function,
+ "function wrap_f_%d() { var f = g_f; return f(); }\n"
+ "function wrap_get_%d() { return this.g_acc; }\n"
+ "function wrap_set_%d() { return this.g_acc = 1; }\n",
+ key, key, key);
+ } else {
+ i::OS::SNPrintF(
+ wrap_function,
+ "function wrap_f_%d() { return receiver_subclass.f(); }\n"
+ "function wrap_get_%d() { return receiver_subclass.acc; }\n"
+ "function wrap_set_%d() { return receiver_subclass.acc = 1; }\n",
+ key, key, key);
+ }
+ // build source string
+ i::ScopedVector<char> source(1000);
+ i::OS::SNPrintF(
+ source,
+ "%s\n" // wrap functions
+ "function wrap_f() { return wrap_f_%d(); }\n"
+ "function wrap_get() { return wrap_get_%d(); }\n"
+ "function wrap_set() { return wrap_set_%d(); }\n"
+ "check = function(returned) {\n"
+ " if (returned !== 'returned') { throw returned; }\n"
+ "}\n"
+ "\n"
+ "check(wrap_f());\n"
+ "check(wrap_f());\n"
+ "%%OptimizeFunctionOnNextCall(wrap_f_%d);\n"
+ "check(wrap_f());\n"
+ "\n"
+ "check(wrap_get());\n"
+ "check(wrap_get());\n"
+ "%%OptimizeFunctionOnNextCall(wrap_get_%d);\n"
+ "check(wrap_get());\n"
+ "\n"
+ "check = function(returned) {\n"
+ " if (returned !== 1) { throw returned; }\n"
+ "}\n"
+ "check(wrap_set());\n"
+ "check(wrap_set());\n"
+ "%%OptimizeFunctionOnNextCall(wrap_set_%d);\n"
+ "check(wrap_set());\n",
+ wrap_function.start(), key, key, key, key, key, key);
+ v8::TryCatch try_catch;
+ CompileRun(source.start());
+ ASSERT(!try_catch.HasCaught());
+ CHECK_EQ(9, count);
+ }
+};
+
+
+Local<Object> ApiCallOptimizationChecker::data;
+Local<Object> ApiCallOptimizationChecker::receiver;
+Local<Object> ApiCallOptimizationChecker::holder;
+Local<Object> ApiCallOptimizationChecker::callee;
+int ApiCallOptimizationChecker::count = 0;
+
+
+TEST(TestFunctionCallOptimization) {
+ i::FLAG_allow_natives_syntax = true;
+ ApiCallOptimizationChecker checker;
+ checker.RunAll();
+}
diff --git a/deps/v8/test/cctest/test-assembler-a64.cc b/deps/v8/test/cctest/test-assembler-a64.cc
new file mode 100644
index 0000000000..656f3691ca
--- /dev/null
+++ b/deps/v8/test/cctest/test-assembler-a64.cc
@@ -0,0 +1,9803 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <cmath>
+#include <limits>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "a64/simulator-a64.h"
+#include "a64/disasm-a64.h"
+#include "a64/utils-a64.h"
+#include "cctest.h"
+#include "test-utils-a64.h"
+
+using namespace v8::internal;
+
+// Test infrastructure.
+//
+// Tests are functions which accept no parameters and have no return values.
+// The testing code should not perform an explicit return once completed. For
+// example to test the mov immediate instruction a very simple test would be:
+//
+// TEST(mov_x0_one) {
+// SETUP();
+//
+// START();
+// __ mov(x0, Operand(1));
+// END();
+//
+// RUN();
+//
+// ASSERT_EQUAL_64(1, x0);
+//
+// TEARDOWN();
+// }
+//
+// Within a START ... END block all registers but sp can be modified. sp has to
+// be explicitly saved/restored. The END() macro replaces the function return
+// so it may appear multiple times in a test if the test has multiple exit
+// points.
+//
+// Once the test has been run all integer and floating point registers as well
+// as flags are accessible through a RegisterDump instance, see
+// utils-a64.cc for more info on RegisterDump.
+//
+// We provide some helper asserts to handle common cases:
+//
+// ASSERT_EQUAL_32(int32_t, int_32t)
+// ASSERT_EQUAL_FP32(float, float)
+// ASSERT_EQUAL_32(int32_t, W register)
+// ASSERT_EQUAL_FP32(float, S register)
+// ASSERT_EQUAL_64(int64_t, int_64t)
+// ASSERT_EQUAL_FP64(double, double)
+// ASSERT_EQUAL_64(int64_t, X register)
+// ASSERT_EQUAL_64(X register, X register)
+// ASSERT_EQUAL_FP64(double, D register)
+//
+// e.g. ASSERT_EQUAL_FP64(0.5, d30);
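+//
+// As a further illustration (a short sketch only, using the macros listed
+// above), floating point results are checked in the same way:
+//
+//   __ Fmov(d0, 1.5);
+//   __ Fadd(d1, d0, d0);
+//   ...
+//   ASSERT_EQUAL_FP64(1.5, d0);
+//   ASSERT_EQUAL_FP64(3.0, d1);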
+//
+// If more advanced computation is required before the assert, then access the
+// RegisterDump named core directly:
+//
+// ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
+
+
+#if 0 // TODO(all): enable.
+static v8::Persistent<v8::Context> env;
+
+static void InitializeVM() {
+ if (env.IsEmpty()) {
+ env = v8::Context::New();
+ }
+}
+#endif
+
+#define __ masm.
+
+#define BUF_SIZE 8192
+#define SETUP() SETUP_SIZE(BUF_SIZE)
+
+#define INIT_V8() \
+ CcTest::InitializeVM(); \
+
+#ifdef USE_SIMULATOR
+
+// Run tests with the simulator.
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ ASSERT(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size); \
+ Decoder decoder; \
+ Simulator simulator(&decoder); \
+ PrintDisassembler* pdis = NULL; \
+ RegisterDump core;
+
+/* if (Cctest::trace_sim()) { \
+ pdis = new PrintDisassembler(stdout); \
+ decoder.PrependVisitor(pdis); \
+ } \
+ */
+
+// Reset the assembler and simulator, so that instructions can be generated,
+// but don't actually emit any code. This can be used by tests that need to
+// emit instructions at the start of the buffer. Note that START_AFTER_RESET
+// must be called before any callee-saved register is modified, and before an
+// END is encountered.
+//
+// Most tests should call START, rather than call RESET directly.
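+//
+// For illustration, a minimal sketch of that ordering (the test name here is
+// hypothetical and not part of this file; the macros are the ones defined in
+// this file):
+//
+// TEST(emit_at_buffer_start) {
+//   INIT_V8();
+//   SETUP();
+//
+//   RESET();
+//   __ Mov(x0, 42);       // Emitted at the very start of the buffer; no
+//                         // callee-saved register has been modified yet.
+//   START_AFTER_RESET();
+//   __ Mov(x1, x0);
+//   END();
+//
+//   RUN();
+//
+//   ASSERT_EQUAL_64(42, x0);
+//   ASSERT_EQUAL_64(42, x1);
+//
+//   TEARDOWN();
+// }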
+#define RESET() \
+ __ Reset(); \
+ simulator.ResetState();
+
+#define START_AFTER_RESET() \
+ __ SetStackPointer(csp); \
+ __ PushCalleeSavedRegisters(); \
+ __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);
+
+#define START() \
+ RESET(); \
+ START_AFTER_RESET();
+
+#define RUN() \
+ simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
+
+#define END() \
+ __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ __ GetCode(NULL);
+
+#define TEARDOWN() \
+ delete pdis; \
+ delete[] buf;
+
+#else // ifdef USE_SIMULATOR.
+// Run the test on real hardware or models.
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ ASSERT(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size); \
+ RegisterDump core; \
+ CPU::SetUp();
+
+#define RESET() \
+ __ Reset();
+
+#define START_AFTER_RESET() \
+ __ SetStackPointer(csp); \
+ __ PushCalleeSavedRegisters();
+
+#define START() \
+ RESET(); \
+ START_AFTER_RESET();
+
+#define RUN() \
+ CPU::FlushICache(buf, masm.SizeOfGeneratedCode()); \
+ { \
+ void (*test_function)(void); \
+ memcpy(&test_function, &buf, sizeof(buf)); \
+ test_function(); \
+ }
+
+#define END() \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ __ GetCode(NULL);
+
+#define TEARDOWN() \
+ delete[] buf;
+
+#endif // ifdef USE_SIMULATOR.
+
+#define ASSERT_EQUAL_NZCV(expected) \
+ CHECK(EqualNzcv(expected, core.flags_nzcv()))
+
+#define ASSERT_EQUAL_REGISTERS(expected) \
+ CHECK(EqualRegisters(&expected, &core))
+
+#define ASSERT_EQUAL_32(expected, result) \
+ CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
+
+#define ASSERT_EQUAL_FP32(expected, result) \
+ CHECK(EqualFP32(expected, &core, result))
+
+#define ASSERT_EQUAL_64(expected, result) \
+ CHECK(Equal64(expected, &core, result))
+
+#define ASSERT_EQUAL_FP64(expected, result) \
+ CHECK(EqualFP64(expected, &core, result))
+
+#ifdef DEBUG
+#define ASSERT_LITERAL_POOL_SIZE(expected) \
+ CHECK((expected) == (__ LiteralPoolSize()))
+#else
+#define ASSERT_LITERAL_POOL_SIZE(expected) \
+ ((void) 0)
+#endif
+
+
+TEST(stack_ops) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // save csp.
+ __ Mov(x29, csp);
+
+ // Set the csp to a known value.
+ __ Mov(x16, 0x1000);
+ __ Mov(csp, x16);
+ __ Mov(x0, csp);
+
+ // Add immediate to the csp, and move the result to a normal register.
+ __ Add(csp, csp, Operand(0x50));
+ __ Mov(x1, csp);
+
+ // Add extended to the csp, and move the result to a normal register.
+ __ Mov(x17, 0xfff);
+ __ Add(csp, csp, Operand(x17, SXTB));
+ __ Mov(x2, csp);
+
+ // Set csp using a logical instruction, and move the result to a normal
+ // register.
+ __ Orr(csp, xzr, Operand(0x1fff));
+ __ Mov(x3, csp);
+
+ // Write wcsp using a logical instruction.
+ __ Orr(wcsp, wzr, Operand(0xfffffff8L));
+ __ Mov(x4, csp);
+
+ // Write csp, and read back wcsp.
+ __ Orr(csp, xzr, Operand(0xfffffff8L));
+ __ Mov(w5, wcsp);
+
+ // restore csp.
+ __ Mov(csp, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1000, x0);
+ ASSERT_EQUAL_64(0x1050, x1);
+ ASSERT_EQUAL_64(0x104f, x2);
+ ASSERT_EQUAL_64(0x1fff, x3);
+ ASSERT_EQUAL_64(0xfffffff8, x4);
+ ASSERT_EQUAL_64(0xfffffff8, x5);
+
+ TEARDOWN();
+}
+
+
+TEST(mvn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mvn(w0, 0xfff);
+ __ Mvn(x1, 0xfff);
+ __ Mvn(w2, Operand(w0, LSL, 1));
+ __ Mvn(x3, Operand(x1, LSL, 2));
+ __ Mvn(w4, Operand(w0, LSR, 3));
+ __ Mvn(x5, Operand(x1, LSR, 4));
+ __ Mvn(w6, Operand(w0, ASR, 11));
+ __ Mvn(x7, Operand(x1, ASR, 12));
+ __ Mvn(w8, Operand(w0, ROR, 13));
+ __ Mvn(x9, Operand(x1, ROR, 14));
+ __ Mvn(w10, Operand(w2, UXTB));
+ __ Mvn(x11, Operand(x2, SXTB, 1));
+ __ Mvn(w12, Operand(w2, UXTH, 2));
+ __ Mvn(x13, Operand(x2, SXTH, 3));
+ __ Mvn(x14, Operand(w2, UXTW, 4));
+ __ Mvn(x15, Operand(w2, SXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfffff000, x0);
+ ASSERT_EQUAL_64(0xfffffffffffff000UL, x1);
+ ASSERT_EQUAL_64(0x00001fff, x2);
+ ASSERT_EQUAL_64(0x0000000000003fffUL, x3);
+ ASSERT_EQUAL_64(0xe00001ff, x4);
+ ASSERT_EQUAL_64(0xf0000000000000ffUL, x5);
+ ASSERT_EQUAL_64(0x00000001, x6);
+ ASSERT_EQUAL_64(0x0, x7);
+ ASSERT_EQUAL_64(0x7ff80000, x8);
+ ASSERT_EQUAL_64(0x3ffc000000000000UL, x9);
+ ASSERT_EQUAL_64(0xffffff00, x10);
+ ASSERT_EQUAL_64(0x0000000000000001UL, x11);
+ ASSERT_EQUAL_64(0xffff8003, x12);
+ ASSERT_EQUAL_64(0xffffffffffff0007UL, x13);
+ ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14);
+ ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15);
+
+ TEARDOWN();
+}
+
+
+TEST(mov) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Mov(x1, 0xffffffffffffffffL);
+ __ Mov(x2, 0xffffffffffffffffL);
+ __ Mov(x3, 0xffffffffffffffffL);
+
+ __ Mov(x0, 0x0123456789abcdefL);
+
+ __ movz(x1, 0xabcdL << 16);
+ __ movk(x2, 0xabcdL << 32);
+ __ movn(x3, 0xabcdL << 48);
+
+ __ Mov(x4, 0x0123456789abcdefL);
+ __ Mov(x5, x4);
+
+ __ Mov(w6, -1);
+
+ // Test that moves back to the same register have the desired effect. This
+ // is a no-op for X registers, and a truncation for W registers.
+ __ Mov(x7, 0x0123456789abcdefL);
+ __ Mov(x7, x7);
+ __ Mov(x8, 0x0123456789abcdefL);
+ __ Mov(w8, w8);
+ __ Mov(x9, 0x0123456789abcdefL);
+ __ Mov(x9, Operand(x9));
+ __ Mov(x10, 0x0123456789abcdefL);
+ __ Mov(w10, Operand(w10));
+
+ __ Mov(w11, 0xfff);
+ __ Mov(x12, 0xfff);
+ __ Mov(w13, Operand(w11, LSL, 1));
+ __ Mov(x14, Operand(x12, LSL, 2));
+ __ Mov(w15, Operand(w11, LSR, 3));
+ __ Mov(x18, Operand(x12, LSR, 4));
+ __ Mov(w19, Operand(w11, ASR, 11));
+ __ Mov(x20, Operand(x12, ASR, 12));
+ __ Mov(w21, Operand(w11, ROR, 13));
+ __ Mov(x22, Operand(x12, ROR, 14));
+ __ Mov(w23, Operand(w13, UXTB));
+ __ Mov(x24, Operand(x13, SXTB, 1));
+ __ Mov(w25, Operand(w13, UXTH, 2));
+ __ Mov(x26, Operand(x13, SXTH, 3));
+ __ Mov(x27, Operand(w13, UXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x0);
+ ASSERT_EQUAL_64(0x00000000abcd0000L, x1);
+ ASSERT_EQUAL_64(0xffffabcdffffffffL, x2);
+ ASSERT_EQUAL_64(0x5432ffffffffffffL, x3);
+ ASSERT_EQUAL_64(x4, x5);
+ ASSERT_EQUAL_32(-1, w6);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x7);
+ ASSERT_EQUAL_32(0x89abcdefL, w8);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x9);
+ ASSERT_EQUAL_32(0x89abcdefL, w10);
+ ASSERT_EQUAL_64(0x00000fff, x11);
+ ASSERT_EQUAL_64(0x0000000000000fffUL, x12);
+ ASSERT_EQUAL_64(0x00001ffe, x13);
+ ASSERT_EQUAL_64(0x0000000000003ffcUL, x14);
+ ASSERT_EQUAL_64(0x000001ff, x15);
+ ASSERT_EQUAL_64(0x00000000000000ffUL, x18);
+ ASSERT_EQUAL_64(0x00000001, x19);
+ ASSERT_EQUAL_64(0x0, x20);
+ ASSERT_EQUAL_64(0x7ff80000, x21);
+ ASSERT_EQUAL_64(0x3ffc000000000000UL, x22);
+ ASSERT_EQUAL_64(0x000000fe, x23);
+ ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24);
+ ASSERT_EQUAL_64(0x00007ff8, x25);
+ ASSERT_EQUAL_64(0x000000000000fff0UL, x26);
+ ASSERT_EQUAL_64(0x000000000001ffe0UL, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(mov_imm_w) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w0, 0xffffffffL);
+ __ Mov(w1, 0xffff1234L);
+ __ Mov(w2, 0x1234ffffL);
+ __ Mov(w3, 0x00000000L);
+ __ Mov(w4, 0x00001234L);
+ __ Mov(w5, 0x12340000L);
+ __ Mov(w6, 0x12345678L);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffL, x0);
+ ASSERT_EQUAL_64(0xffff1234L, x1);
+ ASSERT_EQUAL_64(0x1234ffffL, x2);
+ ASSERT_EQUAL_64(0x00000000L, x3);
+ ASSERT_EQUAL_64(0x00001234L, x4);
+ ASSERT_EQUAL_64(0x12340000L, x5);
+ ASSERT_EQUAL_64(0x12345678L, x6);
+
+ TEARDOWN();
+}
+
+
+TEST(mov_imm_x) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Mov(x1, 0xffffffffffff1234L);
+ __ Mov(x2, 0xffffffff12345678L);
+ __ Mov(x3, 0xffff1234ffff5678L);
+ __ Mov(x4, 0x1234ffffffff5678L);
+ __ Mov(x5, 0x1234ffff5678ffffL);
+ __ Mov(x6, 0x12345678ffffffffL);
+ __ Mov(x7, 0x1234ffffffffffffL);
+ __ Mov(x8, 0x123456789abcffffL);
+ __ Mov(x9, 0x12345678ffff9abcL);
+ __ Mov(x10, 0x1234ffff56789abcL);
+ __ Mov(x11, 0xffff123456789abcL);
+ __ Mov(x12, 0x0000000000000000L);
+ __ Mov(x13, 0x0000000000001234L);
+ __ Mov(x14, 0x0000000012345678L);
+ __ Mov(x15, 0x0000123400005678L);
+ __ Mov(x18, 0x1234000000005678L);
+ __ Mov(x19, 0x1234000056780000L);
+ __ Mov(x20, 0x1234567800000000L);
+ __ Mov(x21, 0x1234000000000000L);
+ __ Mov(x22, 0x123456789abc0000L);
+ __ Mov(x23, 0x1234567800009abcL);
+ __ Mov(x24, 0x1234000056789abcL);
+ __ Mov(x25, 0x0000123456789abcL);
+ __ Mov(x26, 0x123456789abcdef0L);
+ __ Mov(x27, 0xffff000000000001L);
+ __ Mov(x28, 0x8000ffff00000000L);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffffff1234L, x1);
+ ASSERT_EQUAL_64(0xffffffff12345678L, x2);
+ ASSERT_EQUAL_64(0xffff1234ffff5678L, x3);
+ ASSERT_EQUAL_64(0x1234ffffffff5678L, x4);
+ ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5);
+ ASSERT_EQUAL_64(0x12345678ffffffffL, x6);
+ ASSERT_EQUAL_64(0x1234ffffffffffffL, x7);
+ ASSERT_EQUAL_64(0x123456789abcffffL, x8);
+ ASSERT_EQUAL_64(0x12345678ffff9abcL, x9);
+ ASSERT_EQUAL_64(0x1234ffff56789abcL, x10);
+ ASSERT_EQUAL_64(0xffff123456789abcL, x11);
+ ASSERT_EQUAL_64(0x0000000000000000L, x12);
+ ASSERT_EQUAL_64(0x0000000000001234L, x13);
+ ASSERT_EQUAL_64(0x0000000012345678L, x14);
+ ASSERT_EQUAL_64(0x0000123400005678L, x15);
+ ASSERT_EQUAL_64(0x1234000000005678L, x18);
+ ASSERT_EQUAL_64(0x1234000056780000L, x19);
+ ASSERT_EQUAL_64(0x1234567800000000L, x20);
+ ASSERT_EQUAL_64(0x1234000000000000L, x21);
+ ASSERT_EQUAL_64(0x123456789abc0000L, x22);
+ ASSERT_EQUAL_64(0x1234567800009abcL, x23);
+ ASSERT_EQUAL_64(0x1234000056789abcL, x24);
+ ASSERT_EQUAL_64(0x0000123456789abcL, x25);
+ ASSERT_EQUAL_64(0x123456789abcdef0L, x26);
+ ASSERT_EQUAL_64(0xffff000000000001L, x27);
+ ASSERT_EQUAL_64(0x8000ffff00000000L, x28);
+
+ TEARDOWN();
+}
+
+
+TEST(orr) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf0f0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Orr(x2, x0, Operand(x1));
+ __ Orr(w3, w0, Operand(w1, LSL, 28));
+ __ Orr(x4, x0, Operand(x1, LSL, 32));
+ __ Orr(x5, x0, Operand(x1, LSR, 4));
+ __ Orr(w6, w0, Operand(w1, ASR, 4));
+ __ Orr(x7, x0, Operand(x1, ASR, 4));
+ __ Orr(w8, w0, Operand(w1, ROR, 12));
+ __ Orr(x9, x0, Operand(x1, ROR, 12));
+ __ Orr(w10, w0, Operand(0xf));
+ __ Orr(x11, x0, Operand(0xf0000000f0000000L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xf000f0ff, x2);
+ ASSERT_EQUAL_64(0xf000f0f0, x3);
+ ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4);
+ ASSERT_EQUAL_64(0x0f00f0ff, x5);
+ ASSERT_EQUAL_64(0xff00f0ff, x6);
+ ASSERT_EQUAL_64(0x0f00f0ff, x7);
+ ASSERT_EQUAL_64(0x0ffff0f0, x8);
+ ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9);
+ ASSERT_EQUAL_64(0xf0ff, x10);
+ ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orr_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000080008080UL);
+ __ Orr(w6, w0, Operand(w1, UXTB));
+ __ Orr(x7, x0, Operand(x1, UXTH, 1));
+ __ Orr(w8, w0, Operand(w1, UXTW, 2));
+ __ Orr(x9, x0, Operand(x1, UXTX, 3));
+ __ Orr(w10, w0, Operand(w1, SXTB));
+ __ Orr(x11, x0, Operand(x1, SXTH, 1));
+ __ Orr(x12, x0, Operand(x1, SXTW, 2));
+ __ Orr(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000081, x6);
+ ASSERT_EQUAL_64(0x00010101, x7);
+ ASSERT_EQUAL_64(0x00020201, x8);
+ ASSERT_EQUAL_64(0x0000000400040401UL, x9);
+ ASSERT_EQUAL_64(0x00000000ffffff81UL, x10);
+ ASSERT_EQUAL_64(0xffffffffffff0101UL, x11);
+ ASSERT_EQUAL_64(0xfffffffe00020201UL, x12);
+ ASSERT_EQUAL_64(0x0000000400040401UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(bitwise_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);
+
+ __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
+ __ Orr(w11, w1, Operand(0x90abcdef));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
+ ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf0f0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Orn(x2, x0, Operand(x1));
+ __ Orn(w3, w0, Operand(w1, LSL, 4));
+ __ Orn(x4, x0, Operand(x1, LSL, 4));
+ __ Orn(x5, x0, Operand(x1, LSR, 1));
+ __ Orn(w6, w0, Operand(w1, ASR, 1));
+ __ Orn(x7, x0, Operand(x1, ASR, 1));
+ __ Orn(w8, w0, Operand(w1, ROR, 16));
+ __ Orn(x9, x0, Operand(x1, ROR, 16));
+ __ Orn(w10, w0, Operand(0xffff));
+ __ Orn(x11, x0, Operand(0xffff0000ffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2);
+ ASSERT_EQUAL_64(0xfffff0ff, x3);
+ ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4);
+ ASSERT_EQUAL_64(0xffffffff87fffff0L, x5);
+ ASSERT_EQUAL_64(0x07fffff0, x6);
+ ASSERT_EQUAL_64(0xffffffff87fffff0L, x7);
+ ASSERT_EQUAL_64(0xff00ffff, x8);
+ ASSERT_EQUAL_64(0xff00ffffffffffffL, x9);
+ ASSERT_EQUAL_64(0xfffff0f0, x10);
+ ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orn_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Orn(w6, w0, Operand(w1, UXTB));
+ __ Orn(x7, x0, Operand(x1, UXTH, 1));
+ __ Orn(w8, w0, Operand(w1, UXTW, 2));
+ __ Orn(x9, x0, Operand(x1, UXTX, 3));
+ __ Orn(w10, w0, Operand(w1, SXTB));
+ __ Orn(x11, x0, Operand(x1, SXTH, 1));
+ __ Orn(x12, x0, Operand(x1, SXTW, 2));
+ __ Orn(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff7f, x6);
+ ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
+ ASSERT_EQUAL_64(0xfffdfdfb, x8);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+ ASSERT_EQUAL_64(0x0000007f, x10);
+ ASSERT_EQUAL_64(0x0000fefd, x11);
+ ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(and_) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ And(x2, x0, Operand(x1));
+ __ And(w3, w0, Operand(w1, LSL, 4));
+ __ And(x4, x0, Operand(x1, LSL, 4));
+ __ And(x5, x0, Operand(x1, LSR, 1));
+ __ And(w6, w0, Operand(w1, ASR, 20));
+ __ And(x7, x0, Operand(x1, ASR, 20));
+ __ And(w8, w0, Operand(w1, ROR, 28));
+ __ And(x9, x0, Operand(x1, ROR, 28));
+ __ And(w10, w0, Operand(0xff00));
+ __ And(x11, x0, Operand(0xff));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x000000f0, x2);
+ ASSERT_EQUAL_64(0x00000ff0, x3);
+ ASSERT_EQUAL_64(0x00000ff0, x4);
+ ASSERT_EQUAL_64(0x00000070, x5);
+ ASSERT_EQUAL_64(0x0000ff00, x6);
+ ASSERT_EQUAL_64(0x00000f00, x7);
+ ASSERT_EQUAL_64(0x00000ff0, x8);
+ ASSERT_EQUAL_64(0x00000000, x9);
+ ASSERT_EQUAL_64(0x0000ff00, x10);
+ ASSERT_EQUAL_64(0x000000f0, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(and_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffUL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ And(w6, w0, Operand(w1, UXTB));
+ __ And(x7, x0, Operand(x1, UXTH, 1));
+ __ And(w8, w0, Operand(w1, UXTW, 2));
+ __ And(x9, x0, Operand(x1, UXTX, 3));
+ __ And(w10, w0, Operand(w1, SXTB));
+ __ And(x11, x0, Operand(x1, SXTH, 1));
+ __ And(x12, x0, Operand(x1, SXTW, 2));
+ __ And(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000081, x6);
+ ASSERT_EQUAL_64(0x00010102, x7);
+ ASSERT_EQUAL_64(0x00020204, x8);
+ ASSERT_EQUAL_64(0x0000000400040408UL, x9);
+ ASSERT_EQUAL_64(0xffffff81, x10);
+ ASSERT_EQUAL_64(0xffffffffffff0102UL, x11);
+ ASSERT_EQUAL_64(0xfffffffe00020204UL, x12);
+ ASSERT_EQUAL_64(0x0000000400040408UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(ands) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0xf00000ff);
+ __ Ands(w0, w1, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0xf00000ff, x0);
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+ __ Ands(w0, w0, Operand(w1, LSR, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0x8000000000000000L);
+ __ Mov(x1, 0x00000001);
+ __ Ands(x0, x0, Operand(x1, ROR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x0);
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Ands(w0, w0, Operand(0xf));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xff000000);
+ __ Ands(w0, w0, Operand(0x80000000));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(bic) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Bic(x2, x0, Operand(x1));
+ __ Bic(w3, w0, Operand(w1, LSL, 4));
+ __ Bic(x4, x0, Operand(x1, LSL, 4));
+ __ Bic(x5, x0, Operand(x1, LSR, 1));
+ __ Bic(w6, w0, Operand(w1, ASR, 20));
+ __ Bic(x7, x0, Operand(x1, ASR, 20));
+ __ Bic(w8, w0, Operand(w1, ROR, 28));
+ __ Bic(x9, x0, Operand(x1, ROR, 24));
+ __ Bic(x10, x0, Operand(0x1f));
+ __ Bic(x11, x0, Operand(0x100));
+
+ // Test bic into csp when the constant cannot be encoded in the immediate
+ // field.
+ // Use x20 to preserve csp. We check the result via x21 because the
+ // test infrastructure requires that csp be restored to its original value.
+ __ Mov(x20, csp);
+ __ Mov(x0, 0xffffff);
+ __ Bic(csp, x0, Operand(0xabcdef));
+ __ Mov(x21, csp);
+ __ Mov(csp, x20);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0000ff00, x2);
+ ASSERT_EQUAL_64(0x0000f000, x3);
+ ASSERT_EQUAL_64(0x0000f000, x4);
+ ASSERT_EQUAL_64(0x0000ff80, x5);
+ ASSERT_EQUAL_64(0x000000f0, x6);
+ ASSERT_EQUAL_64(0x0000f0f0, x7);
+ ASSERT_EQUAL_64(0x0000f000, x8);
+ ASSERT_EQUAL_64(0x0000ff00, x9);
+ ASSERT_EQUAL_64(0x0000ffe0, x10);
+ ASSERT_EQUAL_64(0x0000fef0, x11);
+
+ ASSERT_EQUAL_64(0x543210, x21);
+
+ TEARDOWN();
+}
+
+
+TEST(bic_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffUL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Bic(w6, w0, Operand(w1, UXTB));
+ __ Bic(x7, x0, Operand(x1, UXTH, 1));
+ __ Bic(w8, w0, Operand(w1, UXTW, 2));
+ __ Bic(x9, x0, Operand(x1, UXTX, 3));
+ __ Bic(w10, w0, Operand(w1, SXTB));
+ __ Bic(x11, x0, Operand(x1, SXTH, 1));
+ __ Bic(x12, x0, Operand(x1, SXTW, 2));
+ __ Bic(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff7e, x6);
+ ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
+ ASSERT_EQUAL_64(0xfffdfdfb, x8);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+ ASSERT_EQUAL_64(0x0000007e, x10);
+ ASSERT_EQUAL_64(0x0000fefd, x11);
+ ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(bics) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0xffff);
+ __ Bics(w0, w1, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xffffffff);
+ __ Bics(w0, w0, Operand(w0, LSR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x0);
+
+ START();
+ __ Mov(x0, 0x8000000000000000L);
+ __ Mov(x1, 0x00000001);
+ __ Bics(x0, x0, Operand(x1, ROR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x0);
+
+ START();
+ __ Mov(w0, 0xffff0000);
+ __ Bics(w0, w0, Operand(0xfffffff0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(eor) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Eor(x2, x0, Operand(x1));
+ __ Eor(w3, w0, Operand(w1, LSL, 4));
+ __ Eor(x4, x0, Operand(x1, LSL, 4));
+ __ Eor(x5, x0, Operand(x1, LSR, 1));
+ __ Eor(w6, w0, Operand(w1, ASR, 20));
+ __ Eor(x7, x0, Operand(x1, ASR, 20));
+ __ Eor(w8, w0, Operand(w1, ROR, 28));
+ __ Eor(x9, x0, Operand(x1, ROR, 28));
+ __ Eor(w10, w0, Operand(0xff00ff00));
+ __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xf000ff0f, x2);
+ ASSERT_EQUAL_64(0x0000f000, x3);
+ ASSERT_EQUAL_64(0x0000000f0000f000L, x4);
+ ASSERT_EQUAL_64(0x7800ff8f, x5);
+ ASSERT_EQUAL_64(0xffff00f0, x6);
+ ASSERT_EQUAL_64(0x0000f0f0, x7);
+ ASSERT_EQUAL_64(0x0000f00f, x8);
+ ASSERT_EQUAL_64(0x00000ff00000ffffL, x9);
+ ASSERT_EQUAL_64(0xff0000f0, x10);
+ ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(eor_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x1111111111111111UL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Eor(w6, w0, Operand(w1, UXTB));
+ __ Eor(x7, x0, Operand(x1, UXTH, 1));
+ __ Eor(w8, w0, Operand(w1, UXTW, 2));
+ __ Eor(x9, x0, Operand(x1, UXTX, 3));
+ __ Eor(w10, w0, Operand(w1, SXTB));
+ __ Eor(x11, x0, Operand(x1, SXTH, 1));
+ __ Eor(x12, x0, Operand(x1, SXTW, 2));
+ __ Eor(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x11111190, x6);
+ ASSERT_EQUAL_64(0x1111111111101013UL, x7);
+ ASSERT_EQUAL_64(0x11131315, x8);
+ ASSERT_EQUAL_64(0x1111111511151519UL, x9);
+ ASSERT_EQUAL_64(0xeeeeee90, x10);
+ ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
+ ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12);
+ ASSERT_EQUAL_64(0x1111111511151519UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(eon) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Eon(x2, x0, Operand(x1));
+ __ Eon(w3, w0, Operand(w1, LSL, 4));
+ __ Eon(x4, x0, Operand(x1, LSL, 4));
+ __ Eon(x5, x0, Operand(x1, LSR, 1));
+ __ Eon(w6, w0, Operand(w1, ASR, 20));
+ __ Eon(x7, x0, Operand(x1, ASR, 20));
+ __ Eon(w8, w0, Operand(w1, ROR, 28));
+ __ Eon(x9, x0, Operand(x1, ROR, 28));
+ __ Eon(w10, w0, Operand(0x03c003c0));
+ __ Eon(x11, x0, Operand(0x0000100000001000L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2);
+ ASSERT_EQUAL_64(0xffff0fff, x3);
+ ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4);
+ ASSERT_EQUAL_64(0xffffffff87ff0070L, x5);
+ ASSERT_EQUAL_64(0x0000ff0f, x6);
+ ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7);
+ ASSERT_EQUAL_64(0xffff0ff0, x8);
+ ASSERT_EQUAL_64(0xfffff00fffff0000L, x9);
+ ASSERT_EQUAL_64(0xfc3f03cf, x10);
+ ASSERT_EQUAL_64(0xffffefffffff100fL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(eon_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x1111111111111111UL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Eon(w6, w0, Operand(w1, UXTB));
+ __ Eon(x7, x0, Operand(x1, UXTH, 1));
+ __ Eon(w8, w0, Operand(w1, UXTW, 2));
+ __ Eon(x9, x0, Operand(x1, UXTX, 3));
+ __ Eon(w10, w0, Operand(w1, SXTB));
+ __ Eon(x11, x0, Operand(x1, SXTH, 1));
+ __ Eon(x12, x0, Operand(x1, SXTW, 2));
+ __ Eon(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xeeeeee6f, x6);
+ ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
+ ASSERT_EQUAL_64(0xeeececea, x8);
+ ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
+ ASSERT_EQUAL_64(0x1111116f, x10);
+ ASSERT_EQUAL_64(0x111111111111efecUL, x11);
+ ASSERT_EQUAL_64(0x11111110eeececeaUL, x12);
+ ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(mul) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+
+ __ Mul(w0, w16, w16);
+ __ Mul(w1, w16, w17);
+ __ Mul(w2, w17, w18);
+ __ Mul(w3, w18, w19);
+ __ Mul(x4, x16, x16);
+ __ Mul(x5, x17, x18);
+ __ Mul(x6, x18, x19);
+ __ Mul(x7, x19, x19);
+ __ Smull(x8, w17, w18);
+ __ Smull(x9, w18, w18);
+ __ Smull(x10, w19, w19);
+ __ Mneg(w11, w16, w16);
+ __ Mneg(w12, w16, w17);
+ __ Mneg(w13, w17, w18);
+ __ Mneg(w14, w18, w19);
+ __ Mneg(x20, x16, x16);
+ __ Mneg(x21, x17, x18);
+ __ Mneg(x22, x18, x19);
+ __ Mneg(x23, x19, x19);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(0xffffffff, x2);
+ ASSERT_EQUAL_64(1, x3);
+ ASSERT_EQUAL_64(0, x4);
+ ASSERT_EQUAL_64(0xffffffff, x5);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x6);
+ ASSERT_EQUAL_64(1, x7);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0, x12);
+ ASSERT_EQUAL_64(1, x13);
+ ASSERT_EQUAL_64(0xffffffff, x14);
+ ASSERT_EQUAL_64(0, x20);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x21);
+ ASSERT_EQUAL_64(0xffffffff, x22);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x23);
+
+ TEARDOWN();
+}
+
+
+static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
+ SETUP();
+ START();
+ __ Mov(w0, a);
+ __ Mov(w1, b);
+ __ Smull(x2, w0, w1);
+ END();
+ RUN();
+ ASSERT_EQUAL_64(expected, x2);
+ TEARDOWN();
+}
+
+
+TEST(smull) {
+ INIT_V8();
+ SmullHelper(0, 0, 0);
+ SmullHelper(1, 1, 1);
+ SmullHelper(-1, -1, 1);
+ SmullHelper(1, -1, -1);
+ SmullHelper(0xffffffff80000000, 0x80000000, 1);
+ SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
+}
+
+
+TEST(madd) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+
+ __ Madd(w0, w16, w16, w16);
+ __ Madd(w1, w16, w16, w17);
+ __ Madd(w2, w16, w16, w18);
+ __ Madd(w3, w16, w16, w19);
+ __ Madd(w4, w16, w17, w17);
+ __ Madd(w5, w17, w17, w18);
+ __ Madd(w6, w17, w17, w19);
+ __ Madd(w7, w17, w18, w16);
+ __ Madd(w8, w17, w18, w18);
+ __ Madd(w9, w18, w18, w17);
+ __ Madd(w10, w18, w19, w18);
+ __ Madd(w11, w19, w19, w19);
+
+ __ Madd(x12, x16, x16, x16);
+ __ Madd(x13, x16, x16, x17);
+ __ Madd(x14, x16, x16, x18);
+ __ Madd(x15, x16, x16, x19);
+ __ Madd(x20, x16, x17, x17);
+ __ Madd(x21, x17, x17, x18);
+ __ Madd(x22, x17, x17, x19);
+ __ Madd(x23, x17, x18, x16);
+ __ Madd(x24, x17, x18, x18);
+ __ Madd(x25, x18, x18, x17);
+ __ Madd(x26, x18, x19, x18);
+ __ Madd(x27, x19, x19, x19);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(0xffffffff, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0, x6);
+ ASSERT_EQUAL_64(0xffffffff, x7);
+ ASSERT_EQUAL_64(0xfffffffe, x8);
+ ASSERT_EQUAL_64(2, x9);
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(0, x11);
+
+ ASSERT_EQUAL_64(0, x12);
+ ASSERT_EQUAL_64(1, x13);
+ ASSERT_EQUAL_64(0xffffffff, x14);
+ ASSERT_EQUAL_64(0xffffffffffffffff, x15);
+ ASSERT_EQUAL_64(1, x20);
+ ASSERT_EQUAL_64(0x100000000UL, x21);
+ ASSERT_EQUAL_64(0, x22);
+ ASSERT_EQUAL_64(0xffffffff, x23);
+ ASSERT_EQUAL_64(0x1fffffffe, x24);
+ ASSERT_EQUAL_64(0xfffffffe00000002UL, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(msub) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+
+ __ Msub(w0, w16, w16, w16);
+ __ Msub(w1, w16, w16, w17);
+ __ Msub(w2, w16, w16, w18);
+ __ Msub(w3, w16, w16, w19);
+ __ Msub(w4, w16, w17, w17);
+ __ Msub(w5, w17, w17, w18);
+ __ Msub(w6, w17, w17, w19);
+ __ Msub(w7, w17, w18, w16);
+ __ Msub(w8, w17, w18, w18);
+ __ Msub(w9, w18, w18, w17);
+ __ Msub(w10, w18, w19, w18);
+ __ Msub(w11, w19, w19, w19);
+
+ __ Msub(x12, x16, x16, x16);
+ __ Msub(x13, x16, x16, x17);
+ __ Msub(x14, x16, x16, x18);
+ __ Msub(x15, x16, x16, x19);
+ __ Msub(x20, x16, x17, x17);
+ __ Msub(x21, x17, x17, x18);
+ __ Msub(x22, x17, x17, x19);
+ __ Msub(x23, x17, x18, x16);
+ __ Msub(x24, x17, x18, x18);
+ __ Msub(x25, x18, x18, x17);
+ __ Msub(x26, x18, x19, x18);
+ __ Msub(x27, x19, x19, x19);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(0xffffffff, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(0xfffffffe, x5);
+ ASSERT_EQUAL_64(0xfffffffe, x6);
+ ASSERT_EQUAL_64(1, x7);
+ ASSERT_EQUAL_64(0, x8);
+ ASSERT_EQUAL_64(0, x9);
+ ASSERT_EQUAL_64(0xfffffffe, x10);
+ ASSERT_EQUAL_64(0xfffffffe, x11);
+
+ ASSERT_EQUAL_64(0, x12);
+ ASSERT_EQUAL_64(1, x13);
+ ASSERT_EQUAL_64(0xffffffff, x14);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
+ ASSERT_EQUAL_64(1, x20);
+ ASSERT_EQUAL_64(0xfffffffeUL, x21);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x23);
+ ASSERT_EQUAL_64(0, x24);
+ ASSERT_EQUAL_64(0x200000000UL, x25);
+ ASSERT_EQUAL_64(0x1fffffffeUL, x26);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(smulh) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x20, 0);
+ __ Mov(x21, 1);
+ __ Mov(x22, 0x0000000100000000L);
+ __ Mov(x23, 0x12345678);
+ __ Mov(x24, 0x0123456789abcdefL);
+ __ Mov(x25, 0x0000000200000000L);
+ __ Mov(x26, 0x8000000000000000UL);
+ __ Mov(x27, 0xffffffffffffffffUL);
+ __ Mov(x28, 0x5555555555555555UL);
+ __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);
+
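+ // Smulh writes the upper 64 bits of the signed 128-bit product into the
+ // destination register.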
+ __ Smulh(x0, x20, x24);
+ __ Smulh(x1, x21, x24);
+ __ Smulh(x2, x22, x23);
+ __ Smulh(x3, x22, x24);
+ __ Smulh(x4, x24, x25);
+ __ Smulh(x5, x23, x27);
+ __ Smulh(x6, x26, x26);
+ __ Smulh(x7, x26, x27);
+ __ Smulh(x8, x27, x27);
+ __ Smulh(x9, x28, x28);
+ __ Smulh(x10, x28, x29);
+ __ Smulh(x11, x29, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(0, x2);
+ ASSERT_EQUAL_64(0x01234567, x3);
+ ASSERT_EQUAL_64(0x02468acf, x4);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x5);
+ ASSERT_EQUAL_64(0x4000000000000000UL, x6);
+ ASSERT_EQUAL_64(0, x7);
+ ASSERT_EQUAL_64(0, x8);
+ ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9);
+ ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10);
+ ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(smaddl_umaddl) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x20, 4);
+ __ Mov(x21, 0x200000000UL);
+
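+ // Smaddl and Umaddl sign- or zero-extend the 32-bit multiplicands, multiply
+ // them and add the 64-bit accumulator.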
+ __ Smaddl(x9, w17, w18, x20);
+ __ Smaddl(x10, w18, w18, x20);
+ __ Smaddl(x11, w19, w19, x20);
+ __ Smaddl(x12, w19, w19, x21);
+ __ Umaddl(x13, w17, w18, x20);
+ __ Umaddl(x14, w18, w18, x20);
+ __ Umaddl(x15, w19, w19, x20);
+ __ Umaddl(x22, w19, w19, x21);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(3, x9);
+ ASSERT_EQUAL_64(5, x10);
+ ASSERT_EQUAL_64(5, x11);
+ ASSERT_EQUAL_64(0x200000001UL, x12);
+ ASSERT_EQUAL_64(0x100000003UL, x13);
+ ASSERT_EQUAL_64(0xfffffffe00000005UL, x14);
+ ASSERT_EQUAL_64(0xfffffffe00000005UL, x15);
+ ASSERT_EQUAL_64(0x1, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(smsubl_umsubl) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x20, 4);
+ __ Mov(x21, 0x200000000UL);
+
+ __ Smsubl(x9, w17, w18, x20);
+ __ Smsubl(x10, w18, w18, x20);
+ __ Smsubl(x11, w19, w19, x20);
+ __ Smsubl(x12, w19, w19, x21);
+ __ Umsubl(x13, w17, w18, x20);
+ __ Umsubl(x14, w18, w18, x20);
+ __ Umsubl(x15, w19, w19, x20);
+ __ Umsubl(x22, w19, w19, x21);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(5, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(3, x11);
+ ASSERT_EQUAL_64(0x1ffffffffUL, x12);
+ ASSERT_EQUAL_64(0xffffffff00000005UL, x13);
+ ASSERT_EQUAL_64(0x200000003UL, x14);
+ ASSERT_EQUAL_64(0x200000003UL, x15);
+ ASSERT_EQUAL_64(0x3ffffffffUL, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(div) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 1);
+ __ Mov(x17, 0xffffffff);
+ __ Mov(x18, 0xffffffffffffffffUL);
+ __ Mov(x19, 0x80000000);
+ __ Mov(x20, 0x8000000000000000UL);
+ __ Mov(x21, 2);
+
+ __ Udiv(w0, w16, w16);
+ __ Udiv(w1, w17, w16);
+ __ Sdiv(w2, w16, w16);
+ __ Sdiv(w3, w16, w17);
+ __ Sdiv(w4, w17, w18);
+
+ __ Udiv(x5, x16, x16);
+ __ Udiv(x6, x17, x18);
+ __ Sdiv(x7, x16, x16);
+ __ Sdiv(x8, x16, x17);
+ __ Sdiv(x9, x17, x18);
+
+ __ Udiv(w10, w19, w21);
+ __ Sdiv(w11, w19, w21);
+ __ Udiv(x12, x19, x21);
+ __ Sdiv(x13, x19, x21);
+ __ Udiv(x14, x20, x21);
+ __ Sdiv(x15, x20, x21);
+
+ __ Udiv(w22, w19, w17);
+ __ Sdiv(w23, w19, w17);
+ __ Udiv(x24, x20, x18);
+ __ Sdiv(x25, x20, x18);
+
+ __ Udiv(x26, x16, x21);
+ __ Sdiv(x27, x16, x21);
+ __ Udiv(x28, x18, x21);
+ __ Sdiv(x29, x18, x21);
+
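+ // AArch64 integer division by zero does not trap; it yields zero.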
+ __ Mov(x17, 0);
+ __ Udiv(w18, w16, w17);
+ __ Sdiv(w19, w16, w17);
+ __ Udiv(x20, x16, x17);
+ __ Sdiv(x21, x16, x17);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0xffffffff, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(1, x5);
+ ASSERT_EQUAL_64(0, x6);
+ ASSERT_EQUAL_64(1, x7);
+ ASSERT_EQUAL_64(0, x8);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x9);
+ ASSERT_EQUAL_64(0x40000000, x10);
+ ASSERT_EQUAL_64(0xC0000000, x11);
+ ASSERT_EQUAL_64(0x40000000, x12);
+ ASSERT_EQUAL_64(0x40000000, x13);
+ ASSERT_EQUAL_64(0x4000000000000000UL, x14);
+ ASSERT_EQUAL_64(0xC000000000000000UL, x15);
+ ASSERT_EQUAL_64(0, x22);
+ ASSERT_EQUAL_64(0x80000000, x23);
+ ASSERT_EQUAL_64(0, x24);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0, x27);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28);
+ ASSERT_EQUAL_64(0, x29);
+ ASSERT_EQUAL_64(0, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0, x20);
+ ASSERT_EQUAL_64(0, x21);
+
+ TEARDOWN();
+}
+
+
+TEST(rbit_rev) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x24, 0xfedcba9876543210UL);
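+ // Rbit reverses the bit order; Rev16, Rev32 and Rev reverse the byte order
+ // within each halfword, each word and the whole register respectively.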
+ __ Rbit(w0, w24);
+ __ Rbit(x1, x24);
+ __ Rev16(w2, w24);
+ __ Rev16(x3, x24);
+ __ Rev(w4, w24);
+ __ Rev32(x5, x24);
+ __ Rev(x6, x24);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x084c2a6e, x0);
+ ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
+ ASSERT_EQUAL_64(0x54761032, x2);
+ ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3);
+ ASSERT_EQUAL_64(0x10325476, x4);
+ ASSERT_EQUAL_64(0x98badcfe10325476UL, x5);
+ ASSERT_EQUAL_64(0x1032547698badcfeUL, x6);
+
+ TEARDOWN();
+}
+
+
+TEST(clz_cls) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x24, 0x0008000000800000UL);
+ __ Mov(x25, 0xff800000fff80000UL);
+ __ Mov(x26, 0);
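+ // Clz counts leading zero bits; Cls counts the leading bits that match the
+ // sign bit, excluding the sign bit itself.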
+ __ Clz(w0, w24);
+ __ Clz(x1, x24);
+ __ Clz(w2, w25);
+ __ Clz(x3, x25);
+ __ Clz(w4, w26);
+ __ Clz(x5, x26);
+ __ Cls(w6, w24);
+ __ Cls(x7, x24);
+ __ Cls(w8, w25);
+ __ Cls(x9, x25);
+ __ Cls(w10, w26);
+ __ Cls(x11, x26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(8, x0);
+ ASSERT_EQUAL_64(12, x1);
+ ASSERT_EQUAL_64(0, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(32, x4);
+ ASSERT_EQUAL_64(64, x5);
+ ASSERT_EQUAL_64(7, x6);
+ ASSERT_EQUAL_64(11, x7);
+ ASSERT_EQUAL_64(12, x8);
+ ASSERT_EQUAL_64(8, x9);
+ ASSERT_EQUAL_64(31, x10);
+ ASSERT_EQUAL_64(63, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(label) {
+ INIT_V8();
+ SETUP();
+
+ Label label_1, label_2, label_3, label_4;
+
+ START();
+ __ Mov(x0, 0x1);
+ __ Mov(x1, 0x0);
+ __ Mov(x22, lr); // Save lr.
+
+ __ B(&label_1);
+ __ B(&label_1);
+ __ B(&label_1); // Multiple branches to the same label.
+ __ Mov(x0, 0x0);
+ __ Bind(&label_2);
+ __ B(&label_3); // Forward branch.
+ __ Mov(x0, 0x0);
+ __ Bind(&label_1);
+ __ B(&label_2); // Backward branch.
+ __ Mov(x0, 0x0);
+ __ Bind(&label_3);
+ __ Bl(&label_4);
+ END();
+
+ __ Bind(&label_4);
+ __ Mov(x1, 0x1);
+ __ Mov(lr, x22);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_at_start) {
+ INIT_V8();
+ SETUP();
+
+ Label good, exit;
+
+ // Test that branches can exist at the start of the buffer. (This is a
+ // boundary condition in the label-handling code.) To achieve this, we have
+ // to work around the code generated by START.
+ RESET();
+ __ B(&good);
+
+ START_AFTER_RESET();
+ __ Mov(x0, 0x0);
+ END();
+
+ __ Bind(&exit);
+ START_AFTER_RESET();
+ __ Mov(x0, 0x1);
+ END();
+
+ __ Bind(&good);
+ __ B(&exit);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1, x0);
+ TEARDOWN();
+}
+
+
+TEST(adr) {
+ INIT_V8();
+ SETUP();
+
+ Label label_1, label_2, label_3, label_4;
+
+ START();
+ __ Mov(x0, 0x0); // Will be set to non-zero to indicate failure.
+ __ Adr(x1, &label_3); // Will be zeroed by the self-reference check on success.
+
+ __ Adr(x2, &label_1); // Multiple forward references to the same label.
+ __ Adr(x3, &label_1);
+ __ Adr(x4, &label_1);
+
+ __ Bind(&label_2);
+ __ Eor(x5, x2, Operand(x3)); // Ensure that x2,x3 and x4 are identical.
+ __ Eor(x6, x2, Operand(x4));
+ __ Orr(x0, x0, Operand(x5));
+ __ Orr(x0, x0, Operand(x6));
+ __ Br(x2); // label_1, label_3
+
+ __ Bind(&label_3);
+ __ Adr(x2, &label_3); // Self-reference (offset 0).
+ __ Eor(x1, x1, Operand(x2));
+ __ Adr(x2, &label_4); // Simple forward reference.
+ __ Br(x2); // label_4
+
+ __ Bind(&label_1);
+ __ Adr(x2, &label_3); // Multiple reverse references to the same label.
+ __ Adr(x3, &label_3);
+ __ Adr(x4, &label_3);
+ __ Adr(x5, &label_2); // Simple reverse reference.
+ __ Br(x5); // label_2
+
+ __ Bind(&label_4);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0, x0);
+ ASSERT_EQUAL_64(0x0, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_cond) {
+ INIT_V8();
+ SETUP();
+
+ Label wrong;
+
+ START();
+ __ Mov(x0, 0x1);
+ __ Mov(x1, 0x1);
+ __ Mov(x2, 0x8000000000000000L);
+
+ // For each 'cmp' instruction below, condition codes other than the ones
+ // tested immediately after it would take the branch.
+
+ __ Cmp(x1, 0);
+ __ B(&wrong, eq);
+ __ B(&wrong, lo);
+ __ B(&wrong, mi);
+ __ B(&wrong, vs);
+ __ B(&wrong, ls);
+ __ B(&wrong, lt);
+ __ B(&wrong, le);
+ Label ok_1;
+ __ B(&ok_1, ne);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_1);
+
+ __ Cmp(x1, 1);
+ __ B(&wrong, ne);
+ __ B(&wrong, lo);
+ __ B(&wrong, mi);
+ __ B(&wrong, vs);
+ __ B(&wrong, hi);
+ __ B(&wrong, lt);
+ __ B(&wrong, gt);
+ Label ok_2;
+ __ B(&ok_2, pl);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_2);
+
+ __ Cmp(x1, 2);
+ __ B(&wrong, eq);
+ __ B(&wrong, hs);
+ __ B(&wrong, pl);
+ __ B(&wrong, vs);
+ __ B(&wrong, hi);
+ __ B(&wrong, ge);
+ __ B(&wrong, gt);
+ Label ok_3;
+ __ B(&ok_3, vc);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_3);
+
+ __ Cmp(x2, 1);
+ __ B(&wrong, eq);
+ __ B(&wrong, lo);
+ __ B(&wrong, mi);
+ __ B(&wrong, vc);
+ __ B(&wrong, ls);
+ __ B(&wrong, ge);
+ __ B(&wrong, gt);
+ Label ok_4;
+ __ B(&ok_4, le);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_4);
+
+ Label ok_5;
+ __ b(&ok_5, al);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_5);
+
+ Label ok_6;
+ __ b(&ok_6, nv);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_6);
+
+ END();
+
+ __ Bind(&wrong);
+ __ Mov(x0, 0x0);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_to_reg) {
+ INIT_V8();
+ SETUP();
+
+ // Test br.
+ Label fn1, after_fn1;
+
+ START();
+ __ Mov(x29, lr);
+
+ __ Mov(x1, 0);
+ __ B(&after_fn1);
+
+ __ Bind(&fn1);
+ __ Mov(x0, lr);
+ __ Mov(x1, 42);
+ __ Br(x0);
+
+ __ Bind(&after_fn1);
+ __ Bl(&fn1);
+
+ // Test blr.
+ Label fn2, after_fn2;
+
+ __ Mov(x2, 0);
+ __ B(&after_fn2);
+
+ __ Bind(&fn2);
+ __ Mov(x0, lr);
+ __ Mov(x2, 84);
+ __ Blr(x0);
+
+ __ Bind(&after_fn2);
+ __ Bl(&fn2);
+ __ Mov(x3, lr);
+
+ __ Mov(lr, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
+ ASSERT_EQUAL_64(42, x1);
+ ASSERT_EQUAL_64(84, x2);
+
+ TEARDOWN();
+}
+
+
+TEST(compare_branch) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0);
+ __ Mov(x2, 0);
+ __ Mov(x3, 0);
+ __ Mov(x4, 0);
+ __ Mov(x5, 0);
+ __ Mov(x16, 0);
+ __ Mov(x17, 42);
+
+ Label zt, zt_end;
+ __ Cbz(w16, &zt);
+ __ B(&zt_end);
+ __ Bind(&zt);
+ __ Mov(x0, 1);
+ __ Bind(&zt_end);
+
+ Label zf, zf_end;
+ __ Cbz(x17, &zf);
+ __ B(&zf_end);
+ __ Bind(&zf);
+ __ Mov(x1, 1);
+ __ Bind(&zf_end);
+
+ Label nzt, nzt_end;
+ __ Cbnz(w17, &nzt);
+ __ B(&nzt_end);
+ __ Bind(&nzt);
+ __ Mov(x2, 1);
+ __ Bind(&nzt_end);
+
+ Label nzf, nzf_end;
+ __ Cbnz(x16, &nzf);
+ __ B(&nzf_end);
+ __ Bind(&nzf);
+ __ Mov(x3, 1);
+ __ Bind(&nzf_end);
+
+ __ Mov(x18, 0xffffffff00000000UL);
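+ // The W-register forms of Cbz/Cbnz only test the low 32 bits, which are zero
+ // here.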
+
+ Label a, a_end;
+ __ Cbz(w18, &a);
+ __ B(&a_end);
+ __ Bind(&a);
+ __ Mov(x4, 1);
+ __ Bind(&a_end);
+
+ Label b, b_end;
+ __ Cbnz(w18, &b);
+ __ B(&b_end);
+ __ Bind(&b);
+ __ Mov(x5, 1);
+ __ Bind(&b_end);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(0, x5);
+
+ TEARDOWN();
+}
+
+
+TEST(test_branch) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0);
+ __ Mov(x2, 0);
+ __ Mov(x3, 0);
+ __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);
+
+ Label bz, bz_end;
+ __ Tbz(w16, 0, &bz);
+ __ B(&bz_end);
+ __ Bind(&bz);
+ __ Mov(x0, 1);
+ __ Bind(&bz_end);
+
+ Label bo, bo_end;
+ __ Tbz(x16, 63, &bo);
+ __ B(&bo_end);
+ __ Bind(&bo);
+ __ Mov(x1, 1);
+ __ Bind(&bo_end);
+
+ Label nbz, nbz_end;
+ __ Tbnz(x16, 61, &nbz);
+ __ B(&nbz_end);
+ __ Bind(&nbz);
+ __ Mov(x2, 1);
+ __ Bind(&nbz_end);
+
+ Label nbo, nbo_end;
+ __ Tbnz(w16, 2, &nbo);
+ __ B(&nbo_end);
+ __ Bind(&nbo);
+ __ Mov(x3, 1);
+ __ Bind(&nbo_end);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_backward) {
+ INIT_V8();
+
+ // Test that the MacroAssembler correctly resolves backward branches to labels
+ // that are outside the immediate range of branch instructions.
+ int max_range =
+ std::max(Instruction::ImmBranchRange(TestBranchType),
+ std::max(Instruction::ImmBranchRange(CompareBranchType),
+ Instruction::ImmBranchRange(CondBranchType)));
+
+ SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label done, fail;
+ Label test_tbz, test_cbz, test_bcond;
+ Label success_tbz, success_cbz, success_bcond;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ __ B(&test_tbz);
+ __ Bind(&success_tbz);
+ __ Orr(x0, x0, 1 << 0);
+ __ B(&test_cbz);
+ __ Bind(&success_cbz);
+ __ Orr(x0, x0, 1 << 1);
+ __ B(&test_bcond);
+ __ Bind(&success_bcond);
+ __ Orr(x0, x0, 1 << 2);
+
+ __ B(&done);
+
+ // Generate enough code to overflow the immediate range of the three types of
+ // branches below.
+ for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ if (i % 100 == 0) {
+ // If we do land in this code, we do not want to execute so many nops
+ // before reaching the end of the test (especially if tracing is activated).
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+ __ B(&fail);
+
+ __ Bind(&test_tbz);
+ __ Tbz(x10, 7, &success_tbz);
+ __ Bind(&test_cbz);
+ __ Cbz(x10, &success_cbz);
+ __ Bind(&test_bcond);
+ __ Cmp(x10, 0);
+ __ B(eq, &success_bcond);
+
+ // For each out-of-range branch instruction, at least two instructions should
+ // have been generated.
+ CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
+
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x7, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_simple_veneer) {
+ INIT_V8();
+
+ // Test that the MacroAssembler correctly emits veneers for forward branches
+ // to labels that are outside the immediate range of branch instructions.
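+ // A veneer is an unconditional branch emitted near the short-range branch,
+ // extending its reach to the distant target.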
+ int max_range =
+ std::max(Instruction::ImmBranchRange(TestBranchType),
+ std::max(Instruction::ImmBranchRange(CompareBranchType),
+ Instruction::ImmBranchRange(CondBranchType)));
+
+ SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label done, fail;
+ Label test_tbz, test_cbz, test_bcond;
+ Label success_tbz, success_cbz, success_bcond;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ __ Bind(&test_tbz);
+ __ Tbz(x10, 7, &success_tbz);
+ __ Bind(&test_cbz);
+ __ Cbz(x10, &success_cbz);
+ __ Bind(&test_bcond);
+ __ Cmp(x10, 0);
+ __ B(eq, &success_bcond);
+
+ // Generate enough code to overflow the immediate range of the three types of
+ // branches above.
+ for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ if (i % 100 == 0) {
+ // If we do land in this code, we do not want to execute so many nops
+ // before reaching the end of the test (especially if tracing is activated).
+ // Also, the branches give the MacroAssembler the opportunity to emit the
+ // veneers.
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+ __ B(&fail);
+
+ __ Bind(&success_tbz);
+ __ Orr(x0, x0, 1 << 0);
+ __ B(&test_cbz);
+ __ Bind(&success_cbz);
+ __ Orr(x0, x0, 1 << 1);
+ __ B(&test_bcond);
+ __ Bind(&success_bcond);
+ __ Orr(x0, x0, 1 << 2);
+
+ __ B(&done);
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x7, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_veneer_link_chain) {
+ INIT_V8();
+
+ // Test that the MacroAssembler correctly emits veneers for forward branches
+ // that target out-of-range labels and belong to a chain of multiple
+ // instructions jumping to the same label.
+ //
+ // We test the three situations with the different types of instruction:
+ // (1)- When the branch is at the start of the chain with tbz.
+ // (2)- When the branch is in the middle of the chain with cbz.
+ // (3)- When the branch is at the end of the chain with bcond.
+ int max_range =
+ std::max(Instruction::ImmBranchRange(TestBranchType),
+ std::max(Instruction::ImmBranchRange(CompareBranchType),
+ Instruction::ImmBranchRange(CondBranchType)));
+
+ SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label skip, fail, done;
+ Label test_tbz, test_cbz, test_bcond;
+ Label success_tbz, success_cbz, success_bcond;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ __ B(&skip);
+ // Branches at the start of the chain for situations (2) and (3).
+ __ B(&success_cbz);
+ __ B(&success_bcond);
+ __ Nop();
+ __ B(&success_bcond);
+ __ B(&success_cbz);
+ __ Bind(&skip);
+
+ __ Bind(&test_tbz);
+ __ Tbz(x10, 7, &success_tbz);
+ __ Bind(&test_cbz);
+ __ Cbz(x10, &success_cbz);
+ __ Bind(&test_bcond);
+ __ Cmp(x10, 0);
+ __ B(eq, &success_bcond);
+
+ skip.Unuse();
+ __ B(&skip);
+ // Branches at the end of the chain for situations (1) and (2).
+ __ B(&success_cbz);
+ __ B(&success_tbz);
+ __ Nop();
+ __ B(&success_tbz);
+ __ B(&success_cbz);
+ __ Bind(&skip);
+
+ // Generate enough code to overflow the immediate range of the three types of
+ // branches above.
+ for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ if (i % 100 == 0) {
+ // If we do land in this code, we do not want to execute so many nops
+ // before reaching the end of the test (especially if tracing is activated).
+ // Also, the branches give the MacroAssembler the opportunity to emit the
+ // veneers.
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+ __ B(&fail);
+
+ __ Bind(&success_tbz);
+ __ Orr(x0, x0, 1 << 0);
+ __ B(&test_cbz);
+ __ Bind(&success_cbz);
+ __ Orr(x0, x0, 1 << 1);
+ __ B(&test_bcond);
+ __ Bind(&success_bcond);
+ __ Orr(x0, x0, 1 << 2);
+
+ __ B(&done);
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x7, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_veneer_broken_link_chain) {
+ INIT_V8();
+
+ // Check that the MacroAssembler correctly handles the case where a branch is
+ // removed from the link chain of a label and the two links on either side of
+ // the removed branch cannot be linked together because they are out of range.
+ //
+ // We test with tbz because it has a small range.
+ int max_range = Instruction::ImmBranchRange(TestBranchType);
+ int inter_range = max_range / 2 + max_range / 10;
+
+ SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label skip, fail, done;
+ Label test_1, test_2, test_3;
+ Label far_target;
+
+ __ Mov(x0, 0); // Indicates the origin of the branch.
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ // First instruction in the label chain.
+ __ Bind(&test_1);
+ __ Mov(x0, 1);
+ __ B(&far_target);
+
+ for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ if (i % 100 == 0) {
+ // Do not allow generating veneers. They should not be needed.
+ __ b(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+
+ // Will need a veneer to reach the target.
+ __ Bind(&test_2);
+ __ Mov(x0, 2);
+ __ Tbz(x10, 7, &far_target);
+
+ for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ if (i % 100 == 0) {
+ // Do not allow generating veneers. They should not be needed.
+ __ b(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+
+ // Does not need a veneer to reach the target, but the initial branch
+ // instruction is out of range.
+ __ Bind(&test_3);
+ __ Mov(x0, 3);
+ __ Tbz(x10, 7, &far_target);
+
+ for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ if (i % 100 == 0) {
+ // Allow generating veneers.
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+
+ __ B(&fail);
+
+ __ Bind(&far_target);
+ __ Cmp(x0, 1);
+ __ B(eq, &test_2);
+ __ Cmp(x0, 2);
+ __ B(eq, &test_3);
+
+ __ B(&done);
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x3, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_offset) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Ldr(w0, MemOperand(x17));
+ __ Str(w0, MemOperand(x18));
+ __ Ldr(w1, MemOperand(x17, 4));
+ __ Str(w1, MemOperand(x18, 12));
+ __ Ldr(x2, MemOperand(x17, 8));
+ __ Str(x2, MemOperand(x18, 16));
+ __ Ldrb(w3, MemOperand(x17, 1));
+ __ Strb(w3, MemOperand(x18, 25));
+ __ Ldrh(w4, MemOperand(x17, 2));
+ __ Strh(w4, MemOperand(x18, 33));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x76543210, x0);
+ ASSERT_EQUAL_64(0x76543210, dst[0]);
+ ASSERT_EQUAL_64(0xfedcba98, x1);
+ ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ ASSERT_EQUAL_64(0x32, x3);
+ ASSERT_EQUAL_64(0x3200, dst[3]);
+ ASSERT_EQUAL_64(0x7654, x4);
+ ASSERT_EQUAL_64(0x765400, dst[4]);
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base, x18);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_wide) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[8192];
+ uint32_t dst[8192];
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+ memset(src, 0xaa, 8192 * sizeof(src[0]));
+ memset(dst, 0xaa, 8192 * sizeof(dst[0]));
+ src[0] = 0;
+ src[6144] = 6144;
+ src[8191] = 8191;
+
+ START();
+ __ Mov(x22, src_base);
+ __ Mov(x23, dst_base);
+ __ Mov(x24, src_base);
+ __ Mov(x25, dst_base);
+ __ Mov(x26, src_base);
+ __ Mov(x27, dst_base);
+
+ __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
+ __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
+ __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
+ __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
+ __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
+ __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(8191, w0);
+ ASSERT_EQUAL_32(8191, dst[8191]);
+ ASSERT_EQUAL_64(src_base, x22);
+ ASSERT_EQUAL_64(dst_base, x23);
+ ASSERT_EQUAL_32(0, w1);
+ ASSERT_EQUAL_32(0, dst[0]);
+ ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
+ ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
+ ASSERT_EQUAL_32(6144, w2);
+ ASSERT_EQUAL_32(6144, dst[6144]);
+ ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
+ ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_preindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base);
+ __ Mov(x20, dst_base);
+ __ Mov(x21, src_base + 16);
+ __ Mov(x22, dst_base + 40);
+ __ Mov(x23, src_base);
+ __ Mov(x24, dst_base);
+ __ Mov(x25, src_base);
+ __ Mov(x26, dst_base);
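+ // Pre-index addressing updates the base register before the memory access.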
+ __ Ldr(w0, MemOperand(x17, 4, PreIndex));
+ __ Str(w0, MemOperand(x18, 12, PreIndex));
+ __ Ldr(x1, MemOperand(x19, 8, PreIndex));
+ __ Str(x1, MemOperand(x20, 16, PreIndex));
+ __ Ldr(w2, MemOperand(x21, -4, PreIndex));
+ __ Str(w2, MemOperand(x22, -4, PreIndex));
+ __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
+ __ Strb(w3, MemOperand(x24, 25, PreIndex));
+ __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
+ __ Strh(w4, MemOperand(x26, 41, PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfedcba98, x0);
+ ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ ASSERT_EQUAL_64(0x01234567, x2);
+ ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]);
+ ASSERT_EQUAL_64(0x32, x3);
+ ASSERT_EQUAL_64(0x3200, dst[3]);
+ ASSERT_EQUAL_64(0x9876, x4);
+ ASSERT_EQUAL_64(0x987600, dst[5]);
+ ASSERT_EQUAL_64(src_base + 4, x17);
+ ASSERT_EQUAL_64(dst_base + 12, x18);
+ ASSERT_EQUAL_64(src_base + 8, x19);
+ ASSERT_EQUAL_64(dst_base + 16, x20);
+ ASSERT_EQUAL_64(src_base + 12, x21);
+ ASSERT_EQUAL_64(dst_base + 36, x22);
+ ASSERT_EQUAL_64(src_base + 1, x23);
+ ASSERT_EQUAL_64(dst_base + 25, x24);
+ ASSERT_EQUAL_64(src_base + 3, x25);
+ ASSERT_EQUAL_64(dst_base + 41, x26);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_postindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base + 4);
+ __ Mov(x18, dst_base + 12);
+ __ Mov(x19, src_base + 8);
+ __ Mov(x20, dst_base + 16);
+ __ Mov(x21, src_base + 8);
+ __ Mov(x22, dst_base + 32);
+ __ Mov(x23, src_base + 1);
+ __ Mov(x24, dst_base + 25);
+ __ Mov(x25, src_base + 3);
+ __ Mov(x26, dst_base + 41);
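+ // Post-index addressing accesses memory at the base address, then updates the
+ // base register.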
+ __ Ldr(w0, MemOperand(x17, 4, PostIndex));
+ __ Str(w0, MemOperand(x18, 12, PostIndex));
+ __ Ldr(x1, MemOperand(x19, 8, PostIndex));
+ __ Str(x1, MemOperand(x20, 16, PostIndex));
+ __ Ldr(x2, MemOperand(x21, -8, PostIndex));
+ __ Str(x2, MemOperand(x22, -32, PostIndex));
+ __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
+ __ Strb(w3, MemOperand(x24, 5, PostIndex));
+ __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
+ __ Strh(w4, MemOperand(x26, -41, PostIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfedcba98, x0);
+ ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]);
+ ASSERT_EQUAL_64(0x32, x3);
+ ASSERT_EQUAL_64(0x3200, dst[3]);
+ ASSERT_EQUAL_64(0x9876, x4);
+ ASSERT_EQUAL_64(0x987600, dst[5]);
+ ASSERT_EQUAL_64(src_base + 8, x17);
+ ASSERT_EQUAL_64(dst_base + 24, x18);
+ ASSERT_EQUAL_64(src_base + 16, x19);
+ ASSERT_EQUAL_64(dst_base + 32, x20);
+ ASSERT_EQUAL_64(src_base, x21);
+ ASSERT_EQUAL_64(dst_base, x22);
+ ASSERT_EQUAL_64(src_base + 2, x23);
+ ASSERT_EQUAL_64(dst_base + 30, x24);
+ ASSERT_EQUAL_64(src_base, x25);
+ ASSERT_EQUAL_64(dst_base, x26);
+
+ TEARDOWN();
+}
+
+
+TEST(load_signed) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[2] = {0x80008080, 0x7fff7f7f};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+
+ START();
+ __ Mov(x24, src_base);
+ __ Ldrsb(w0, MemOperand(x24));
+ __ Ldrsb(w1, MemOperand(x24, 4));
+ __ Ldrsh(w2, MemOperand(x24));
+ __ Ldrsh(w3, MemOperand(x24, 4));
+ __ Ldrsb(x4, MemOperand(x24));
+ __ Ldrsb(x5, MemOperand(x24, 4));
+ __ Ldrsh(x6, MemOperand(x24));
+ __ Ldrsh(x7, MemOperand(x24, 4));
+ __ Ldrsw(x8, MemOperand(x24));
+ __ Ldrsw(x9, MemOperand(x24, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff80, x0);
+ ASSERT_EQUAL_64(0x0000007f, x1);
+ ASSERT_EQUAL_64(0xffff8080, x2);
+ ASSERT_EQUAL_64(0x00007f7f, x3);
+ ASSERT_EQUAL_64(0xffffffffffffff80UL, x4);
+ ASSERT_EQUAL_64(0x000000000000007fUL, x5);
+ ASSERT_EQUAL_64(0xffffffffffff8080UL, x6);
+ ASSERT_EQUAL_64(0x0000000000007f7fUL, x7);
+ ASSERT_EQUAL_64(0xffffffff80008080UL, x8);
+ ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9);
+
+ TEARDOWN();
+}
+
+
+TEST(load_store_regoffset) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[3] = {1, 2, 3};
+ uint32_t dst[4] = {0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, src_base + 3 * sizeof(src[0]));
+ __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
+ __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
+ __ Mov(x24, 0);
+ __ Mov(x25, 4);
+ __ Mov(x26, -4);
+ __ Mov(x27, 0xfffffffc); // 32-bit -4.
+ __ Mov(x28, 0xfffffffe); // 32-bit -2.
+ __ Mov(x29, 0xffffffff); // 32-bit -1.
+
+ __ Ldr(w0, MemOperand(x16, x24));
+ __ Ldr(x1, MemOperand(x16, x25));
+ __ Ldr(w2, MemOperand(x18, x26));
+ __ Ldr(w3, MemOperand(x18, x27, SXTW));
+ __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
+ __ Str(w0, MemOperand(x17, x24));
+ __ Str(x1, MemOperand(x17, x25));
+ __ Str(w2, MemOperand(x20, x29, SXTW, 2));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0x0000000300000002UL, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(3, x3);
+ ASSERT_EQUAL_64(2, x4);
+ ASSERT_EQUAL_32(1, dst[0]);
+ ASSERT_EQUAL_32(2, dst[1]);
+ ASSERT_EQUAL_32(3, dst[2]);
+ ASSERT_EQUAL_32(3, dst[3]);
+
+ TEARDOWN();
+}
+
+
+TEST(load_store_float) {
+ INIT_V8();
+ SETUP();
+
+ float src[3] = {1.0, 2.0, 3.0};
+ float dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base);
+ __ Mov(x20, dst_base);
+ __ Mov(x21, src_base);
+ __ Mov(x22, dst_base);
+ __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
+ __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
+ __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
+ __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
+ __ Str(s2, MemOperand(x22, sizeof(dst[0])));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(2.0, s0);
+ ASSERT_EQUAL_FP32(2.0, dst[0]);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(1.0, dst[2]);
+ ASSERT_EQUAL_FP32(3.0, s2);
+ ASSERT_EQUAL_FP32(3.0, dst[1]);
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
+ ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+ ASSERT_EQUAL_64(dst_base, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(load_store_double) {
+ INIT_V8();
+ SETUP();
+
+ double src[3] = {1.0, 2.0, 3.0};
+ double dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base);
+ __ Mov(x20, dst_base);
+ __ Mov(x21, src_base);
+ __ Mov(x22, dst_base);
+ __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
+ __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
+ __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
+ __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
+ __ Str(d2, MemOperand(x22, sizeof(dst[0])));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(2.0, d0);
+ ASSERT_EQUAL_FP64(2.0, dst[0]);
+ ASSERT_EQUAL_FP64(1.0, d1);
+ ASSERT_EQUAL_FP64(1.0, dst[2]);
+ ASSERT_EQUAL_FP64(3.0, d2);
+ ASSERT_EQUAL_FP64(3.0, dst[1]);
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
+ ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+ ASSERT_EQUAL_64(dst_base, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_float) {
+ INIT_V8();
+ SETUP();
+
+ float src[2] = {1.0, 2.0};
+ float dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
+ __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s31);
+ ASSERT_EQUAL_FP32(2.0, s0);
+ ASSERT_EQUAL_FP32(0.0, dst[0]);
+ ASSERT_EQUAL_FP32(2.0, dst[1]);
+ ASSERT_EQUAL_FP32(1.0, dst[2]);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_double) {
+ INIT_V8();
+ SETUP();
+
+ double src[2] = {1.0, 2.0};
+ double dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
+ __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(1.0, d31);
+ ASSERT_EQUAL_FP64(2.0, d0);
+ ASSERT_EQUAL_FP64(0.0, dst[0]);
+ ASSERT_EQUAL_FP64(2.0, dst[1]);
+ ASSERT_EQUAL_FP64(1.0, dst[2]);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_offset) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL};
+ uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, src_base + 24);
+ __ Mov(x19, dst_base + 56);
+ __ Ldp(w0, w1, MemOperand(x16));
+ __ Ldp(w2, w3, MemOperand(x16, 4));
+ __ Ldp(x4, x5, MemOperand(x16, 8));
+ __ Ldp(w6, w7, MemOperand(x18, -12));
+ __ Ldp(x8, x9, MemOperand(x18, -16));
+ __ Stp(w0, w1, MemOperand(x17));
+ __ Stp(w2, w3, MemOperand(x17, 8));
+ __ Stp(x4, x5, MemOperand(x17, 16));
+ __ Stp(w6, w7, MemOperand(x19, -24));
+ __ Stp(x8, x9, MemOperand(x19, -16));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x44556677, x0);
+ ASSERT_EQUAL_64(0x00112233, x1);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
+ ASSERT_EQUAL_64(0x00112233, x2);
+ ASSERT_EQUAL_64(0xccddeeff, x3);
+ ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+ ASSERT_EQUAL_64(0x8899aabb, x6);
+ ASSERT_EQUAL_64(0xbbaa9988, x7);
+ ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(src_base + 24, x18);
+ ASSERT_EQUAL_64(dst_base + 56, x19);
+
+ TEARDOWN();
+}
+
+
+TEST(ldnp_stnp_offset) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL};
+ uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, src_base + 24);
+ __ Mov(x19, dst_base + 56);
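+ // Ldnp and Stnp are the non-temporal (streaming hint) variants of Ldp/Stp;
+ // they only support the immediate-offset addressing mode.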
+ __ Ldnp(w0, w1, MemOperand(x16));
+ __ Ldnp(w2, w3, MemOperand(x16, 4));
+ __ Ldnp(x4, x5, MemOperand(x16, 8));
+ __ Ldnp(w6, w7, MemOperand(x18, -12));
+ __ Ldnp(x8, x9, MemOperand(x18, -16));
+ __ Stnp(w0, w1, MemOperand(x17));
+ __ Stnp(w2, w3, MemOperand(x17, 8));
+ __ Stnp(x4, x5, MemOperand(x17, 16));
+ __ Stnp(w6, w7, MemOperand(x19, -24));
+ __ Stnp(x8, x9, MemOperand(x19, -16));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x44556677, x0);
+ ASSERT_EQUAL_64(0x00112233, x1);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
+ ASSERT_EQUAL_64(0x00112233, x2);
+ ASSERT_EQUAL_64(0xccddeeff, x3);
+ ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+ ASSERT_EQUAL_64(0x8899aabb, x6);
+ ASSERT_EQUAL_64(0xbbaa9988, x7);
+ ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(src_base + 24, x18);
+ ASSERT_EQUAL_64(dst_base + 56, x19);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_preindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL};
+ uint64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, dst_base + 16);
+ __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
+ __ Mov(x19, x16);
+ __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
+ __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
+ __ Mov(x20, x17);
+ __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
+ __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
+ __ Mov(x21, x16);
+ __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
+ __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
+ __ Mov(x22, x18);
+ __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00112233, x0);
+ ASSERT_EQUAL_64(0xccddeeff, x1);
+ ASSERT_EQUAL_64(0x44556677, x2);
+ ASSERT_EQUAL_64(0x00112233, x3);
+ ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]);
+ ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ ASSERT_EQUAL_64(0x0011223344556677UL, x6);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(dst_base + 16, x18);
+ ASSERT_EQUAL_64(src_base + 4, x19);
+ ASSERT_EQUAL_64(dst_base + 4, x20);
+ ASSERT_EQUAL_64(src_base + 8, x21);
+ ASSERT_EQUAL_64(dst_base + 24, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_postindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL, 0x7766554433221100UL};
+ uint64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, dst_base + 16);
+ __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
+ __ Mov(x19, x16);
+ __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
+ __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
+ __ Mov(x20, x17);
+ __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
+ __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
+ __ Mov(x21, x16);
+ __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
+ __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
+ __ Mov(x22, x18);
+ __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x44556677, x0);
+ ASSERT_EQUAL_64(0x00112233, x1);
+ ASSERT_EQUAL_64(0x00112233, x2);
+ ASSERT_EQUAL_64(0xccddeeff, x3);
+ ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]);
+ ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x0011223344556677UL, x4);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(dst_base + 16, x18);
+ ASSERT_EQUAL_64(src_base + 4, x19);
+ ASSERT_EQUAL_64(dst_base + 4, x20);
+ ASSERT_EQUAL_64(src_base + 8, x21);
+ ASSERT_EQUAL_64(dst_base + 24, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_sign_extend) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[2] = {0x80000000, 0x7fffffff};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+
+ START();
+ __ Mov(x24, src_base);
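+ // Ldpsw loads two 32-bit words and sign-extends each of them to 64 bits.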
+ __ Ldpsw(x0, x1, MemOperand(x24));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff80000000UL, x0);
+ ASSERT_EQUAL_64(0x000000007fffffffUL, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(ldur_stur) {
+ INIT_V8();
+ SETUP();
+
+ int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
+ int64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base + 16);
+ __ Mov(x20, dst_base + 32);
+ __ Mov(x21, dst_base + 40);
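+ // The offsets below cannot be encoded as scaled unsigned immediates, so the
+ // unscaled ldur/stur forms are used.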
+ __ Ldr(w0, MemOperand(x17, 1));
+ __ Str(w0, MemOperand(x18, 2));
+ __ Ldr(x1, MemOperand(x17, 3));
+ __ Str(x1, MemOperand(x18, 9));
+ __ Ldr(w2, MemOperand(x19, -9));
+ __ Str(w2, MemOperand(x20, -5));
+ __ Ldrb(w3, MemOperand(x19, -1));
+ __ Strb(w3, MemOperand(x21, -1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x6789abcd, x0);
+ ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]);
+ ASSERT_EQUAL_64(0xabcdef0123456789L, x1);
+ ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]);
+ ASSERT_EQUAL_64(0x000000ab, dst[2]);
+ ASSERT_EQUAL_64(0xabcdef01, x2);
+ ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]);
+ ASSERT_EQUAL_64(0x00000001, x3);
+ ASSERT_EQUAL_64(0x0100000000000000L, dst[4]);
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base, x18);
+ ASSERT_EQUAL_64(src_base + 16, x19);
+ ASSERT_EQUAL_64(dst_base + 32, x20);
+
+ TEARDOWN();
+}
+
+
+#if 0 // TODO(all) enable.
+// TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
+TEST(ldr_literal) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Ldr(x2, 0x1234567890abcdefUL);
+ __ Ldr(w3, 0xfedcba09);
+ __ Ldr(d13, 1.234);
+ __ Ldr(s25, 2.5);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x2);
+ ASSERT_EQUAL_64(0xfedcba09, x3);
+ ASSERT_EQUAL_FP64(1.234, d13);
+ ASSERT_EQUAL_FP32(2.5, s25);
+
+ TEARDOWN();
+}
+
+
+static void LdrLiteralRangeHelper(ptrdiff_t range_,
+ LiteralPoolEmitOption option,
+ bool expect_dump) {
+ ASSERT(range_ > 0);
+ SETUP_SIZE(range_ + 1024);
+
+ Label label_1, label_2;
+
+ size_t range = static_cast<size_t>(range_);
+ size_t code_size = 0;
+ size_t pool_guard_size;
+
+ if (option == NoJumpRequired) {
+ // Space for an explicit branch.
+ pool_guard_size = sizeof(Instr);
+ } else {
+ pool_guard_size = 0;
+ }
+
+ START();
+ // Force a pool dump so the pool starts off empty.
+ __ EmitLiteralPool(JumpRequired);
+ ASSERT_LITERAL_POOL_SIZE(0);
+
+ __ Ldr(x0, 0x1234567890abcdefUL);
+ __ Ldr(w1, 0xfedcba09);
+ __ Ldr(d0, 1.234);
+ __ Ldr(s1, 2.5);
+ ASSERT_LITERAL_POOL_SIZE(4);
+
+ code_size += 4 * sizeof(Instr);
+
+ // Check that the requested range (allowing space for a branch over the pool)
+ // can be handled by this test.
+ ASSERT((code_size + pool_guard_size) <= range);
+
+ // Emit NOPs up to 'range', leaving space for the pool guard.
+ while ((code_size + pool_guard_size) < range) {
+ __ Nop();
+ code_size += sizeof(Instr);
+ }
+
+ // Emit the guard sequence before the literal pool.
+ if (option == NoJumpRequired) {
+ __ B(&label_1);
+ code_size += sizeof(Instr);
+ }
+
+ ASSERT(code_size == range);
+ ASSERT_LITERAL_POOL_SIZE(4);
+
+ // Possibly generate a literal pool.
+ __ CheckLiteralPool(option);
+ __ Bind(&label_1);
+ if (expect_dump) {
+ ASSERT_LITERAL_POOL_SIZE(0);
+ } else {
+ ASSERT_LITERAL_POOL_SIZE(4);
+ }
+
+ // Force a pool flush to check that a second pool functions correctly.
+ __ EmitLiteralPool(JumpRequired);
+ ASSERT_LITERAL_POOL_SIZE(0);
+
+ // These loads should be after the pool (and will require a new one).
+ __ Ldr(x4, 0x34567890abcdef12UL);
+ __ Ldr(w5, 0xdcba09fe);
+ __ Ldr(d4, 123.4);
+ __ Ldr(s5, 250.0);
+ ASSERT_LITERAL_POOL_SIZE(4);
+ END();
+
+ RUN();
+
+ // Check that the literals were loaded correctly.
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x0);
+ ASSERT_EQUAL_64(0xfedcba09, x1);
+ ASSERT_EQUAL_FP64(1.234, d0);
+ ASSERT_EQUAL_FP32(2.5, s1);
+ ASSERT_EQUAL_64(0x34567890abcdef12UL, x4);
+ ASSERT_EQUAL_64(0xdcba09fe, x5);
+ ASSERT_EQUAL_FP64(123.4, d4);
+ ASSERT_EQUAL_FP32(250.0, s5);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_literal_range_1) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
+ NoJumpRequired,
+ true);
+}
+
+
+TEST(ldr_literal_range_2) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kRecommendedLiteralPoolRange-sizeof(Instr),
+ NoJumpRequired,
+ false);
+}
+
+
+TEST(ldr_literal_range_3) {
+ INIT_V8();
+ LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
+ JumpRequired,
+ true);
+}
+
+
+TEST(ldr_literal_range_4) {
+ INIT_V8();
+ LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange-sizeof(Instr),
+ JumpRequired,
+ false);
+}
+
+
+TEST(ldr_literal_range_5) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
+ JumpRequired,
+ false);
+}
+
+
+TEST(ldr_literal_range_6) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kLiteralPoolCheckInterval-sizeof(Instr),
+ JumpRequired,
+ false);
+}
+#endif
+
+TEST(add_sub_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x0);
+ __ Mov(x1, 0x1111);
+ __ Mov(x2, 0xffffffffffffffffL);
+ __ Mov(x3, 0x8000000000000000L);
+
+ __ Add(x10, x0, Operand(0x123));
+ __ Add(x11, x1, Operand(0x122000));
+ __ Add(x12, x0, Operand(0xabc << 12));
+ __ Add(x13, x2, Operand(1));
+
+ __ Add(w14, w0, Operand(0x123));
+ __ Add(w15, w1, Operand(0x122000));
+ __ Add(w16, w0, Operand(0xabc << 12));
+ __ Add(w17, w2, Operand(1));
+
+ __ Sub(x20, x0, Operand(0x1));
+ __ Sub(x21, x1, Operand(0x111));
+ __ Sub(x22, x1, Operand(0x1 << 12));
+ __ Sub(x23, x3, Operand(1));
+
+ __ Sub(w24, w0, Operand(0x1));
+ __ Sub(w25, w1, Operand(0x111));
+ __ Sub(w26, w1, Operand(0x1 << 12));
+ __ Sub(w27, w3, Operand(1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x123, x10);
+ ASSERT_EQUAL_64(0x123111, x11);
+ ASSERT_EQUAL_64(0xabc000, x12);
+ ASSERT_EQUAL_64(0x0, x13);
+
+ ASSERT_EQUAL_32(0x123, w14);
+ ASSERT_EQUAL_32(0x123111, w15);
+ ASSERT_EQUAL_32(0xabc000, w16);
+ ASSERT_EQUAL_32(0x0, w17);
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x20);
+ ASSERT_EQUAL_64(0x1000, x21);
+ ASSERT_EQUAL_64(0x111, x22);
+ ASSERT_EQUAL_64(0x7fffffffffffffffL, x23);
+
+ ASSERT_EQUAL_32(0xffffffff, w24);
+ ASSERT_EQUAL_32(0x1000, w25);
+ ASSERT_EQUAL_32(0x111, w26);
+ ASSERT_EQUAL_32(0xffffffff, w27);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x0);
+ __ Mov(x1, 0x1);
+
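+ // These immediates do not fit the 12-bit (optionally shifted) add/sub
+ // encoding, so the MacroAssembler materialises them in a scratch register
+ // first.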
+ __ Add(x10, x0, Operand(0x1234567890abcdefUL));
+ __ Add(x11, x1, Operand(0xffffffff));
+
+ __ Add(w12, w0, Operand(0x12345678));
+ __ Add(w13, w1, Operand(0xffffffff));
+
+ __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
+
+ __ Sub(w21, w0, Operand(0x12345678));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
+ ASSERT_EQUAL_64(0x100000000UL, x11);
+
+ ASSERT_EQUAL_32(0x12345678, w12);
+ ASSERT_EQUAL_64(0x0, x13);
+
+ ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
+
+ ASSERT_EQUAL_32(-0x12345678, w21);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_shifted) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(x3, 0xffffffffffffffffL);
+
+ __ Add(x10, x1, Operand(x2));
+ __ Add(x11, x0, Operand(x1, LSL, 8));
+ __ Add(x12, x0, Operand(x1, LSR, 8));
+ __ Add(x13, x0, Operand(x1, ASR, 8));
+ __ Add(x14, x0, Operand(x2, ASR, 8));
+ __ Add(w15, w0, Operand(w1, ASR, 8));
+ __ Add(w18, w3, Operand(w1, ROR, 8));
+ __ Add(x19, x3, Operand(x1, ROR, 8));
+
+ __ Sub(x20, x3, Operand(x2));
+ __ Sub(x21, x3, Operand(x1, LSL, 8));
+ __ Sub(x22, x3, Operand(x1, LSR, 8));
+ __ Sub(x23, x3, Operand(x1, ASR, 8));
+ __ Sub(x24, x3, Operand(x2, ASR, 8));
+ __ Sub(w25, w3, Operand(w1, ASR, 8));
+ __ Sub(w26, w3, Operand(w1, ROR, 8));
+ __ Sub(x27, x3, Operand(x1, ROR, 8));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
+ ASSERT_EQUAL_64(0x23456789abcdef00L, x11);
+ ASSERT_EQUAL_64(0x000123456789abcdL, x12);
+ ASSERT_EQUAL_64(0x000123456789abcdL, x13);
+ ASSERT_EQUAL_64(0xfffedcba98765432L, x14);
+ ASSERT_EQUAL_64(0xff89abcd, x15);
+ ASSERT_EQUAL_64(0xef89abcc, x18);
+ ASSERT_EQUAL_64(0xef0123456789abccL, x19);
+
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x20);
+ ASSERT_EQUAL_64(0xdcba9876543210ffL, x21);
+ ASSERT_EQUAL_64(0xfffedcba98765432L, x22);
+ ASSERT_EQUAL_64(0xfffedcba98765432L, x23);
+ ASSERT_EQUAL_64(0x000123456789abcdL, x24);
+ ASSERT_EQUAL_64(0x00765432, x25);
+ ASSERT_EQUAL_64(0x10765432, x26);
+ ASSERT_EQUAL_64(0x10fedcba98765432L, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_extended) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(w3, 0x80);
+
+ __ Add(x10, x0, Operand(x1, UXTB, 0));
+ __ Add(x11, x0, Operand(x1, UXTB, 1));
+ __ Add(x12, x0, Operand(x1, UXTH, 2));
+ __ Add(x13, x0, Operand(x1, UXTW, 4));
+
+ __ Add(x14, x0, Operand(x1, SXTB, 0));
+ __ Add(x15, x0, Operand(x1, SXTB, 1));
+ __ Add(x16, x0, Operand(x1, SXTH, 2));
+ __ Add(x17, x0, Operand(x1, SXTW, 3));
+ __ Add(x18, x0, Operand(x2, SXTB, 0));
+ __ Add(x19, x0, Operand(x2, SXTB, 1));
+ __ Add(x20, x0, Operand(x2, SXTH, 2));
+ __ Add(x21, x0, Operand(x2, SXTW, 3));
+
+ __ Add(x22, x1, Operand(x2, SXTB, 1));
+ __ Sub(x23, x1, Operand(x2, SXTB, 1));
+
+ __ Add(w24, w1, Operand(w2, UXTB, 2));
+ __ Add(w25, w0, Operand(w1, SXTB, 0));
+ __ Add(w26, w0, Operand(w1, SXTB, 1));
+ __ Add(w27, w2, Operand(w1, SXTW, 3));
+
+ __ Add(w28, w0, Operand(w1, SXTW, 3));
+ __ Add(x29, x0, Operand(w1, SXTW, 3));
+
+ __ Sub(x30, x0, Operand(w3, SXTB, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xefL, x10);
+ ASSERT_EQUAL_64(0x1deL, x11);
+ ASSERT_EQUAL_64(0x337bcL, x12);
+ ASSERT_EQUAL_64(0x89abcdef0L, x13);
+
+ ASSERT_EQUAL_64(0xffffffffffffffefL, x14);
+ ASSERT_EQUAL_64(0xffffffffffffffdeL, x15);
+ ASSERT_EQUAL_64(0xffffffffffff37bcL, x16);
+ ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17);
+ ASSERT_EQUAL_64(0x10L, x18);
+ ASSERT_EQUAL_64(0x20L, x19);
+ ASSERT_EQUAL_64(0xc840L, x20);
+ ASSERT_EQUAL_64(0x3b2a19080L, x21);
+
+ ASSERT_EQUAL_64(0x0123456789abce0fL, x22);
+ ASSERT_EQUAL_64(0x0123456789abcdcfL, x23);
+
+ ASSERT_EQUAL_32(0x89abce2f, w24);
+ ASSERT_EQUAL_32(0xffffffef, w25);
+ ASSERT_EQUAL_32(0xffffffde, w26);
+ ASSERT_EQUAL_32(0xc3b2a188, w27);
+
+ ASSERT_EQUAL_32(0x4d5e6f78, w28);
+ ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29);
+
+ ASSERT_EQUAL_64(256, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_negative) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 4687);
+ __ Mov(x2, 0x1122334455667788);
+ __ Mov(w3, 0x11223344);
+ __ Mov(w4, 400000);
+
+ __ Add(x10, x0, -42);
+ __ Add(x11, x1, -687);
+ __ Add(x12, x2, -0x88);
+
+ __ Sub(x13, x0, -600);
+ __ Sub(x14, x1, -313);
+ __ Sub(x15, x2, -0x555);
+
+ __ Add(w19, w3, -0x344);
+ __ Add(w20, w4, -2000);
+
+ __ Sub(w21, w3, -0xbc);
+ __ Sub(w22, w4, -2000);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(-42, x10);
+ ASSERT_EQUAL_64(4000, x11);
+ ASSERT_EQUAL_64(0x1122334455667700, x12);
+
+ ASSERT_EQUAL_64(600, x13);
+ ASSERT_EQUAL_64(5000, x14);
+ ASSERT_EQUAL_64(0x1122334455667cdd, x15);
+
+ ASSERT_EQUAL_32(0x11223000, w19);
+ ASSERT_EQUAL_32(398000, w20);
+
+ ASSERT_EQUAL_32(0x11223400, w21);
+ ASSERT_EQUAL_32(402000, w22);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_zero) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0);
+ __ Mov(x2, 0);
+
+ Label blob1;
+ __ Bind(&blob1);
+ __ Add(x0, x0, 0);
+ __ Sub(x1, x1, 0);
+ __ Sub(x2, x2, xzr);
+ CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));
+
+ Label blob2;
+ __ Bind(&blob2);
+ __ Add(w3, w3, 0);
+ CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));
+
+ Label blob3;
+ __ Bind(&blob3);
+ __ Sub(w3, w3, wzr);
+ CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(0, x2);
+
+ TEARDOWN();
+}
+
+
+TEST(claim_drop_zero) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ Label start;
+ __ Bind(&start);
+ __ Claim(0);
+ __ Drop(0);
+ __ Claim(xzr, 8);
+ __ Drop(xzr, 8);
+ __ Claim(xzr, 0);
+ __ Drop(xzr, 0);
+ __ Claim(x7, 0);
+ __ Drop(x7, 0);
+ __ ClaimBySMI(xzr, 8);
+ __ DropBySMI(xzr, 8);
+ __ ClaimBySMI(xzr, 0);
+ __ DropBySMI(xzr, 0);
+ CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));
+
+ END();
+
+ RUN();
+
+ TEARDOWN();
+}
+
+
+TEST(neg) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf123456789abcdefL);
+
+ // Immediate.
+ __ Neg(x1, 0x123);
+ __ Neg(w2, 0x123);
+
+ // Shifted.
+ __ Neg(x3, Operand(x0, LSL, 1));
+ __ Neg(w4, Operand(w0, LSL, 2));
+ __ Neg(x5, Operand(x0, LSR, 3));
+ __ Neg(w6, Operand(w0, LSR, 4));
+ __ Neg(x7, Operand(x0, ASR, 5));
+ __ Neg(w8, Operand(w0, ASR, 6));
+
+ // Extended.
+ __ Neg(w9, Operand(w0, UXTB));
+ __ Neg(x10, Operand(x0, SXTB, 1));
+ __ Neg(w11, Operand(w0, UXTH, 2));
+ __ Neg(x12, Operand(x0, SXTH, 3));
+ __ Neg(w13, Operand(w0, UXTW, 4));
+ __ Neg(x14, Operand(x0, SXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1);
+ ASSERT_EQUAL_64(0xfffffedd, x2);
+ ASSERT_EQUAL_64(0x1db97530eca86422UL, x3);
+ ASSERT_EQUAL_64(0xd950c844, x4);
+ ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5);
+ ASSERT_EQUAL_64(0xf7654322, x6);
+ ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
+ ASSERT_EQUAL_64(0x01d950c9, x8);
+ ASSERT_EQUAL_64(0xffffff11, x9);
+ ASSERT_EQUAL_64(0x0000000000000022UL, x10);
+ ASSERT_EQUAL_64(0xfffcc844, x11);
+ ASSERT_EQUAL_64(0x0000000000019088UL, x12);
+ ASSERT_EQUAL_64(0x65432110, x13);
+ ASSERT_EQUAL_64(0x0000000765432110UL, x14);
+
+ TEARDOWN();
+}
+
+
+TEST(adc_sbc_shift) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x2, 0x0123456789abcdefL);
+ __ Mov(x3, 0xfedcba9876543210L);
+ __ Mov(x4, 0xffffffffffffffffL);
+
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+
+ __ Adc(x5, x2, Operand(x3));
+ __ Adc(x6, x0, Operand(x1, LSL, 60));
+ __ Sbc(x7, x4, Operand(x3, LSR, 4));
+ __ Adc(x8, x2, Operand(x3, ASR, 4));
+ __ Adc(x9, x2, Operand(x3, ROR, 8));
+
+ __ Adc(w10, w2, Operand(w3));
+ __ Adc(w11, w0, Operand(w1, LSL, 30));
+ __ Sbc(w12, w4, Operand(w3, LSR, 4));
+ __ Adc(w13, w2, Operand(w3, ASR, 4));
+ __ Adc(w14, w2, Operand(w3, ROR, 8));
+
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+
+ __ Adc(x18, x2, Operand(x3));
+ __ Adc(x19, x0, Operand(x1, LSL, 60));
+ __ Sbc(x20, x4, Operand(x3, LSR, 4));
+ __ Adc(x21, x2, Operand(x3, ASR, 4));
+ __ Adc(x22, x2, Operand(x3, ROR, 8));
+
+ __ Adc(w23, w2, Operand(w3));
+ __ Adc(w24, w0, Operand(w1, LSL, 30));
+ __ Sbc(w25, w4, Operand(w3, LSR, 4));
+ __ Adc(w26, w2, Operand(w3, ASR, 4));
+ __ Adc(w27, w2, Operand(w3, ROR, 8));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x5);
+ ASSERT_EQUAL_64(1L << 60, x6);
+ ASSERT_EQUAL_64(0xf0123456789abcddL, x7);
+ ASSERT_EQUAL_64(0x0111111111111110L, x8);
+ ASSERT_EQUAL_64(0x1222222222222221L, x9);
+
+ ASSERT_EQUAL_32(0xffffffff, w10);
+ ASSERT_EQUAL_32(1 << 30, w11);
+ ASSERT_EQUAL_32(0xf89abcdd, w12);
+ ASSERT_EQUAL_32(0x91111110, w13);
+ ASSERT_EQUAL_32(0x9a222221, w14);
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18);
+ ASSERT_EQUAL_64((1L << 60) + 1, x19);
+ ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20);
+ ASSERT_EQUAL_64(0x0111111111111110L + 1, x21);
+ ASSERT_EQUAL_64(0x1222222222222221L + 1, x22);
+
+ ASSERT_EQUAL_32(0xffffffff + 1, w23);
+ ASSERT_EQUAL_32((1 << 30) + 1, w24);
+ ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
+ ASSERT_EQUAL_32(0x91111110 + 1, w26);
+ ASSERT_EQUAL_32(0x9a222221 + 1, w27);
+
+ // Check that adc correctly sets the condition flags.
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000000000000L);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, ASR, 63));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(x0, 0x10);
+ __ Mov(x1, 0x07ffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, LSL, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x10);
+
+ // Check that sbc correctly sets the condition flags.
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Sbcs(x10, x0, Operand(x1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Sbcs(x10, x0, Operand(x1, LSR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000001L, x10);
+
+ START();
+ __ Mov(x0, 0);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(w0, 0x7fffffff);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Ngcs(w10, w0);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x10);
+
+ START();
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Ngcs(x10, 0x7fffffffffffffffL);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x10);
+
+ START();
+ __ Mov(x0, 0);
+ // Set the C flag.
+ __ Cmp(x0, Operand(x0));
+ __ Sbcs(x10, x0, Operand(1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
+
+ START();
+ __ Mov(x0, 0);
+ // Set the C flag.
+ __ Cmp(x0, Operand(x0));
+ __ Ngcs(x10, 0x7fffffffffffffffL);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000001L, x10);
+
+ TEARDOWN();
+}
+
+
+TEST(adc_sbc_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x2, 0x0123456789abcdefL);
+
+ __ Adc(x10, x1, Operand(w2, UXTB, 1));
+ __ Adc(x11, x1, Operand(x2, SXTH, 2));
+ __ Sbc(x12, x1, Operand(w2, UXTW, 4));
+ __ Adc(x13, x1, Operand(x2, UXTX, 4));
+
+ __ Adc(w14, w1, Operand(w2, UXTB, 1));
+ __ Adc(w15, w1, Operand(w2, SXTH, 2));
+ __ Adc(w9, w1, Operand(w2, UXTW, 4));
+
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+
+ __ Adc(x20, x1, Operand(w2, UXTB, 1));
+ __ Adc(x21, x1, Operand(x2, SXTH, 2));
+ __ Sbc(x22, x1, Operand(w2, UXTW, 4));
+ __ Adc(x23, x1, Operand(x2, UXTX, 4));
+
+ __ Adc(w24, w1, Operand(w2, UXTB, 1));
+ __ Adc(w25, w1, Operand(w2, SXTH, 2));
+ __ Adc(w26, w1, Operand(w2, UXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1df, x10);
+ ASSERT_EQUAL_64(0xffffffffffff37bdL, x11);
+ ASSERT_EQUAL_64(0xfffffff765432110L, x12);
+ ASSERT_EQUAL_64(0x123456789abcdef1L, x13);
+
+ ASSERT_EQUAL_32(0x1df, w14);
+ ASSERT_EQUAL_32(0xffff37bd, w15);
+ ASSERT_EQUAL_32(0x9abcdef1, w9);
+
+ ASSERT_EQUAL_64(0x1df + 1, x20);
+ ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21);
+ ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22);
+ ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23);
+
+ ASSERT_EQUAL_32(0x1df + 1, w24);
+ ASSERT_EQUAL_32(0xffff37bd + 1, w25);
+ ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
+
+ // Check that adc correctly sets the condition flags.
+ START();
+ __ Mov(x0, 0xff);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, SXTX, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(CFlag);
+
+ START();
+ __ Mov(x0, 0x7fffffffffffffffL);
+ __ Mov(x1, 1);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, UXTB, 2));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ START();
+ __ Mov(x0, 0x7fffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ TEARDOWN();
+}
+
+
+TEST(adc_sbc_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+
+ __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
+ __ Adc(w8, w0, Operand(0xffffffff));
+ __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
+ __ Sbc(w10, w0, Operand(0xffffffff));
+ __ Ngc(x11, Operand(0xffffffff00000000UL));
+ __ Ngc(w12, Operand(0xffff0000));
+
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+
+ __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
+ __ Adc(w19, w0, Operand(0xffffffff));
+ __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
+ __ Sbc(w21, w0, Operand(0xffffffff));
+ __ Ngc(x22, Operand(0xffffffff00000000UL));
+ __ Ngc(w23, Operand(0xffff0000));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x7);
+ ASSERT_EQUAL_64(0xffffffff, x8);
+ ASSERT_EQUAL_64(0xedcba9876f543210UL, x9);
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(0xffffffff, x11);
+ ASSERT_EQUAL_64(0xffff, x12);
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0xedcba9876f543211UL, x20);
+ ASSERT_EQUAL_64(1, x21);
+ ASSERT_EQUAL_64(0x100000000UL, x22);
+ ASSERT_EQUAL_64(0x10000, x23);
+
+ TEARDOWN();
+}
+
+
+TEST(flags) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x1111111111111111L);
+ __ Neg(x10, Operand(x0));
+ __ Neg(x11, Operand(x1));
+ __ Neg(w12, Operand(w1));
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Ngc(x13, Operand(x0));
+ // Set the C flag.
+ __ Cmp(x0, Operand(x0));
+ __ Ngc(w14, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(-0x1111111111111111L, x11);
+ ASSERT_EQUAL_32(-0x11111111, w12);
+ ASSERT_EQUAL_64(-1L, x13);
+ ASSERT_EQUAL_32(0, w14);
+
+ START();
+ __ Mov(x0, 0);
+ __ Cmp(x0, Operand(x0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Cmp(w0, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x1111111111111111L);
+ __ Cmp(x0, Operand(x1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 0x11111111);
+ __ Cmp(w0, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+
+ START();
+ __ Mov(x1, 0x1111111111111111L);
+ __ Cmp(x1, Operand(0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(CFlag);
+
+ START();
+ __ Mov(w1, 0x11111111);
+ __ Cmp(w1, Operand(0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(CFlag);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x7fffffffffffffffL);
+ __ Cmn(x1, Operand(x0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ START();
+ __ Mov(w0, 1);
+ __ Mov(w1, 0x7fffffff);
+ __ Cmn(w1, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0xffffffffffffffffL);
+ __ Cmn(x1, Operand(x0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(w0, 1);
+ __ Mov(w1, 0xffffffff);
+ __ Cmn(w1, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 1);
+ // Clear the C flag.
+ __ Adds(w0, w0, Operand(0));
+ __ Ngcs(w0, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 0);
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+ __ Ngcs(w0, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ TEARDOWN();
+}
+
+
+TEST(cmp_shift) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x18, 0xf0000000);
+ __ Mov(x19, 0xf000000010000000UL);
+ __ Mov(x20, 0xf0000000f0000000UL);
+ __ Mov(x21, 0x7800000078000000UL);
+ __ Mov(x22, 0x3c0000003c000000UL);
+ __ Mov(x23, 0x8000000780000000UL);
+ __ Mov(x24, 0x0000000f00000000UL);
+ __ Mov(x25, 0x00000003c0000000UL);
+ __ Mov(x26, 0x8000000780000000UL);
+ __ Mov(x27, 0xc0000003);
+
+ __ Cmp(w20, Operand(w21, LSL, 1));
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(x20, Operand(x22, LSL, 2));
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(w19, Operand(w23, LSR, 3));
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x18, Operand(x24, LSR, 4));
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(w20, Operand(w25, ASR, 2));
+ __ Mrs(x4, NZCV);
+
+ __ Cmp(x20, Operand(x26, ASR, 3));
+ __ Mrs(x5, NZCV);
+
+ __ Cmp(w27, Operand(w22, ROR, 28));
+ __ Mrs(x6, NZCV);
+
+ __ Cmp(x20, Operand(x21, ROR, 31));
+ __ Mrs(x7, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(ZCFlag, w1);
+ ASSERT_EQUAL_32(ZCFlag, w2);
+ ASSERT_EQUAL_32(ZCFlag, w3);
+ ASSERT_EQUAL_32(ZCFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+ ASSERT_EQUAL_32(ZCFlag, w6);
+ ASSERT_EQUAL_32(ZCFlag, w7);
+
+ TEARDOWN();
+}
+
+
+TEST(cmp_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w20, 0x2);
+ __ Mov(w21, 0x1);
+ __ Mov(x22, 0xffffffffffffffffUL);
+ __ Mov(x23, 0xff);
+ __ Mov(x24, 0xfffffffffffffffeUL);
+ __ Mov(x25, 0xffff);
+ __ Mov(x26, 0xffffffff);
+
+ __ Cmp(w20, Operand(w21, LSL, 1));
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(x22, Operand(x23, SXTB, 0));
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(x24, Operand(x23, SXTB, 1));
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x24, Operand(x23, UXTB, 1));
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(w22, Operand(w25, UXTH));
+ __ Mrs(x4, NZCV);
+
+ __ Cmp(x22, Operand(x25, SXTH));
+ __ Mrs(x5, NZCV);
+
+ __ Cmp(x22, Operand(x26, UXTW));
+ __ Mrs(x6, NZCV);
+
+ __ Cmp(x24, Operand(x26, SXTW, 1));
+ __ Mrs(x7, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(ZCFlag, w1);
+ ASSERT_EQUAL_32(ZCFlag, w2);
+ ASSERT_EQUAL_32(NCFlag, w3);
+ ASSERT_EQUAL_32(NCFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+ ASSERT_EQUAL_32(NCFlag, w6);
+ ASSERT_EQUAL_32(ZCFlag, w7);
+
+ TEARDOWN();
+}
+
+
+TEST(ccmp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w16, 0);
+ __ Mov(w17, 1);
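+ // If the condition holds, Ccmp and Ccmn perform the comparison and set NZCV
+ // from its result; otherwise they set NZCV to the immediate flags operand.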
+ __ Cmp(w16, w16);
+ __ Ccmp(w16, w17, NCFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(w16, w16);
+ __ Ccmp(w16, w17, NCFlag, ne);
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(x16, x16);
+ __ Ccmn(x16, 2, NZCVFlag, eq);
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x16, x16);
+ __ Ccmn(x16, 2, NZCVFlag, ne);
+ __ Mrs(x3, NZCV);
+
+ __ ccmp(x16, x16, NZCVFlag, al);
+ __ Mrs(x4, NZCV);
+
+ __ ccmp(x16, x16, NZCVFlag, nv);
+ __ Mrs(x5, NZCV);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(NFlag, w0);
+ ASSERT_EQUAL_32(NCFlag, w1);
+ ASSERT_EQUAL_32(NoFlag, w2);
+ ASSERT_EQUAL_32(NZCVFlag, w3);
+ ASSERT_EQUAL_32(ZCFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+
+ TEARDOWN();
+}
+
+
+TEST(ccmp_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w20, 0);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
+ __ Mrs(x1, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(NFlag, w0);
+ ASSERT_EQUAL_32(NoFlag, w1);
+
+ TEARDOWN();
+}
+
+
+TEST(ccmp_shift_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w20, 0x2);
+ __ Mov(w21, 0x1);
+ __ Mov(x22, 0xffffffffffffffffUL);
+ __ Mov(x23, 0xff);
+ __ Mov(x24, 0xfffffffffffffffeUL);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
+ __ Mrs(x4, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(ZCFlag, w1);
+ ASSERT_EQUAL_32(ZCFlag, w2);
+ ASSERT_EQUAL_32(NCFlag, w3);
+ ASSERT_EQUAL_32(NZCVFlag, w4);
+
+ TEARDOWN();
+}
+
+
+TEST(csel) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x24, 0x0000000f0000000fUL);
+ __ Mov(x25, 0x0000001f0000001fUL);
+ __ Mov(x26, 0);
+ __ Mov(x27, 0);
+
+ __ Cmp(w16, 0);
+ __ Csel(w0, w24, w25, eq);
+ __ Csel(w1, w24, w25, ne);
+ __ Csinc(w2, w24, w25, mi);
+ __ Csinc(w3, w24, w25, pl);
+
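+ // The al and nv conditions both mean "always", so these select the first
+ // register.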
+ __ csel(w13, w24, w25, al);
+ __ csel(x14, x24, x25, nv);
+
+ __ Cmp(x16, 1);
+ __ Csinv(x4, x24, x25, gt);
+ __ Csinv(x5, x24, x25, le);
+ __ Csneg(x6, x24, x25, hs);
+ __ Csneg(x7, x24, x25, lo);
+
+ __ Cset(w8, ne);
+ __ Csetm(w9, ne);
+ __ Cinc(x10, x25, ne);
+ __ Cinv(x11, x24, ne);
+ __ Cneg(x12, x24, ne);
+
+ __ csel(w15, w24, w25, al);
+ __ csel(x18, x24, x25, nv);
+
+ __ CzeroX(x24, ne);
+ __ CzeroX(x25, eq);
+
+ __ CmovX(x26, x25, ne);
+ __ CmovX(x27, x25, eq);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0000000f, x0);
+ ASSERT_EQUAL_64(0x0000001f, x1);
+ ASSERT_EQUAL_64(0x00000020, x2);
+ ASSERT_EQUAL_64(0x0000000f, x3);
+ ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x5);
+ ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x7);
+ ASSERT_EQUAL_64(0x00000001, x8);
+ ASSERT_EQUAL_64(0xffffffff, x9);
+ ASSERT_EQUAL_64(0x0000001f00000020UL, x10);
+ ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11);
+ ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12);
+ ASSERT_EQUAL_64(0x0000000f, x13);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x14);
+ ASSERT_EQUAL_64(0x0000000f, x15);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x18);
+ ASSERT_EQUAL_64(0, x24);
+ ASSERT_EQUAL_64(0x0000001f0000001fUL, x25);
+ ASSERT_EQUAL_64(0x0000001f0000001fUL, x26);
+ ASSERT_EQUAL_64(0, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(csel_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x18, 0);
+ __ Mov(x19, 0x80000000);
+ __ Mov(x20, 0x8000000000000000UL);
+
+ __ Cmp(x18, Operand(0));
+ __ Csel(w0, w19, -2, ne);
+ __ Csel(w1, w19, -1, ne);
+ __ Csel(w2, w19, 0, ne);
+ __ Csel(w3, w19, 1, ne);
+ __ Csel(w4, w19, 2, ne);
+ __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
+ __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
+ __ Csel(w7, w19, 3, eq);
+
+ __ Csel(x8, x20, -2, ne);
+ __ Csel(x9, x20, -1, ne);
+ __ Csel(x10, x20, 0, ne);
+ __ Csel(x11, x20, 1, ne);
+ __ Csel(x12, x20, 2, ne);
+ __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
+ __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
+ __ Csel(x15, x20, 3, eq);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(-2, w0);
+ ASSERT_EQUAL_32(-1, w1);
+ ASSERT_EQUAL_32(0, w2);
+ ASSERT_EQUAL_32(1, w3);
+ ASSERT_EQUAL_32(2, w4);
+ ASSERT_EQUAL_32(-1, w5);
+ ASSERT_EQUAL_32(0x40000000, w6);
+ ASSERT_EQUAL_32(0x80000000, w7);
+
+ ASSERT_EQUAL_64(-2, x8);
+ ASSERT_EQUAL_64(-1, x9);
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(1, x11);
+ ASSERT_EQUAL_64(2, x12);
+ ASSERT_EQUAL_64(-1, x13);
+ ASSERT_EQUAL_64(0x4000000000000000UL, x14);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x15);
+
+ TEARDOWN();
+}
+
+
+TEST(lslv) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t value = 0x0123456789abcdefUL;
+ int shift[] = {1, 3, 5, 9, 17, 33};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ lslv(x0, x0, xzr);
+
+ __ Lsl(x16, x0, x1);
+ __ Lsl(x17, x0, x2);
+ __ Lsl(x18, x0, x3);
+ __ Lsl(x19, x0, x4);
+ __ Lsl(x20, x0, x5);
+ __ Lsl(x21, x0, x6);
+
+ __ Lsl(w22, w0, w1);
+ __ Lsl(w23, w0, w2);
+ __ Lsl(w24, w0, w3);
+ __ Lsl(w25, w0, w4);
+ __ Lsl(w26, w0, w5);
+ __ Lsl(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
+ ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
+ ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
+ ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
+ ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
+ ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
+ ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
+ ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
+ ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
+ ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
+ ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
+ ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
+
+ TEARDOWN();
+}
+
+
+TEST(lsrv) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t value = 0x0123456789abcdefUL;
+ int shift[] = {1, 3, 5, 9, 17, 33};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ lsrv(x0, x0, xzr);
+
+ __ Lsr(x16, x0, x1);
+ __ Lsr(x17, x0, x2);
+ __ Lsr(x18, x0, x3);
+ __ Lsr(x19, x0, x4);
+ __ Lsr(x20, x0, x5);
+ __ Lsr(x21, x0, x6);
+
+ __ Lsr(w22, w0, w1);
+ __ Lsr(w23, w0, w2);
+ __ Lsr(w24, w0, w3);
+ __ Lsr(w25, w0, w4);
+ __ Lsr(w26, w0, w5);
+ __ Lsr(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
+ ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
+ ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
+ ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
+ ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
+ ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+
+ value &= 0xffffffffUL;
+ ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
+ ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
+ ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
+ ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
+ ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
+ ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
+
+ TEARDOWN();
+}
+
+
+TEST(asrv) {
+ INIT_V8();
+ SETUP();
+
+ int64_t value = 0xfedcba98fedcba98UL;
+ int shift[] = {1, 3, 5, 9, 17, 33};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ asrv(x0, x0, xzr);
+
+ __ Asr(x16, x0, x1);
+ __ Asr(x17, x0, x2);
+ __ Asr(x18, x0, x3);
+ __ Asr(x19, x0, x4);
+ __ Asr(x20, x0, x5);
+ __ Asr(x21, x0, x6);
+
+ __ Asr(w22, w0, w1);
+ __ Asr(w23, w0, w2);
+ __ Asr(w24, w0, w3);
+ __ Asr(w25, w0, w4);
+ __ Asr(w26, w0, w5);
+ __ Asr(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
+ ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
+ ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
+ ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
+ ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
+ ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+
+ int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
+ ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
+ ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
+ ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
+ ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
+ ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
+ ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
+
+ TEARDOWN();
+}
+
+
+TEST(rorv) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t value = 0x0123456789abcdefUL;
+ int shift[] = {4, 8, 12, 16, 24, 36};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ rorv(x0, x0, xzr);
+
+ __ Ror(x16, x0, x1);
+ __ Ror(x17, x0, x2);
+ __ Ror(x18, x0, x3);
+ __ Ror(x19, x0, x4);
+ __ Ror(x20, x0, x5);
+ __ Ror(x21, x0, x6);
+
+ __ Ror(w22, w0, w1);
+ __ Ror(w23, w0, w2);
+ __ Ror(w24, w0, w3);
+ __ Ror(w25, w0, w4);
+ __ Ror(w26, w0, w5);
+ __ Ror(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16);
+ ASSERT_EQUAL_64(0xef0123456789abcdUL, x17);
+ ASSERT_EQUAL_64(0xdef0123456789abcUL, x18);
+ ASSERT_EQUAL_64(0xcdef0123456789abUL, x19);
+ ASSERT_EQUAL_64(0xabcdef0123456789UL, x20);
+ ASSERT_EQUAL_64(0x789abcdef0123456UL, x21);
+ ASSERT_EQUAL_32(0xf89abcde, w22);
+ ASSERT_EQUAL_32(0xef89abcd, w23);
+ ASSERT_EQUAL_32(0xdef89abc, w24);
+ ASSERT_EQUAL_32(0xcdef89ab, w25);
+ ASSERT_EQUAL_32(0xabcdef89, w26);
+ ASSERT_EQUAL_32(0xf89abcde, w27);
+
+ TEARDOWN();
+}
+
+
+TEST(bfm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+
+ __ Mov(x10, 0x8888888888888888L);
+ __ Mov(x11, 0x8888888888888888L);
+ __ Mov(x12, 0x8888888888888888L);
+ __ Mov(x13, 0x8888888888888888L);
+ __ Mov(w20, 0x88888888);
+ __ Mov(w21, 0x88888888);
+
+ __ bfm(x10, x1, 16, 31);
+ __ bfm(x11, x1, 32, 15);
+
+ __ bfm(w20, w1, 16, 23);
+ __ bfm(w21, w1, 24, 15);
+
+ // Aliases.
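+ // Bfi inserts the low <width> bits of the source at <lsb> in the
+ // destination; Bfxil copies <width> bits starting at <lsb> of the source
+ // into the low bits of the destination.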
+ __ Bfi(x12, x1, 16, 8);
+ __ Bfxil(x13, x1, 16, 8);
+ END();
+
+ RUN();
+
+
+ ASSERT_EQUAL_64(0x88888888888889abL, x10);
+ ASSERT_EQUAL_64(0x8888cdef88888888L, x11);
+
+ ASSERT_EQUAL_32(0x888888ab, w20);
+ ASSERT_EQUAL_32(0x88cdef88, w21);
+
+ ASSERT_EQUAL_64(0x8888888888ef8888L, x12);
+ ASSERT_EQUAL_64(0x88888888888888abL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(sbfm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+
+ __ sbfm(x10, x1, 16, 31);
+ __ sbfm(x11, x1, 32, 15);
+ __ sbfm(x12, x1, 32, 47);
+ __ sbfm(x13, x1, 48, 35);
+
+ __ sbfm(w14, w1, 16, 23);
+ __ sbfm(w15, w1, 24, 15);
+ __ sbfm(w16, w2, 16, 23);
+ __ sbfm(w17, w2, 24, 15);
+
+ // Aliases.
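+ // Asr (immediate), Sbfiz, Sbfx and the sign extensions (Sxtb, Sxth, Sxtw)
+ // are all encoded as sbfm.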
+ __ Asr(x18, x1, 32);
+ __ Asr(x19, x2, 32);
+ __ Sbfiz(x20, x1, 8, 16);
+ __ Sbfiz(x21, x2, 8, 16);
+ __ Sbfx(x22, x1, 8, 16);
+ __ Sbfx(x23, x2, 8, 16);
+ __ Sxtb(x24, w1);
+ __ Sxtb(x25, x2);
+ __ Sxth(x26, w1);
+ __ Sxth(x27, x2);
+ __ Sxtw(x28, w1);
+ __ Sxtw(x29, x2);
+ END();
+
+ RUN();
+
+
+ ASSERT_EQUAL_64(0xffffffffffff89abL, x10);
+ ASSERT_EQUAL_64(0xffffcdef00000000L, x11);
+ ASSERT_EQUAL_64(0x4567L, x12);
+ ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+
+ ASSERT_EQUAL_32(0xffffffab, w14);
+ ASSERT_EQUAL_32(0xffcdef00, w15);
+ ASSERT_EQUAL_32(0x54, w16);
+ ASSERT_EQUAL_32(0x00321000, w17);
+
+ ASSERT_EQUAL_64(0x01234567L, x18);
+ ASSERT_EQUAL_64(0xfffffffffedcba98L, x19);
+ ASSERT_EQUAL_64(0xffffffffffcdef00L, x20);
+ ASSERT_EQUAL_64(0x321000L, x21);
+ ASSERT_EQUAL_64(0xffffffffffffabcdL, x22);
+ ASSERT_EQUAL_64(0x5432L, x23);
+ ASSERT_EQUAL_64(0xffffffffffffffefL, x24);
+ ASSERT_EQUAL_64(0x10, x25);
+ ASSERT_EQUAL_64(0xffffffffffffcdefL, x26);
+ ASSERT_EQUAL_64(0x3210, x27);
+ ASSERT_EQUAL_64(0xffffffff89abcdefL, x28);
+ ASSERT_EQUAL_64(0x76543210, x29);
+
+ TEARDOWN();
+}
+
+
+TEST(ubfm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+
+ __ Mov(x10, 0x8888888888888888L);
+ __ Mov(x11, 0x8888888888888888L);
+
+ __ ubfm(x10, x1, 16, 31);
+ __ ubfm(x11, x1, 32, 15);
+ __ ubfm(x12, x1, 32, 47);
+ __ ubfm(x13, x1, 48, 35);
+
+ __ ubfm(w25, w1, 16, 23);
+ __ ubfm(w26, w1, 24, 15);
+ __ ubfm(w27, w2, 16, 23);
+ __ ubfm(w28, w2, 24, 15);
+
+ // Aliases.
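+ // Lsl and Lsr (immediate), Ubfiz, Ubfx, Uxtb, Uxth and Uxtw are all encoded
+ // as ubfm.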
+ __ Lsl(x15, x1, 63);
+ __ Lsl(x16, x1, 0);
+ __ Lsr(x17, x1, 32);
+ __ Ubfiz(x18, x1, 8, 16);
+ __ Ubfx(x19, x1, 8, 16);
+ __ Uxtb(x20, x1);
+ __ Uxth(x21, x1);
+ __ Uxtw(x22, x1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000000000089abL, x10);
+ ASSERT_EQUAL_64(0x0000cdef00000000L, x11);
+ ASSERT_EQUAL_64(0x4567L, x12);
+ ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+
+ ASSERT_EQUAL_32(0x000000ab, w25);
+ ASSERT_EQUAL_32(0x00cdef00, w26);
+ ASSERT_EQUAL_32(0x54, w27);
+ ASSERT_EQUAL_32(0x00321000, w28);
+
+ ASSERT_EQUAL_64(0x8000000000000000L, x15);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x16);
+ ASSERT_EQUAL_64(0x01234567L, x17);
+ ASSERT_EQUAL_64(0xcdef00L, x18);
+ ASSERT_EQUAL_64(0xabcdL, x19);
+ ASSERT_EQUAL_64(0xefL, x20);
+ ASSERT_EQUAL_64(0xcdefL, x21);
+ ASSERT_EQUAL_64(0x89abcdefL, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(extr) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+
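+ // Extr extracts a register-width field from the concatenation <rn:rm>,
+ // starting at bit <lsb> of rm. Ror (immediate) is the alias where rn and rm
+ // are the same register.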
+ __ Extr(w10, w1, w2, 0);
+ __ Extr(w11, w1, w2, 1);
+ __ Extr(x12, x2, x1, 2);
+
+ __ Ror(w13, w1, 0);
+ __ Ror(w14, w2, 17);
+ __ Ror(w15, w1, 31);
+ __ Ror(x18, x2, 1);
+ __ Ror(x19, x1, 63);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x76543210, x10);
+ ASSERT_EQUAL_64(0xbb2a1908, x11);
+ ASSERT_EQUAL_64(0x0048d159e26af37bUL, x12);
+ ASSERT_EQUAL_64(0x89abcdef, x13);
+ ASSERT_EQUAL_64(0x19083b2a, x14);
+ ASSERT_EQUAL_64(0x13579bdf, x15);
+ ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x18);
+ ASSERT_EQUAL_64(0x02468acf13579bdeUL, x19);
+
+ TEARDOWN();
+}
+
+
+TEST(fmov_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s11, 1.0);
+ __ Fmov(d22, -13.0);
+ __ Fmov(s1, 255.0);
+ __ Fmov(d2, 12.34567);
+ __ Fmov(s3, 0.0);
+ __ Fmov(d4, 0.0);
+ __ Fmov(s5, kFP32PositiveInfinity);
+ __ Fmov(d6, kFP64NegativeInfinity);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s11);
+ ASSERT_EQUAL_FP64(-13.0, d22);
+ ASSERT_EQUAL_FP32(255.0, s1);
+ ASSERT_EQUAL_FP64(12.34567, d2);
+ ASSERT_EQUAL_FP32(0.0, s3);
+ ASSERT_EQUAL_FP64(0.0, d4);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);
+
+ TEARDOWN();
+}
+
+
+TEST(fmov_reg) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s20, 1.0);
+ __ Fmov(w10, s20);
+ __ Fmov(s30, w10);
+ __ Fmov(s5, s20);
+ __ Fmov(d1, -13.0);
+ __ Fmov(x1, d1);
+ __ Fmov(d2, x1);
+ __ Fmov(d4, d1);
+ __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
+ __ Fmov(s6, s6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
+ ASSERT_EQUAL_FP32(1.0, s30);
+ ASSERT_EQUAL_FP32(1.0, s5);
+ ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
+ ASSERT_EQUAL_FP64(-13.0, d2);
+ ASSERT_EQUAL_FP64(-13.0, d4);
+ ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
+
+ TEARDOWN();
+}
+
+
+TEST(fadd) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s13, -0.0);
+ __ Fmov(s14, kFP32PositiveInfinity);
+ __ Fmov(s15, kFP32NegativeInfinity);
+ __ Fmov(s16, 3.25);
+ __ Fmov(s17, 1.0);
+ __ Fmov(s18, 0);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fadd(s0, s16, s17);
+ __ Fadd(s1, s17, s18);
+ __ Fadd(s2, s13, s17);
+ __ Fadd(s3, s14, s17);
+ __ Fadd(s4, s15, s17);
+
+ __ Fadd(d5, d30, d31);
+ __ Fadd(d6, d29, d31);
+ __ Fadd(d7, d26, d31);
+ __ Fadd(d8, d27, d31);
+ __ Fadd(d9, d28, d31);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(4.25, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(1.0, s2);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
+ ASSERT_EQUAL_FP64(0.25, d5);
+ ASSERT_EQUAL_FP64(2.25, d6);
+ ASSERT_EQUAL_FP64(2.25, d7);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d8);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d9);
+
+ TEARDOWN();
+}
+
+
+TEST(fsub) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s13, -0.0);
+ __ Fmov(s14, kFP32PositiveInfinity);
+ __ Fmov(s15, kFP32NegativeInfinity);
+ __ Fmov(s16, 3.25);
+ __ Fmov(s17, 1.0);
+ __ Fmov(s18, 0);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fsub(s0, s16, s17);
+ __ Fsub(s1, s17, s18);
+ __ Fsub(s2, s13, s17);
+ __ Fsub(s3, s17, s14);
+ __ Fsub(s4, s17, s15);
+
+ __ Fsub(d5, d30, d31);
+ __ Fsub(d6, d29, d31);
+ __ Fsub(d7, d26, d31);
+ __ Fsub(d8, d31, d27);
+ __ Fsub(d9, d31, d28);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(2.25, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(-1.0, s2);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
+ ASSERT_EQUAL_FP64(-4.25, d5);
+ ASSERT_EQUAL_FP64(-2.25, d6);
+ ASSERT_EQUAL_FP64(-2.25, d7);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);
+
+ TEARDOWN();
+}
+
+
+TEST(fmul) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s13, -0.0);
+ __ Fmov(s14, kFP32PositiveInfinity);
+ __ Fmov(s15, kFP32NegativeInfinity);
+ __ Fmov(s16, 3.25);
+ __ Fmov(s17, 2.0);
+ __ Fmov(s18, 0);
+ __ Fmov(s19, -2.0);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fmul(s0, s16, s17);
+ __ Fmul(s1, s17, s18);
+ __ Fmul(s2, s13, s13);
+ __ Fmul(s3, s14, s19);
+ __ Fmul(s4, s15, s19);
+
+ __ Fmul(d5, d30, d31);
+ __ Fmul(d6, d29, d31);
+ __ Fmul(d7, d26, d26);
+ __ Fmul(d8, d27, d30);
+ __ Fmul(d9, d28, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(6.5, s0);
+ ASSERT_EQUAL_FP32(0.0, s1);
+ ASSERT_EQUAL_FP32(0.0, s2);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
+ ASSERT_EQUAL_FP64(-4.5, d5);
+ ASSERT_EQUAL_FP64(0.0, d6);
+ ASSERT_EQUAL_FP64(0.0, d7);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);
+
+ TEARDOWN();
+}
+
+
+static void FmaddFmsubDoubleHelper(double n, double m, double a,
+ double fmadd, double fmsub) {
+ SETUP();
+ START();
+
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+ __ Fmov(d2, a);
+ __ Fmadd(d28, d0, d1, d2);
+ __ Fmsub(d29, d0, d1, d2);
+ __ Fnmadd(d30, d0, d1, d2);
+ __ Fnmsub(d31, d0, d1, d2);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP64(fmadd, d28);
+ ASSERT_EQUAL_FP64(fmsub, d29);
+ ASSERT_EQUAL_FP64(-fmadd, d30);
+ ASSERT_EQUAL_FP64(-fmsub, d31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmadd_fmsub_double) {
+ INIT_V8();
+ double inputs[] = {
+ // Normal numbers, including -0.0.
+ DBL_MAX, DBL_MIN, 3.25, 2.0, 0.0,
+ -DBL_MAX, -DBL_MIN, -3.25, -2.0, -0.0,
+ // Infinities.
+ kFP64NegativeInfinity, kFP64PositiveInfinity,
+ // Subnormal numbers.
+ rawbits_to_double(0x000fffffffffffff),
+ rawbits_to_double(0x0000000000000001),
+ rawbits_to_double(0x000123456789abcd),
+ -rawbits_to_double(0x000fffffffffffff),
+ -rawbits_to_double(0x0000000000000001),
+ -rawbits_to_double(0x000123456789abcd),
+ // NaN.
+ kFP64QuietNaN,
+ -kFP64QuietNaN,
+ };
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ double n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ double m = inputs[im];
+ for (int ia = 0; ia < count; ia++) {
+ double a = inputs[ia];
+ double fmadd = fma(n, m, a);
+ double fmsub = fma(-n, m, a);
+
+ FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
+ }
+ }
+ }
+}
+
+
+TEST(fmadd_fmsub_double_rounding) {
+ INIT_V8();
+ // Make sure we run plenty of tests where an intermediate rounding stage would
+ // produce an incorrect result.
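+ // A fused multiply-add rounds only once, whereas (a + n * m) rounds after
+ // the multiply and again after the add, so the two results can differ.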
+ const int limit = 1000;
+ int count_fmadd = 0;
+ int count_fmsub = 0;
+
+ uint16_t seed[3] = {42, 43, 44};
+ seed48(seed);
+
+ while ((count_fmadd < limit) || (count_fmsub < limit)) {
+ double n, m, a;
+ uint32_t r[2];
+ ASSERT(sizeof(r) == sizeof(n));
+
+ r[0] = mrand48();
+ r[1] = mrand48();
+ memcpy(&n, r, sizeof(r));
+ r[0] = mrand48();
+ r[1] = mrand48();
+ memcpy(&m, r, sizeof(r));
+ r[0] = mrand48();
+ r[1] = mrand48();
+ memcpy(&a, r, sizeof(r));
+
+ if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
+ continue;
+ }
+
+ // Calculate the expected results.
+ double fmadd = fma(n, m, a);
+ double fmsub = fma(-n, m, a);
+
+ bool test_fmadd = (fmadd != (a + n * m));
+ bool test_fmsub = (fmsub != (a - n * m));
+
+ // If rounding would produce a different result, increment the test count.
+ count_fmadd += test_fmadd;
+ count_fmsub += test_fmsub;
+
+ if (test_fmadd || test_fmsub) {
+ FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
+ }
+ }
+}
+
+
+static void FmaddFmsubFloatHelper(float n, float m, float a,
+ float fmadd, float fmsub) {
+ SETUP();
+ START();
+
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+ __ Fmov(s2, a);
+ __ Fmadd(s30, s0, s1, s2);
+ __ Fmsub(s31, s0, s1, s2);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP32(fmadd, s30);
+ ASSERT_EQUAL_FP32(fmsub, s31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmadd_fmsub_float) {
+ INIT_V8();
+ float inputs[] = {
+ // Normal numbers, including -0.0f.
+ FLT_MAX, FLT_MIN, 3.25f, 2.0f, 0.0f,
+ -FLT_MAX, -FLT_MIN, -3.25f, -2.0f, -0.0f,
+ // Infinities.
+ kFP32NegativeInfinity, kFP32PositiveInfinity,
+ // Subnormal numbers.
+ rawbits_to_float(0x07ffffff),
+ rawbits_to_float(0x00000001),
+ rawbits_to_float(0x01234567),
+ -rawbits_to_float(0x07ffffff),
+ -rawbits_to_float(0x00000001),
+ -rawbits_to_float(0x01234567),
+ // NaN.
+ kFP32QuietNaN,
+ -kFP32QuietNaN,
+ };
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ float n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ float m = inputs[im];
+ for (int ia = 0; ia < count; ia++) {
+ float a = inputs[ia];
+ float fmadd = fmaf(n, m, a);
+ float fmsub = fmaf(-n, m, a);
+
+ FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
+ }
+ }
+ }
+}
+
+
+TEST(fmadd_fmsub_float_rounding) {
+ INIT_V8();
+ // Make sure we run plenty of tests where an intermediate rounding stage would
+ // produce an incorrect result.
+ const int limit = 1000;
+ int count_fmadd = 0;
+ int count_fmsub = 0;
+
+ uint16_t seed[3] = {42, 43, 44};
+ seed48(seed);
+
+ while ((count_fmadd < limit) || (count_fmsub < limit)) {
+ float n, m, a;
+ uint32_t r;
+ ASSERT(sizeof(r) == sizeof(n));
+
+ r = mrand48();
+ memcpy(&n, &r, sizeof(r));
+ r = mrand48();
+ memcpy(&m, &r, sizeof(r));
+ r = mrand48();
+ memcpy(&a, &r, sizeof(r));
+
+ if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
+ continue;
+ }
+
+ // Calculate the expected results.
+ float fmadd = fmaf(n, m, a);
+ float fmsub = fmaf(-n, m, a);
+
+ bool test_fmadd = (fmadd != (a + n * m));
+ bool test_fmsub = (fmsub != (a - n * m));
+
+ // If rounding would produce a different result, increment the test count.
+ count_fmadd += test_fmadd;
+ count_fmsub += test_fmsub;
+
+ if (test_fmadd || test_fmsub) {
+ FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
+ }
+ }
+}
+
+
+TEST(fdiv) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s13, -0.0);
+ __ Fmov(s14, kFP32PositiveInfinity);
+ __ Fmov(s15, kFP32NegativeInfinity);
+ __ Fmov(s16, 3.25);
+ __ Fmov(s17, 2.0);
+ __ Fmov(s18, 2.0);
+ __ Fmov(s19, -2.0);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fdiv(s0, s16, s17);
+ __ Fdiv(s1, s17, s18);
+ __ Fdiv(s2, s13, s17);
+ __ Fdiv(s3, s17, s14);
+ __ Fdiv(s4, s17, s15);
+ __ Fdiv(d5, d31, d30);
+ __ Fdiv(d6, d29, d31);
+ __ Fdiv(d7, d26, d31);
+ __ Fdiv(d8, d31, d27);
+ __ Fdiv(d9, d31, d28);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.625, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(-0.0, s2);
+ ASSERT_EQUAL_FP32(0.0, s3);
+ ASSERT_EQUAL_FP32(-0.0, s4);
+ ASSERT_EQUAL_FP64(-1.125, d5);
+ ASSERT_EQUAL_FP64(0.0, d6);
+ ASSERT_EQUAL_FP64(-0.0, d7);
+ ASSERT_EQUAL_FP64(0.0, d8);
+ ASSERT_EQUAL_FP64(-0.0, d9);
+
+ TEARDOWN();
+}
+
+
+static float MinMaxHelper(float n,
+ float m,
+ bool min,
+ float quiet_nan_substitute = 0.0) {
+ const uint64_t kFP32QuietNaNMask = 0x00400000UL;
+ uint32_t raw_n = float_to_rawbits(n);
+ uint32_t raw_m = float_to_rawbits(m);
+
+ if (std::isnan(n) && ((raw_n & kFP32QuietNaNMask) == 0)) {
+ // n is signalling NaN.
+ return n;
+ } else if (std::isnan(m) && ((raw_m & kFP32QuietNaNMask) == 0)) {
+ // m is signalling NaN.
+ return m;
+ } else if (quiet_nan_substitute == 0.0) {
+ if (std::isnan(n)) {
+ // n is quiet NaN.
+ return n;
+ } else if (std::isnan(m)) {
+ // m is quiet NaN.
+ return m;
+ }
+ } else {
+ // Substitute n or m if one is quiet, but not both.
+ if (std::isnan(n) && !std::isnan(m)) {
+ // n is quiet NaN: replace with substitute.
+ n = quiet_nan_substitute;
+ } else if (!std::isnan(n) && std::isnan(m)) {
+ // m is quiet NaN: replace with substitute.
+ m = quiet_nan_substitute;
+ }
+ }
+
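+ // Fmin returns -0.0 for min(-0.0, +0.0) and Fmax returns +0.0, but C's
+ // fminf/fmaxf may return either zero, so handle signed zeros explicitly.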
+ if ((n == 0.0) && (m == 0.0) &&
+ (copysign(1.0, n) != copysign(1.0, m))) {
+ return min ? -0.0 : 0.0;
+ }
+
+ return min ? fminf(n, m) : fmaxf(n, m);
+}
+
+
+static double MinMaxHelper(double n,
+ double m,
+ bool min,
+ double quiet_nan_substitute = 0.0) {
+ const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
+ uint64_t raw_n = double_to_rawbits(n);
+ uint64_t raw_m = double_to_rawbits(m);
+
+ if (std::isnan(n) && ((raw_n & kFP64QuietNaNMask) == 0)) {
+ // n is signalling NaN.
+ return n;
+ } else if (std::isnan(m) && ((raw_m & kFP64QuietNaNMask) == 0)) {
+ // m is signalling NaN.
+ return m;
+ } else if (quiet_nan_substitute == 0.0) {
+ if (std::isnan(n)) {
+ // n is quiet NaN.
+ return n;
+ } else if (std::isnan(m)) {
+ // m is quiet NaN.
+ return m;
+ }
+ } else {
+ // Substitute n or m if one is quiet, but not both.
+ if (std::isnan(n) && !std::isnan(m)) {
+ // n is quiet NaN: replace with substitute.
+ n = quiet_nan_substitute;
+ } else if (!std::isnan(n) && std::isnan(m)) {
+ // m is quiet NaN: replace with substitute.
+ m = quiet_nan_substitute;
+ }
+ }
+
+ if ((n == 0.0) && (m == 0.0) &&
+ (copysign(1.0, n) != copysign(1.0, m))) {
+ return min ? -0.0 : 0.0;
+ }
+
+ return min ? fmin(n, m) : fmax(n, m);
+}
+
+
+static void FminFmaxDoubleHelper(double n, double m, double min, double max,
+ double minnm, double maxnm) {
+ SETUP();
+
+ START();
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+ __ Fmin(d28, d0, d1);
+ __ Fmax(d29, d0, d1);
+ __ Fminnm(d30, d0, d1);
+ __ Fmaxnm(d31, d0, d1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(min, d28);
+ ASSERT_EQUAL_FP64(max, d29);
+ ASSERT_EQUAL_FP64(minnm, d30);
+ ASSERT_EQUAL_FP64(maxnm, d31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmax_fmin_d) {
+ INIT_V8();
+ // Bootstrap tests.
+ FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
+ FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
+ FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
+ kFP64NegativeInfinity, kFP64PositiveInfinity,
+ kFP64NegativeInfinity, kFP64PositiveInfinity);
+ FminFmaxDoubleHelper(kFP64SignallingNaN, 0,
+ kFP64SignallingNaN, kFP64SignallingNaN,
+ kFP64SignallingNaN, kFP64SignallingNaN);
+ FminFmaxDoubleHelper(kFP64QuietNaN, 0,
+ kFP64QuietNaN, kFP64QuietNaN,
+ 0, 0);
+ FminFmaxDoubleHelper(kFP64QuietNaN, kFP64SignallingNaN,
+ kFP64SignallingNaN, kFP64SignallingNaN,
+ kFP64SignallingNaN, kFP64SignallingNaN);
+
+ // Iterate over all combinations of inputs.
+ double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
+ -DBL_MAX, -DBL_MIN, -1.0, -0.0,
+ kFP64PositiveInfinity, kFP64NegativeInfinity,
+ kFP64QuietNaN, kFP64SignallingNaN };
+
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ double n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ double m = inputs[im];
+ FminFmaxDoubleHelper(n, m,
+ MinMaxHelper(n, m, true),
+ MinMaxHelper(n, m, false),
+ MinMaxHelper(n, m, true, kFP64PositiveInfinity),
+ MinMaxHelper(n, m, false, kFP64NegativeInfinity));
+ }
+ }
+}
+
+
+static void FminFmaxFloatHelper(float n, float m, float min, float max,
+ float minnm, float maxnm) {
+ SETUP();
+
+ START();
+ // TODO(all): Signalling NaNs are sometimes converted by the C compiler to
+ // quiet NaNs on implicit casts from float to double. Here, we move the raw
+ // bits into a W register first, so we get the correct value. Fix Fmov so this
+ // additional step is no longer needed.
+ __ Mov(w0, float_to_rawbits(n));
+ __ Fmov(s0, w0);
+ __ Mov(w0, float_to_rawbits(m));
+ __ Fmov(s1, w0);
+ __ Fmin(s28, s0, s1);
+ __ Fmax(s29, s0, s1);
+ __ Fminnm(s30, s0, s1);
+ __ Fmaxnm(s31, s0, s1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(min, s28);
+ ASSERT_EQUAL_FP32(max, s29);
+ ASSERT_EQUAL_FP32(minnm, s30);
+ ASSERT_EQUAL_FP32(maxnm, s31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmax_fmin_s) {
+ INIT_V8();
+ // Bootstrap tests.
+ FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
+ FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
+ FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
+ kFP32NegativeInfinity, kFP32PositiveInfinity,
+ kFP32NegativeInfinity, kFP32PositiveInfinity);
+ FminFmaxFloatHelper(kFP32SignallingNaN, 0,
+ kFP32SignallingNaN, kFP32SignallingNaN,
+ kFP32SignallingNaN, kFP32SignallingNaN);
+ FminFmaxFloatHelper(kFP32QuietNaN, 0,
+ kFP32QuietNaN, kFP32QuietNaN,
+ 0, 0);
+ FminFmaxFloatHelper(kFP32QuietNaN, kFP32SignallingNaN,
+ kFP32SignallingNaN, kFP32SignallingNaN,
+ kFP32SignallingNaN, kFP32SignallingNaN);
+
+ // Iterate over all combinations of inputs.
+ float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
+ -FLT_MAX, -FLT_MIN, -1.0, -0.0,
+ kFP32PositiveInfinity, kFP32NegativeInfinity,
+ kFP32QuietNaN, kFP32SignallingNaN };
+
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ float n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ float m = inputs[im];
+ FminFmaxFloatHelper(n, m,
+ MinMaxHelper(n, m, true),
+ MinMaxHelper(n, m, false),
+ MinMaxHelper(n, m, true, kFP32PositiveInfinity),
+ MinMaxHelper(n, m, false, kFP32NegativeInfinity));
+ }
+ }
+}
+
+
+TEST(fccmp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 0.0);
+ __ Fmov(s17, 0.5);
+ __ Fmov(d18, -0.5);
+ __ Fmov(d19, -1.0);
+ __ Mov(x20, 0);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s16, NoFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s16, VFlag, ne);
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s17, CFlag, ge);
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s17, CVFlag, lt);
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d18, ZFlag, le);
+ __ Mrs(x4, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d18, ZVFlag, gt);
+ __ Mrs(x5, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d19, ZCVFlag, ls);
+ __ Mrs(x6, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d19, NFlag, hi);
+ __ Mrs(x7, NZCV);
+
+ __ fccmp(s16, s16, NFlag, al);
+ __ Mrs(x8, NZCV);
+
+ __ fccmp(d18, d18, NFlag, nv);
+ __ Mrs(x9, NZCV);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(VFlag, w1);
+ ASSERT_EQUAL_32(NFlag, w2);
+ ASSERT_EQUAL_32(CVFlag, w3);
+ ASSERT_EQUAL_32(ZCFlag, w4);
+ ASSERT_EQUAL_32(ZVFlag, w5);
+ ASSERT_EQUAL_32(CFlag, w6);
+ ASSERT_EQUAL_32(NFlag, w7);
+ ASSERT_EQUAL_32(ZCFlag, w8);
+ ASSERT_EQUAL_32(ZCFlag, w9);
+
+ TEARDOWN();
+}
+
+
+TEST(fcmp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ // Some of these tests require a floating-point scratch register assigned to
+ // the macro assembler, but most do not.
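+ // (Fcmp against a non-zero immediate needs a scratch register to hold the
+ // immediate; Fcmp against 0.0 does not.)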
+ __ SetFPScratchRegister(NoFPReg);
+
+ __ Fmov(s8, 0.0);
+ __ Fmov(s9, 0.5);
+ __ Mov(w18, 0x7f800001); // Single precision NaN.
+ __ Fmov(s18, w18);
+
+ __ Fcmp(s8, s8);
+ __ Mrs(x0, NZCV);
+ __ Fcmp(s8, s9);
+ __ Mrs(x1, NZCV);
+ __ Fcmp(s9, s8);
+ __ Mrs(x2, NZCV);
+ __ Fcmp(s8, s18);
+ __ Mrs(x3, NZCV);
+ __ Fcmp(s18, s18);
+ __ Mrs(x4, NZCV);
+ __ Fcmp(s8, 0.0);
+ __ Mrs(x5, NZCV);
+ __ SetFPScratchRegister(d0);
+ __ Fcmp(s8, 255.0);
+ __ SetFPScratchRegister(NoFPReg);
+ __ Mrs(x6, NZCV);
+
+ __ Fmov(d19, 0.0);
+ __ Fmov(d20, 0.5);
+ __ Mov(x21, 0x7ff0000000000001UL); // Double precision NaN.
+ __ Fmov(d21, x21);
+
+ __ Fcmp(d19, d19);
+ __ Mrs(x10, NZCV);
+ __ Fcmp(d19, d20);
+ __ Mrs(x11, NZCV);
+ __ Fcmp(d20, d19);
+ __ Mrs(x12, NZCV);
+ __ Fcmp(d19, d21);
+ __ Mrs(x13, NZCV);
+ __ Fcmp(d21, d21);
+ __ Mrs(x14, NZCV);
+ __ Fcmp(d19, 0.0);
+ __ Mrs(x15, NZCV);
+ __ SetFPScratchRegister(d0);
+ __ Fcmp(d19, 12.3456);
+ __ SetFPScratchRegister(NoFPReg);
+ __ Mrs(x16, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(NFlag, w1);
+ ASSERT_EQUAL_32(CFlag, w2);
+ ASSERT_EQUAL_32(CVFlag, w3);
+ ASSERT_EQUAL_32(CVFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+ ASSERT_EQUAL_32(NFlag, w6);
+ ASSERT_EQUAL_32(ZCFlag, w10);
+ ASSERT_EQUAL_32(NFlag, w11);
+ ASSERT_EQUAL_32(CFlag, w12);
+ ASSERT_EQUAL_32(CVFlag, w13);
+ ASSERT_EQUAL_32(CVFlag, w14);
+ ASSERT_EQUAL_32(ZCFlag, w15);
+ ASSERT_EQUAL_32(NFlag, w16);
+
+ TEARDOWN();
+}
+
+
+TEST(fcsel) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 2.0);
+ __ Fmov(d18, 3.0);
+ __ Fmov(d19, 4.0);
+
+ __ Cmp(x16, 0);
+ __ Fcsel(s0, s16, s17, eq);
+ __ Fcsel(s1, s16, s17, ne);
+ __ Fcsel(d2, d18, d19, eq);
+ __ Fcsel(d3, d18, d19, ne);
+ __ fcsel(s4, s16, s17, al);
+ __ fcsel(d5, d18, d19, nv);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(2.0, s1);
+ ASSERT_EQUAL_FP64(3.0, d2);
+ ASSERT_EQUAL_FP64(4.0, d3);
+ ASSERT_EQUAL_FP32(1.0, s4);
+ ASSERT_EQUAL_FP64(3.0, d5);
+
+ TEARDOWN();
+}
+
+
+TEST(fneg) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 0.0);
+ __ Fmov(s18, kFP32PositiveInfinity);
+ __ Fmov(d19, 1.0);
+ __ Fmov(d20, 0.0);
+ __ Fmov(d21, kFP64PositiveInfinity);
+
+ __ Fneg(s0, s16);
+ __ Fneg(s1, s0);
+ __ Fneg(s2, s17);
+ __ Fneg(s3, s2);
+ __ Fneg(s4, s18);
+ __ Fneg(s5, s4);
+ __ Fneg(d6, d19);
+ __ Fneg(d7, d6);
+ __ Fneg(d8, d20);
+ __ Fneg(d9, d8);
+ __ Fneg(d10, d21);
+ __ Fneg(d11, d10);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(-1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(-0.0, s2);
+ ASSERT_EQUAL_FP32(0.0, s3);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+ ASSERT_EQUAL_FP64(-1.0, d6);
+ ASSERT_EQUAL_FP64(1.0, d7);
+ ASSERT_EQUAL_FP64(-0.0, d8);
+ ASSERT_EQUAL_FP64(0.0, d9);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+
+ TEARDOWN();
+}
+
+
+TEST(fabs) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, -1.0);
+ __ Fmov(s17, -0.0);
+ __ Fmov(s18, kFP32NegativeInfinity);
+ __ Fmov(d19, -1.0);
+ __ Fmov(d20, -0.0);
+ __ Fmov(d21, kFP64NegativeInfinity);
+
+ __ Fabs(s0, s16);
+ __ Fabs(s1, s0);
+ __ Fabs(s2, s17);
+ __ Fabs(s3, s18);
+ __ Fabs(d4, d19);
+ __ Fabs(d5, d4);
+ __ Fabs(d6, d20);
+ __ Fabs(d7, d21);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(0.0, s2);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
+ ASSERT_EQUAL_FP64(1.0, d4);
+ ASSERT_EQUAL_FP64(1.0, d5);
+ ASSERT_EQUAL_FP64(0.0, d6);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
+
+ TEARDOWN();
+}
+
+
+TEST(fsqrt) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 0.0);
+ __ Fmov(s17, 1.0);
+ __ Fmov(s18, 0.25);
+ __ Fmov(s19, 65536.0);
+ __ Fmov(s20, -0.0);
+ __ Fmov(s21, kFP32PositiveInfinity);
+ __ Fmov(d22, 0.0);
+ __ Fmov(d23, 1.0);
+ __ Fmov(d24, 0.25);
+ __ Fmov(d25, 4294967296.0);
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+
+ __ Fsqrt(s0, s16);
+ __ Fsqrt(s1, s17);
+ __ Fsqrt(s2, s18);
+ __ Fsqrt(s3, s19);
+ __ Fsqrt(s4, s20);
+ __ Fsqrt(s5, s21);
+ __ Fsqrt(d6, d22);
+ __ Fsqrt(d7, d23);
+ __ Fsqrt(d8, d24);
+ __ Fsqrt(d9, d25);
+ __ Fsqrt(d10, d26);
+ __ Fsqrt(d11, d27);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(0.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(0.5, s2);
+ ASSERT_EQUAL_FP32(256.0, s3);
+ ASSERT_EQUAL_FP32(-0.0, s4);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+ ASSERT_EQUAL_FP64(0.0, d6);
+ ASSERT_EQUAL_FP64(1.0, d7);
+ ASSERT_EQUAL_FP64(0.5, d8);
+ ASSERT_EQUAL_FP64(65536.0, d9);
+ ASSERT_EQUAL_FP64(-0.0, d10);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+
+ TEARDOWN();
+}
+
+
+TEST(frinta) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+
+ __ Frinta(s0, s16);
+ __ Frinta(s1, s17);
+ __ Frinta(s2, s18);
+ __ Frinta(s3, s19);
+ __ Frinta(s4, s20);
+ __ Frinta(s5, s21);
+ __ Frinta(s6, s22);
+ __ Frinta(s7, s23);
+ __ Frinta(s8, s24);
+ __ Frinta(s9, s25);
+ __ Frinta(s10, s26);
+
+ __ Fmov(d16, 1.0);
+ __ Fmov(d17, 1.1);
+ __ Fmov(d18, 1.5);
+ __ Fmov(d19, 1.9);
+ __ Fmov(d20, 2.5);
+ __ Fmov(d21, -1.5);
+ __ Fmov(d22, -2.5);
+ __ Fmov(d23, kFP64PositiveInfinity);
+ __ Fmov(d24, kFP64NegativeInfinity);
+ __ Fmov(d25, 0.0);
+ __ Fmov(d26, -0.0);
+
+ __ Frinta(d11, d16);
+ __ Frinta(d12, d17);
+ __ Frinta(d13, d18);
+ __ Frinta(d14, d19);
+ __ Frinta(d15, d20);
+ __ Frinta(d16, d21);
+ __ Frinta(d17, d22);
+ __ Frinta(d18, d23);
+ __ Frinta(d19, d24);
+ __ Frinta(d20, d25);
+ __ Frinta(d21, d26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(2.0, s2);
+ ASSERT_EQUAL_FP32(2.0, s3);
+ ASSERT_EQUAL_FP32(3.0, s4);
+ ASSERT_EQUAL_FP32(-2.0, s5);
+ ASSERT_EQUAL_FP32(-3.0, s6);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+ ASSERT_EQUAL_FP32(0.0, s9);
+ ASSERT_EQUAL_FP32(-0.0, s10);
+ ASSERT_EQUAL_FP64(1.0, d11);
+ ASSERT_EQUAL_FP64(1.0, d12);
+ ASSERT_EQUAL_FP64(2.0, d13);
+ ASSERT_EQUAL_FP64(2.0, d14);
+ ASSERT_EQUAL_FP64(3.0, d15);
+ ASSERT_EQUAL_FP64(-2.0, d16);
+ ASSERT_EQUAL_FP64(-3.0, d17);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+ ASSERT_EQUAL_FP64(0.0, d20);
+ ASSERT_EQUAL_FP64(-0.0, d21);
+
+ TEARDOWN();
+}
+
+
+TEST(frintn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+
+ __ Frintn(s0, s16);
+ __ Frintn(s1, s17);
+ __ Frintn(s2, s18);
+ __ Frintn(s3, s19);
+ __ Frintn(s4, s20);
+ __ Frintn(s5, s21);
+ __ Frintn(s6, s22);
+ __ Frintn(s7, s23);
+ __ Frintn(s8, s24);
+ __ Frintn(s9, s25);
+ __ Frintn(s10, s26);
+
+ __ Fmov(d16, 1.0);
+ __ Fmov(d17, 1.1);
+ __ Fmov(d18, 1.5);
+ __ Fmov(d19, 1.9);
+ __ Fmov(d20, 2.5);
+ __ Fmov(d21, -1.5);
+ __ Fmov(d22, -2.5);
+ __ Fmov(d23, kFP64PositiveInfinity);
+ __ Fmov(d24, kFP64NegativeInfinity);
+ __ Fmov(d25, 0.0);
+ __ Fmov(d26, -0.0);
+
+ __ Frintn(d11, d16);
+ __ Frintn(d12, d17);
+ __ Frintn(d13, d18);
+ __ Frintn(d14, d19);
+ __ Frintn(d15, d20);
+ __ Frintn(d16, d21);
+ __ Frintn(d17, d22);
+ __ Frintn(d18, d23);
+ __ Frintn(d19, d24);
+ __ Frintn(d20, d25);
+ __ Frintn(d21, d26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(2.0, s2);
+ ASSERT_EQUAL_FP32(2.0, s3);
+ ASSERT_EQUAL_FP32(2.0, s4);
+ ASSERT_EQUAL_FP32(-2.0, s5);
+ ASSERT_EQUAL_FP32(-2.0, s6);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+ ASSERT_EQUAL_FP32(0.0, s9);
+ ASSERT_EQUAL_FP32(-0.0, s10);
+ ASSERT_EQUAL_FP64(1.0, d11);
+ ASSERT_EQUAL_FP64(1.0, d12);
+ ASSERT_EQUAL_FP64(2.0, d13);
+ ASSERT_EQUAL_FP64(2.0, d14);
+ ASSERT_EQUAL_FP64(2.0, d15);
+ ASSERT_EQUAL_FP64(-2.0, d16);
+ ASSERT_EQUAL_FP64(-2.0, d17);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+ ASSERT_EQUAL_FP64(0.0, d20);
+ ASSERT_EQUAL_FP64(-0.0, d21);
+
+ TEARDOWN();
+}
+
+
+TEST(frintz) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+
+ __ Frintz(s0, s16);
+ __ Frintz(s1, s17);
+ __ Frintz(s2, s18);
+ __ Frintz(s3, s19);
+ __ Frintz(s4, s20);
+ __ Frintz(s5, s21);
+ __ Frintz(s6, s22);
+ __ Frintz(s7, s23);
+ __ Frintz(s8, s24);
+ __ Frintz(s9, s25);
+ __ Frintz(s10, s26);
+
+ __ Fmov(d16, 1.0);
+ __ Fmov(d17, 1.1);
+ __ Fmov(d18, 1.5);
+ __ Fmov(d19, 1.9);
+ __ Fmov(d20, 2.5);
+ __ Fmov(d21, -1.5);
+ __ Fmov(d22, -2.5);
+ __ Fmov(d23, kFP64PositiveInfinity);
+ __ Fmov(d24, kFP64NegativeInfinity);
+ __ Fmov(d25, 0.0);
+ __ Fmov(d26, -0.0);
+
+ __ Frintz(d11, d16);
+ __ Frintz(d12, d17);
+ __ Frintz(d13, d18);
+ __ Frintz(d14, d19);
+ __ Frintz(d15, d20);
+ __ Frintz(d16, d21);
+ __ Frintz(d17, d22);
+ __ Frintz(d18, d23);
+ __ Frintz(d19, d24);
+ __ Frintz(d20, d25);
+ __ Frintz(d21, d26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(1.0, s2);
+ ASSERT_EQUAL_FP32(1.0, s3);
+ ASSERT_EQUAL_FP32(2.0, s4);
+ ASSERT_EQUAL_FP32(-1.0, s5);
+ ASSERT_EQUAL_FP32(-2.0, s6);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+ ASSERT_EQUAL_FP32(0.0, s9);
+ ASSERT_EQUAL_FP32(-0.0, s10);
+ ASSERT_EQUAL_FP64(1.0, d11);
+ ASSERT_EQUAL_FP64(1.0, d12);
+ ASSERT_EQUAL_FP64(1.0, d13);
+ ASSERT_EQUAL_FP64(1.0, d14);
+ ASSERT_EQUAL_FP64(2.0, d15);
+ ASSERT_EQUAL_FP64(-1.0, d16);
+ ASSERT_EQUAL_FP64(-2.0, d17);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+ ASSERT_EQUAL_FP64(0.0, d20);
+ ASSERT_EQUAL_FP64(-0.0, d21);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvt_ds) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+ __ Fmov(s27, FLT_MAX);
+ __ Fmov(s28, FLT_MIN);
+ __ Fmov(s29, rawbits_to_float(0x7fc12345)); // Quiet NaN.
+ __ Fmov(s30, rawbits_to_float(0x7f812345)); // Signalling NaN.
+
+ __ Fcvt(d0, s16);
+ __ Fcvt(d1, s17);
+ __ Fcvt(d2, s18);
+ __ Fcvt(d3, s19);
+ __ Fcvt(d4, s20);
+ __ Fcvt(d5, s21);
+ __ Fcvt(d6, s22);
+ __ Fcvt(d7, s23);
+ __ Fcvt(d8, s24);
+ __ Fcvt(d9, s25);
+ __ Fcvt(d10, s26);
+ __ Fcvt(d11, s27);
+ __ Fcvt(d12, s28);
+ __ Fcvt(d13, s29);
+ __ Fcvt(d14, s30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(1.0f, d0);
+ ASSERT_EQUAL_FP64(1.1f, d1);
+ ASSERT_EQUAL_FP64(1.5f, d2);
+ ASSERT_EQUAL_FP64(1.9f, d3);
+ ASSERT_EQUAL_FP64(2.5f, d4);
+ ASSERT_EQUAL_FP64(-1.5f, d5);
+ ASSERT_EQUAL_FP64(-2.5f, d6);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
+ ASSERT_EQUAL_FP64(0.0f, d9);
+ ASSERT_EQUAL_FP64(-0.0f, d10);
+ ASSERT_EQUAL_FP64(FLT_MAX, d11);
+ ASSERT_EQUAL_FP64(FLT_MIN, d12);
+
+ // Check that the NaN payload is preserved according to A64 conversion rules:
+ // - The sign bit is preserved.
+ // - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
+ // - The remaining mantissa bits are copied until they run out.
+ // - The low-order bits that haven't already been assigned are set to 0.
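+ // For example, the single-precision quiet NaN 0x7fc12345 has mantissa bits
+ // 0x412345; copied into the top of the 52-bit mantissa this gives the
+ // double 0x7ff82468a0000000.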
+ ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
+ ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvt_sd) {
+ INIT_V8();
+ // There are a huge number of corner-cases to check, so this test iterates
+ // through a list. The list is then negated and checked again (since the sign
+ // is irrelevant in ties-to-even rounding), so the list shouldn't include any
+ // negative values.
+ //
+ // Note that this test only checks ties-to-even rounding, because that is all
+ // that the simulator supports.
+ struct {double in; float expected;} test[] = {
+ // Check some simple conversions.
+ {0.0, 0.0f},
+ {1.0, 1.0f},
+ {1.5, 1.5f},
+ {2.0, 2.0f},
+ {FLT_MAX, FLT_MAX},
+ // - The smallest normalized float.
+ {pow(2.0, -126), powf(2, -126)},
+ // - Normal floats that need (ties-to-even) rounding.
+ // For normalized numbers:
+ // bit 29 (0x0000000020000000) is the lowest-order bit which will
+ // fit in the float's mantissa.
+ {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
+ {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
+ // - A mantissa that overflows into the exponent during rounding.
+ {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
+ // - The largest double that rounds to a normal float.
+ {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},
+
+ // Doubles that are too big for a float.
+ {kFP64PositiveInfinity, kFP32PositiveInfinity},
+ {DBL_MAX, kFP32PositiveInfinity},
+ // - The smallest exponent that's too big for a float.
+ {pow(2.0, 128), kFP32PositiveInfinity},
+ // - This exponent is in range, but the value rounds to infinity.
+ {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},
+
+ // Doubles that are too small for a float.
+ // - The smallest (subnormal) double.
+ {DBL_MIN, 0.0},
+ // - The largest double which is too small for a subnormal float.
+ {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},
+
+ // Normal doubles that become subnormal floats.
+ // - The largest subnormal float.
+ {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
+ // - The smallest subnormal float.
+ {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
+ // - Subnormal floats that need (ties-to-even) rounding.
+ // For these subnormals:
+ // bit 34 (0x0000000400000000) is the lowest-order bit which will
+ // fit in the float's mantissa.
+ {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
+ {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
+ // - The smallest double which rounds up to become a subnormal float.
+ {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},
+
+ // Check NaN payload preservation.
+ {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
+ // - Signalling NaNs become quiet NaNs.
+ {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
+ };
+ int count = sizeof(test) / sizeof(test[0]);
+
+ for (int i = 0; i < count; i++) {
+ double in = test[i].in;
+ float expected = test[i].expected;
+
+ // We only expect positive input.
+ ASSERT(std::signbit(in) == 0);
+ ASSERT(std::signbit(expected) == 0);
+
+ SETUP();
+ START();
+
+ __ Fmov(d10, in);
+ __ Fcvt(s20, d10);
+
+ __ Fmov(d11, -in);
+ __ Fcvt(s21, d11);
+
+ END();
+ RUN();
+ ASSERT_EQUAL_FP32(expected, s20);
+ ASSERT_EQUAL_FP32(-expected, s21);
+ TEARDOWN();
+ }
+}
+
+
+TEST(fcvtas) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 2.5);
+ __ Fmov(s3, -2.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 2.5);
+ __ Fmov(d11, -2.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 2.5);
+ __ Fmov(s19, -2.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 2.5);
+ __ Fmov(d26, -2.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtas(w0, s0);
+ __ Fcvtas(w1, s1);
+ __ Fcvtas(w2, s2);
+ __ Fcvtas(w3, s3);
+ __ Fcvtas(w4, s4);
+ __ Fcvtas(w5, s5);
+ __ Fcvtas(w6, s6);
+ __ Fcvtas(w7, s7);
+ __ Fcvtas(w8, d8);
+ __ Fcvtas(w9, d9);
+ __ Fcvtas(w10, d10);
+ __ Fcvtas(w11, d11);
+ __ Fcvtas(w12, d12);
+ __ Fcvtas(w13, d13);
+ __ Fcvtas(w14, d14);
+ __ Fcvtas(w15, d15);
+ __ Fcvtas(x17, s17);
+ __ Fcvtas(x18, s18);
+ __ Fcvtas(x19, s19);
+ __ Fcvtas(x20, s20);
+ __ Fcvtas(x21, s21);
+ __ Fcvtas(x22, s22);
+ __ Fcvtas(x23, s23);
+ __ Fcvtas(x24, d24);
+ __ Fcvtas(x25, d25);
+ __ Fcvtas(x26, d26);
+ __ Fcvtas(x27, d27);
+ __ Fcvtas(x28, d28);
+ __ Fcvtas(x29, d29);
+ __ Fcvtas(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(0xfffffffd, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(0xfffffffd, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(3, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(3, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtau) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 2.5);
+ __ Fmov(s3, -2.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 2.5);
+ __ Fmov(d11, -2.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, 0xfffffffe);
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 2.5);
+ __ Fmov(s19, -2.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 2.5);
+ __ Fmov(d26, -2.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
+ __ Fmov(s30, 0x100000000UL);
+
+ __ Fcvtau(w0, s0);
+ __ Fcvtau(w1, s1);
+ __ Fcvtau(w2, s2);
+ __ Fcvtau(w3, s3);
+ __ Fcvtau(w4, s4);
+ __ Fcvtau(w5, s5);
+ __ Fcvtau(w6, s6);
+ __ Fcvtau(w8, d8);
+ __ Fcvtau(w9, d9);
+ __ Fcvtau(w10, d10);
+ __ Fcvtau(w11, d11);
+ __ Fcvtau(w12, d12);
+ __ Fcvtau(w13, d13);
+ __ Fcvtau(w14, d14);
+ __ Fcvtau(w15, d15);
+ __ Fcvtau(x16, s16);
+ __ Fcvtau(x17, s17);
+ __ Fcvtau(x18, s18);
+ __ Fcvtau(x19, s19);
+ __ Fcvtau(x20, s20);
+ __ Fcvtau(x21, s21);
+ __ Fcvtau(x22, s22);
+ __ Fcvtau(x24, d24);
+ __ Fcvtau(x25, d25);
+ __ Fcvtau(x26, d26);
+ __ Fcvtau(x27, d27);
+ __ Fcvtau(x28, d28);
+ __ Fcvtau(x29, d29);
+ __ Fcvtau(w30, s30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0xffffff00, x6);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0xfffffffe, x14);
+ ASSERT_EQUAL_64(1, x16);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(3, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0, x21);
+ ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(3, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0, x28);
+ ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
+ ASSERT_EQUAL_64(0xffffffff, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtms) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtms(w0, s0);
+ __ Fcvtms(w1, s1);
+ __ Fcvtms(w2, s2);
+ __ Fcvtms(w3, s3);
+ __ Fcvtms(w4, s4);
+ __ Fcvtms(w5, s5);
+ __ Fcvtms(w6, s6);
+ __ Fcvtms(w7, s7);
+ __ Fcvtms(w8, d8);
+ __ Fcvtms(w9, d9);
+ __ Fcvtms(w10, d10);
+ __ Fcvtms(w11, d11);
+ __ Fcvtms(w12, d12);
+ __ Fcvtms(w13, d13);
+ __ Fcvtms(w14, d14);
+ __ Fcvtms(w15, d15);
+ __ Fcvtms(x17, s17);
+ __ Fcvtms(x18, s18);
+ __ Fcvtms(x19, s19);
+ __ Fcvtms(x20, s20);
+ __ Fcvtms(x21, s21);
+ __ Fcvtms(x22, s22);
+ __ Fcvtms(x23, s23);
+ __ Fcvtms(x24, d24);
+ __ Fcvtms(x25, d25);
+ __ Fcvtms(x26, d26);
+ __ Fcvtms(x27, d27);
+ __ Fcvtms(x28, d28);
+ __ Fcvtms(x29, d29);
+ __ Fcvtms(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0xfffffffe, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0xfffffffe, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtmu) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtmu(w0, s0);
+ __ Fcvtmu(w1, s1);
+ __ Fcvtmu(w2, s2);
+ __ Fcvtmu(w3, s3);
+ __ Fcvtmu(w4, s4);
+ __ Fcvtmu(w5, s5);
+ __ Fcvtmu(w6, s6);
+ __ Fcvtmu(w7, s7);
+ __ Fcvtmu(w8, d8);
+ __ Fcvtmu(w9, d9);
+ __ Fcvtmu(w10, d10);
+ __ Fcvtmu(w11, d11);
+ __ Fcvtmu(w12, d12);
+ __ Fcvtmu(w13, d13);
+ __ Fcvtmu(w14, d14);
+ __ Fcvtmu(x17, s17);
+ __ Fcvtmu(x18, s18);
+ __ Fcvtmu(x19, s19);
+ __ Fcvtmu(x20, s20);
+ __ Fcvtmu(x21, s21);
+ __ Fcvtmu(x22, s22);
+ __ Fcvtmu(x23, s23);
+ __ Fcvtmu(x24, d24);
+ __ Fcvtmu(x25, d25);
+ __ Fcvtmu(x26, d26);
+ __ Fcvtmu(x27, d27);
+ __ Fcvtmu(x28, d28);
+ __ Fcvtmu(x29, d29);
+ __ Fcvtmu(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0x0UL, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x0UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x0UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0x0UL, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x0UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x0UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtns) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtns(w0, s0);
+ __ Fcvtns(w1, s1);
+ __ Fcvtns(w2, s2);
+ __ Fcvtns(w3, s3);
+ __ Fcvtns(w4, s4);
+ __ Fcvtns(w5, s5);
+ __ Fcvtns(w6, s6);
+ __ Fcvtns(w7, s7);
+ __ Fcvtns(w8, d8);
+ __ Fcvtns(w9, d9);
+ __ Fcvtns(w10, d10);
+ __ Fcvtns(w11, d11);
+ __ Fcvtns(w12, d12);
+ __ Fcvtns(w13, d13);
+ __ Fcvtns(w14, d14);
+ __ Fcvtns(w15, d15);
+ __ Fcvtns(x17, s17);
+ __ Fcvtns(x18, s18);
+ __ Fcvtns(x19, s19);
+ __ Fcvtns(x20, s20);
+ __ Fcvtns(x21, s21);
+ __ Fcvtns(x22, s22);
+ __ Fcvtns(x23, s23);
+ __ Fcvtns(x24, d24);
+ __ Fcvtns(x25, d25);
+ __ Fcvtns(x26, d26);
+ __ Fcvtns(x27, d27);
+// __ Fcvtns(x28, d28);
+ __ Fcvtns(x29, d29);
+ __ Fcvtns(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(2, x2);
+ ASSERT_EQUAL_64(0xfffffffe, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(2, x10);
+ ASSERT_EQUAL_64(0xfffffffe, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(2, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(2, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+// ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtnu) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, 0xfffffffe);
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
+ __ Fmov(s30, 0x100000000UL);
+
+ __ Fcvtnu(w0, s0);
+ __ Fcvtnu(w1, s1);
+ __ Fcvtnu(w2, s2);
+ __ Fcvtnu(w3, s3);
+ __ Fcvtnu(w4, s4);
+ __ Fcvtnu(w5, s5);
+ __ Fcvtnu(w6, s6);
+ __ Fcvtnu(w8, d8);
+ __ Fcvtnu(w9, d9);
+ __ Fcvtnu(w10, d10);
+ __ Fcvtnu(w11, d11);
+ __ Fcvtnu(w12, d12);
+ __ Fcvtnu(w13, d13);
+ __ Fcvtnu(w14, d14);
+ __ Fcvtnu(w15, d15);
+ __ Fcvtnu(x16, s16);
+ __ Fcvtnu(x17, s17);
+ __ Fcvtnu(x18, s18);
+ __ Fcvtnu(x19, s19);
+ __ Fcvtnu(x20, s20);
+ __ Fcvtnu(x21, s21);
+ __ Fcvtnu(x22, s22);
+ __ Fcvtnu(x24, d24);
+ __ Fcvtnu(x25, d25);
+ __ Fcvtnu(x26, d26);
+ __ Fcvtnu(x27, d27);
+// __ Fcvtnu(x28, d28);
+ __ Fcvtnu(x29, d29);
+ __ Fcvtnu(w30, s30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(2, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0xffffff00, x6);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(2, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0xfffffffe, x14);
+ ASSERT_EQUAL_64(1, x16);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(2, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0, x21);
+ ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(2, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+// ASSERT_EQUAL_64(0, x28);
+ ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
+ ASSERT_EQUAL_64(0xffffffff, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtzs) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtzs(w0, s0);
+ __ Fcvtzs(w1, s1);
+ __ Fcvtzs(w2, s2);
+ __ Fcvtzs(w3, s3);
+ __ Fcvtzs(w4, s4);
+ __ Fcvtzs(w5, s5);
+ __ Fcvtzs(w6, s6);
+ __ Fcvtzs(w7, s7);
+ __ Fcvtzs(w8, d8);
+ __ Fcvtzs(w9, d9);
+ __ Fcvtzs(w10, d10);
+ __ Fcvtzs(w11, d11);
+ __ Fcvtzs(w12, d12);
+ __ Fcvtzs(w13, d13);
+ __ Fcvtzs(w14, d14);
+ __ Fcvtzs(w15, d15);
+ __ Fcvtzs(x17, s17);
+ __ Fcvtzs(x18, s18);
+ __ Fcvtzs(x19, s19);
+ __ Fcvtzs(x20, s20);
+ __ Fcvtzs(x21, s21);
+ __ Fcvtzs(x22, s22);
+ __ Fcvtzs(x23, s23);
+ __ Fcvtzs(x24, d24);
+ __ Fcvtzs(x25, d25);
+ __ Fcvtzs(x26, d26);
+ __ Fcvtzs(x27, d27);
+ __ Fcvtzs(x28, d28);
+ __ Fcvtzs(x29, d29);
+ __ Fcvtzs(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0xffffffff, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtzu) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtzu(w0, s0);
+ __ Fcvtzu(w1, s1);
+ __ Fcvtzu(w2, s2);
+ __ Fcvtzu(w3, s3);
+ __ Fcvtzu(w4, s4);
+ __ Fcvtzu(w5, s5);
+ __ Fcvtzu(w6, s6);
+ __ Fcvtzu(w7, s7);
+ __ Fcvtzu(w8, d8);
+ __ Fcvtzu(w9, d9);
+ __ Fcvtzu(w10, d10);
+ __ Fcvtzu(w11, d11);
+ __ Fcvtzu(w12, d12);
+ __ Fcvtzu(w13, d13);
+ __ Fcvtzu(w14, d14);
+ __ Fcvtzu(x17, s17);
+ __ Fcvtzu(x18, s18);
+ __ Fcvtzu(x19, s19);
+ __ Fcvtzu(x20, s20);
+ __ Fcvtzu(x21, s21);
+ __ Fcvtzu(x22, s22);
+ __ Fcvtzu(x23, s23);
+ __ Fcvtzu(x24, d24);
+ __ Fcvtzu(x25, d25);
+ __ Fcvtzu(x26, d26);
+ __ Fcvtzu(x27, d27);
+ __ Fcvtzu(x28, d28);
+ __ Fcvtzu(x29, d29);
+ __ Fcvtzu(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0x0UL, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x0UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x0UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0x0UL, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x0UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x0UL, x30);
+
+ TEARDOWN();
+}
+
+
+// Test that scvtf and ucvtf can convert the 64-bit input into the expected
+// value. All possible values of 'fbits' are tested. The expected value is
+// modified accordingly in each case.
+//
+// The expected value is specified as the bit encoding of the expected double
+// produced by scvtf (expected_scvtf_bits) as well as ucvtf
+// (expected_ucvtf_bits).
+//
+// Where the input value is representable by int32_t or uint32_t, conversions
+// from W registers will also be tested.
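+//
+// For example, with in = 1 and fbits = 4 the input is treated as the
+// fixed-point value 1/16 = 0.0625, so the expected result is the double
+// encoded by expected_scvtf_bits (or expected_ucvtf_bits) divided by 2^4.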
+static void TestUScvtfHelper(uint64_t in,
+ uint64_t expected_scvtf_bits,
+ uint64_t expected_ucvtf_bits) {
+ uint64_t u64 = in;
+ uint32_t u32 = u64 & 0xffffffff;
+ int64_t s64 = static_cast<int64_t>(in);
+ int32_t s32 = s64 & 0x7fffffff;
+
+ bool cvtf_s32 = (s64 == s32);
+ bool cvtf_u32 = (u64 == u32);
+
+ double results_scvtf_x[65];
+ double results_ucvtf_x[65];
+ double results_scvtf_w[33];
+ double results_ucvtf_w[33];
+
+ SETUP();
+ START();
+
+ __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
+ __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
+ __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
+ __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
+
+ __ Mov(x10, s64);
+
+ // Corrupt the top word, in case it is accidentally used during W-register
+ // conversions.
+ __ Mov(x11, 0x5555555555555555);
+ __ Bfi(x11, x10, 0, kWRegSize);
+
+ // Test integer conversions.
+ __ Scvtf(d0, x10);
+ __ Ucvtf(d1, x10);
+ __ Scvtf(d2, w11);
+ __ Ucvtf(d3, w11);
+ __ Str(d0, MemOperand(x0));
+ __ Str(d1, MemOperand(x1));
+ __ Str(d2, MemOperand(x2));
+ __ Str(d3, MemOperand(x3));
+
+ // Test all possible values of fbits.
+ for (int fbits = 1; fbits <= 32; fbits++) {
+ __ Scvtf(d0, x10, fbits);
+ __ Ucvtf(d1, x10, fbits);
+ __ Scvtf(d2, w11, fbits);
+ __ Ucvtf(d3, w11, fbits);
+ __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
+ __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
+ __ Str(d2, MemOperand(x2, fbits * kDRegSizeInBytes));
+ __ Str(d3, MemOperand(x3, fbits * kDRegSizeInBytes));
+ }
+
+ // Conversions from W registers can only handle fbits values <= 32, so just
+ // test conversions from X registers for 32 < fbits <= 64.
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ __ Scvtf(d0, x10, fbits);
+ __ Ucvtf(d1, x10, fbits);
+ __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
+ __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
+ }
+
+ END();
+ RUN();
+
+ // Check the results.
+ double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
+ double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
+
+ for (int fbits = 0; fbits <= 32; fbits++) {
+ double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
+ double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
+ ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+ if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
+ if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
+ }
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
+ double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
+ ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(scvtf_ucvtf_double) {
+ INIT_V8();
+ // Simple conversions of positive numbers which require no rounding; the
+  // results should not depend on the rounding mode, and ucvtf and scvtf should
+ // produce the same result.
+ TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
+ TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
+ TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
+ TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
+ TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
+ // Test mantissa extremities.
+ TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
+ // The largest int32_t that fits in a double.
+ TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
+ // Values that would be negative if treated as an int32_t.
+ TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
+ TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
+ TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
+ // The largest int64_t that fits in a double.
+ TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
+ // Check for bit pattern reproduction.
+ TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
+ TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);
+
+ // Simple conversions of negative int64_t values. These require no rounding,
+ // and the results should not depend on the rounding mode.
+ TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
+ TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
+ TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);
+
+ // Conversions which require rounding.
+ TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
+ TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
+ TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
+ TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
+ TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
+ TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
+ TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
+ TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
+ // Check rounding of negative int64_t values (and large uint64_t values).
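+  // (For example, scvtf treats 0x8000000000000000 as INT64_MIN = -2^63 and
+  // produces 0xc3e0000000000000, while ucvtf treats it as +2^63 and produces
+  // 0x43e0000000000000.)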
+ TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
+ // Round up to produce a result that's too big for the input to represent.
+ TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
+ TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
+}
+
+
+// The same as TestUScvtfHelper, but convert to floats.
+static void TestUScvtf32Helper(uint64_t in,
+ uint32_t expected_scvtf_bits,
+ uint32_t expected_ucvtf_bits) {
+ uint64_t u64 = in;
+ uint32_t u32 = u64 & 0xffffffff;
+ int64_t s64 = static_cast<int64_t>(in);
+ int32_t s32 = s64 & 0x7fffffff;
+
+ bool cvtf_s32 = (s64 == s32);
+ bool cvtf_u32 = (u64 == u32);
+
+ float results_scvtf_x[65];
+ float results_ucvtf_x[65];
+ float results_scvtf_w[33];
+ float results_ucvtf_w[33];
+
+ SETUP();
+ START();
+
+ __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
+ __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
+ __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
+ __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
+
+ __ Mov(x10, s64);
+
+ // Corrupt the top word, in case it is accidentally used during W-register
+ // conversions.
+ __ Mov(x11, 0x5555555555555555);
+ __ Bfi(x11, x10, 0, kWRegSize);
+
+ // Test integer conversions.
+ __ Scvtf(s0, x10);
+ __ Ucvtf(s1, x10);
+ __ Scvtf(s2, w11);
+ __ Ucvtf(s3, w11);
+ __ Str(s0, MemOperand(x0));
+ __ Str(s1, MemOperand(x1));
+ __ Str(s2, MemOperand(x2));
+ __ Str(s3, MemOperand(x3));
+
+ // Test all possible values of fbits.
+ for (int fbits = 1; fbits <= 32; fbits++) {
+ __ Scvtf(s0, x10, fbits);
+ __ Ucvtf(s1, x10, fbits);
+ __ Scvtf(s2, w11, fbits);
+ __ Ucvtf(s3, w11, fbits);
+ __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
+ __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
+ __ Str(s2, MemOperand(x2, fbits * kSRegSizeInBytes));
+ __ Str(s3, MemOperand(x3, fbits * kSRegSizeInBytes));
+ }
+
+ // Conversions from W registers can only handle fbits values <= 32, so just
+ // test conversions from X registers for 32 < fbits <= 64.
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ __ Scvtf(s0, x10, fbits);
+ __ Ucvtf(s1, x10, fbits);
+ __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
+ __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
+ }
+
+ END();
+ RUN();
+
+ // Check the results.
+ float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
+ float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
+
+  for (int fbits = 0; fbits <= 32; fbits++) {
+    float expected_scvtf = expected_scvtf_base / powf(2, fbits);
+    float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
+    ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+    ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+    if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
+    if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
+  }
+  for (int fbits = 33; fbits <= 64; fbits++) {
+    float expected_scvtf = expected_scvtf_base / powf(2, fbits);
+    float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
+    ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+    ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+  }
+
+ TEARDOWN();
+}
+
+
+TEST(scvtf_ucvtf_float) {
+ INIT_V8();
+ // Simple conversions of positive numbers which require no rounding; the
+  // results should not depend on the rounding mode, and ucvtf and scvtf should
+ // produce the same result.
+ TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
+ TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
+ TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
+ TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
+ TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
+ // Test mantissa extremities.
+ TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
+ TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
+ // The largest int32_t that fits in a float.
+ TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
+ // Values that would be negative if treated as an int32_t.
+ TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
+ TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
+ TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
+ // The largest int64_t that fits in a float.
+ TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
+ // Check for bit pattern reproduction.
+ TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);
+
+ // Simple conversions of negative int64_t values. These require no rounding,
+ // and the results should not depend on the rounding mode.
+ TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
+ TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);
+
+ // Conversions which require rounding.
+ TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
+ TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
+ TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
+ TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
+ TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
+ TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
+ TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
+ TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
+ // Check rounding of negative int64_t values (and large uint64_t values).
+ TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
+ TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
+ TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
+ TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
+ TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
+ TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
+ TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
+ TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
+ // Round up to produce a result that's too big for the input to represent.
+ TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
+ TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
+ TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
+ TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
+ TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
+ TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
+ TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
+ TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
+}
+
+
+TEST(system_mrs) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 1);
+ __ Mov(w2, 0x80000000);
+
+ // Set the Z and C flags.
+ __ Cmp(w0, w0);
+ __ Mrs(x3, NZCV);
+
+ // Set the N flag.
+ __ Cmp(w0, w1);
+ __ Mrs(x4, NZCV);
+
+  // Set the Z, C and V flags (0x80000000 + 0x80000000 wraps to zero, with a
+  // carry out and signed overflow).
+ __ Adds(w0, w2, w2);
+ __ Mrs(x5, NZCV);
+
+ // Read the default FPCR.
+ __ Mrs(x6, FPCR);
+ END();
+
+ RUN();
+
+ // NZCV
+ ASSERT_EQUAL_32(ZCFlag, w3);
+ ASSERT_EQUAL_32(NFlag, w4);
+ ASSERT_EQUAL_32(ZCVFlag, w5);
+
+ // FPCR
+ // The default FPCR on Linux-based platforms is 0.
+ ASSERT_EQUAL_32(0, w6);
+
+ TEARDOWN();
+}
+
+
+TEST(system_msr) {
+ INIT_V8();
+ // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
+ const uint64_t fpcr_core = 0x07c00000;
+
+ // All FPCR fields (including fields which may be read-as-zero):
+ // Stride, Len
+ // IDE, IXE, UFE, OFE, DZE, IOE
+ const uint64_t fpcr_all = fpcr_core | 0x00379f00;
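+  // (fpcr_core covers FPCR bits 26:22: AHP = 26, DN = 25, FZ = 24 and
+  // RMode = 23:22. The extra mask covers Stride [21:20], Len [18:16] and the
+  // trap-enable bits IDE, IXE, UFE, OFE, DZE and IOE.)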
+
+ SETUP();
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 0x7fffffff);
+
+ __ Mov(x7, 0);
+
+ __ Mov(x10, NVFlag);
+ __ Cmp(w0, w0); // Set Z and C.
+ __ Msr(NZCV, x10); // Set N and V.
+ // The Msr should have overwritten every flag set by the Cmp.
+ __ Cinc(x7, x7, mi); // N
+ __ Cinc(x7, x7, ne); // !Z
+ __ Cinc(x7, x7, lo); // !C
+ __ Cinc(x7, x7, vs); // V
+
+ __ Mov(x10, ZCFlag);
+ __ Cmn(w1, w1); // Set N and V.
+ __ Msr(NZCV, x10); // Set Z and C.
+ // The Msr should have overwritten every flag set by the Cmn.
+ __ Cinc(x7, x7, pl); // !N
+ __ Cinc(x7, x7, eq); // Z
+ __ Cinc(x7, x7, hs); // C
+ __ Cinc(x7, x7, vc); // !V
+
+ // All core FPCR fields must be writable.
+ __ Mov(x8, fpcr_core);
+ __ Msr(FPCR, x8);
+ __ Mrs(x8, FPCR);
+
+ // All FPCR fields, including optional ones. This part of the test doesn't
+ // achieve much other than ensuring that supported fields can be cleared by
+ // the next test.
+ __ Mov(x9, fpcr_all);
+ __ Msr(FPCR, x9);
+ __ Mrs(x9, FPCR);
+ __ And(x9, x9, fpcr_core);
+
+ // The undefined bits must ignore writes.
+ // It's conceivable that a future version of the architecture could use these
+ // fields (making this test fail), but in the meantime this is a useful test
+ // for the simulator.
+ __ Mov(x10, ~fpcr_all);
+ __ Msr(FPCR, x10);
+ __ Mrs(x10, FPCR);
+
+ END();
+
+ RUN();
+
+ // We should have incremented x7 (from 0) exactly 8 times.
+ ASSERT_EQUAL_64(8, x7);
+
+ ASSERT_EQUAL_64(fpcr_core, x8);
+ ASSERT_EQUAL_64(fpcr_core, x9);
+ ASSERT_EQUAL_64(0, x10);
+
+ TEARDOWN();
+}
+
+
+TEST(system_nop) {
+ INIT_V8();
+ SETUP();
+ RegisterDump before;
+
+ START();
+ before.Dump(&masm);
+ __ Nop();
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_REGISTERS(before);
+ ASSERT_EQUAL_NZCV(before.flags_nzcv());
+
+ TEARDOWN();
+}
+
+
+TEST(zero_dest) {
+ INIT_V8();
+ SETUP();
+ RegisterDump before;
+
+ START();
+ // Preserve the system stack pointer, in case we clobber it.
+ __ Mov(x30, csp);
+ // Initialize the other registers used in this test.
+ uint64_t literal_base = 0x0100001000100101UL;
+ __ Mov(x0, 0);
+ __ Mov(x1, literal_base);
+ for (unsigned i = 2; i < x30.code(); i++) {
+ __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
+ }
+ before.Dump(&masm);
+
+ // All of these instructions should be NOPs in these forms, but have
+ // alternate forms which can write into the stack pointer.
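+  // (In the shifted-register forms used here, register code 31 encodes xzr;
+  // in the immediate and extended-register forms it names the stack pointer.)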
+ __ add(xzr, x0, x1);
+ __ add(xzr, x1, xzr);
+ __ add(xzr, xzr, x1);
+
+ __ and_(xzr, x0, x2);
+ __ and_(xzr, x2, xzr);
+ __ and_(xzr, xzr, x2);
+
+ __ bic(xzr, x0, x3);
+ __ bic(xzr, x3, xzr);
+ __ bic(xzr, xzr, x3);
+
+ __ eon(xzr, x0, x4);
+ __ eon(xzr, x4, xzr);
+ __ eon(xzr, xzr, x4);
+
+ __ eor(xzr, x0, x5);
+ __ eor(xzr, x5, xzr);
+ __ eor(xzr, xzr, x5);
+
+ __ orr(xzr, x0, x6);
+ __ orr(xzr, x6, xzr);
+ __ orr(xzr, xzr, x6);
+
+ __ sub(xzr, x0, x7);
+ __ sub(xzr, x7, xzr);
+ __ sub(xzr, xzr, x7);
+
+ // Swap the saved system stack pointer with the real one. If csp was written
+ // during the test, it will show up in x30. This is done because the test
+ // framework assumes that csp will be valid at the end of the test.
+ __ Mov(x29, x30);
+ __ Mov(x30, csp);
+ __ Mov(csp, x29);
+ // We used x29 as a scratch register, so reset it to make sure it doesn't
+ // trigger a test failure.
+ __ Add(x29, x28, x1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_REGISTERS(before);
+ ASSERT_EQUAL_NZCV(before.flags_nzcv());
+
+ TEARDOWN();
+}
+
+
+TEST(zero_dest_setflags) {
+ INIT_V8();
+ SETUP();
+ RegisterDump before;
+
+ START();
+ // Preserve the system stack pointer, in case we clobber it.
+ __ Mov(x30, csp);
+ // Initialize the other registers used in this test.
+ uint64_t literal_base = 0x0100001000100101UL;
+ __ Mov(x0, 0);
+ __ Mov(x1, literal_base);
+ for (int i = 2; i < 30; i++) {
+ __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
+ }
+ before.Dump(&masm);
+
+ // All of these instructions should only write to the flags in these forms,
+ // but have alternate forms which can write into the stack pointer.
+ __ adds(xzr, x0, Operand(x1, UXTX));
+ __ adds(xzr, x1, Operand(xzr, UXTX));
+ __ adds(xzr, x1, 1234);
+ __ adds(xzr, x0, x1);
+ __ adds(xzr, x1, xzr);
+ __ adds(xzr, xzr, x1);
+
+ __ ands(xzr, x2, ~0xf);
+ __ ands(xzr, xzr, ~0xf);
+ __ ands(xzr, x0, x2);
+ __ ands(xzr, x2, xzr);
+ __ ands(xzr, xzr, x2);
+
+ __ bics(xzr, x3, ~0xf);
+ __ bics(xzr, xzr, ~0xf);
+ __ bics(xzr, x0, x3);
+ __ bics(xzr, x3, xzr);
+ __ bics(xzr, xzr, x3);
+
+ __ subs(xzr, x0, Operand(x3, UXTX));
+ __ subs(xzr, x3, Operand(xzr, UXTX));
+ __ subs(xzr, x3, 1234);
+ __ subs(xzr, x0, x3);
+ __ subs(xzr, x3, xzr);
+ __ subs(xzr, xzr, x3);
+
+ // Swap the saved system stack pointer with the real one. If csp was written
+ // during the test, it will show up in x30. This is done because the test
+ // framework assumes that csp will be valid at the end of the test.
+ __ Mov(x29, x30);
+ __ Mov(x30, csp);
+ __ Mov(csp, x29);
+ // We used x29 as a scratch register, so reset it to make sure it doesn't
+ // trigger a test failure.
+ __ Add(x29, x28, x1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_REGISTERS(before);
+
+ TEARDOWN();
+}
+
+
+TEST(register_bit) {
+ // No code generation takes place in this test, so no need to setup and
+ // teardown.
+
+ // Simple tests.
+ CHECK(x0.Bit() == (1UL << 0));
+ CHECK(x1.Bit() == (1UL << 1));
+ CHECK(x10.Bit() == (1UL << 10));
+
+ // AAPCS64 definitions.
+ CHECK(fp.Bit() == (1UL << kFramePointerRegCode));
+ CHECK(lr.Bit() == (1UL << kLinkRegCode));
+
+ // Fixed (hardware) definitions.
+ CHECK(xzr.Bit() == (1UL << kZeroRegCode));
+
+ // Internal ABI definitions.
+ CHECK(jssp.Bit() == (1UL << kJSSPCode));
+ CHECK(csp.Bit() == (1UL << kSPRegInternalCode));
+ CHECK(csp.Bit() != xzr.Bit());
+
+ // xn.Bit() == wn.Bit() at all times, for the same n.
+ CHECK(x0.Bit() == w0.Bit());
+ CHECK(x1.Bit() == w1.Bit());
+ CHECK(x10.Bit() == w10.Bit());
+ CHECK(jssp.Bit() == wjssp.Bit());
+ CHECK(xzr.Bit() == wzr.Bit());
+ CHECK(csp.Bit() == wcsp.Bit());
+}
+
+
+TEST(stack_pointer_override) {
+ // This test generates some stack maintenance code, but the test only checks
+ // the reported state.
+ INIT_V8();
+ SETUP();
+ START();
+
+  // The default stack pointer in V8 is jssp, but the test framework sets it
+  // to csp before calling the test.
+ CHECK(csp.Is(__ StackPointer()));
+ __ SetStackPointer(x0);
+ CHECK(x0.Is(__ StackPointer()));
+ __ SetStackPointer(jssp);
+ CHECK(jssp.Is(__ StackPointer()));
+ __ SetStackPointer(csp);
+ CHECK(csp.Is(__ StackPointer()));
+
+ END();
+ RUN();
+ TEARDOWN();
+}
+
+
+TEST(peek_poke_simple) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
+ static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
+ x12.Bit() | x13.Bit();
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
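+  //    (For example, literal_base * 3 == 0x0300003000300303, so the
+  //    multiplier is visible in every non-zero byte of the result.)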
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ // Initialize the registers.
+ __ Mov(x0, literal_base);
+ __ Add(x1, x0, x0);
+ __ Add(x2, x1, x0);
+ __ Add(x3, x2, x0);
+
+ __ Claim(4);
+
+ // Simple exchange.
+ // After this test:
+ // x0-x3 should be unchanged.
+ // w10-w13 should contain the lower words of x0-x3.
+ __ Poke(x0, 0);
+ __ Poke(x1, 8);
+ __ Poke(x2, 16);
+ __ Poke(x3, 24);
+ Clobber(&masm, x0_to_x3);
+ __ Peek(x0, 0);
+ __ Peek(x1, 8);
+ __ Peek(x2, 16);
+ __ Peek(x3, 24);
+
+ __ Poke(w0, 0);
+ __ Poke(w1, 4);
+ __ Poke(w2, 8);
+ __ Poke(w3, 12);
+ Clobber(&masm, x10_to_x13);
+ __ Peek(w10, 0);
+ __ Peek(w11, 4);
+ __ Peek(w12, 8);
+ __ Peek(w13, 12);
+
+ __ Drop(4);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_64(literal_base * 1, x0);
+ ASSERT_EQUAL_64(literal_base * 2, x1);
+ ASSERT_EQUAL_64(literal_base * 3, x2);
+ ASSERT_EQUAL_64(literal_base * 4, x3);
+
+ ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
+ ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
+ ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
+ ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(peek_poke_unaligned) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ // Initialize the registers.
+ __ Mov(x0, literal_base);
+ __ Add(x1, x0, x0);
+ __ Add(x2, x1, x0);
+ __ Add(x3, x2, x0);
+ __ Add(x4, x3, x0);
+ __ Add(x5, x4, x0);
+ __ Add(x6, x5, x0);
+
+ __ Claim(4);
+
+ // Unaligned exchanges.
+ // After this test:
+ // x0-x6 should be unchanged.
+ // w10-w12 should contain the lower words of x0-x2.
+ __ Poke(x0, 1);
+ Clobber(&masm, x0.Bit());
+ __ Peek(x0, 1);
+ __ Poke(x1, 2);
+ Clobber(&masm, x1.Bit());
+ __ Peek(x1, 2);
+ __ Poke(x2, 3);
+ Clobber(&masm, x2.Bit());
+ __ Peek(x2, 3);
+ __ Poke(x3, 4);
+ Clobber(&masm, x3.Bit());
+ __ Peek(x3, 4);
+ __ Poke(x4, 5);
+ Clobber(&masm, x4.Bit());
+ __ Peek(x4, 5);
+ __ Poke(x5, 6);
+ Clobber(&masm, x5.Bit());
+ __ Peek(x5, 6);
+ __ Poke(x6, 7);
+ Clobber(&masm, x6.Bit());
+ __ Peek(x6, 7);
+
+ __ Poke(w0, 1);
+ Clobber(&masm, w10.Bit());
+ __ Peek(w10, 1);
+ __ Poke(w1, 2);
+ Clobber(&masm, w11.Bit());
+ __ Peek(w11, 2);
+ __ Poke(w2, 3);
+ Clobber(&masm, w12.Bit());
+ __ Peek(w12, 3);
+
+ __ Drop(4);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_64(literal_base * 1, x0);
+ ASSERT_EQUAL_64(literal_base * 2, x1);
+ ASSERT_EQUAL_64(literal_base * 3, x2);
+ ASSERT_EQUAL_64(literal_base * 4, x3);
+ ASSERT_EQUAL_64(literal_base * 5, x4);
+ ASSERT_EQUAL_64(literal_base * 6, x5);
+ ASSERT_EQUAL_64(literal_base * 7, x6);
+
+ ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
+ ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
+ ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
+
+ TEARDOWN();
+}
+
+
+TEST(peek_poke_endianness) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ // Initialize the registers.
+ __ Mov(x0, literal_base);
+ __ Add(x1, x0, x0);
+
+ __ Claim(4);
+
+ // Endianness tests.
+ // After this section:
+ // x4 should match x0[31:0]:x0[63:32]
+ // w5 should match w1[15:0]:w1[31:16]
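+  //
+  // Two copies of x0 are poked at offsets 0 and 8, then eight bytes are peeked
+  // from offset 4: on this little-endian stack the low word of x4 comes from
+  // x0[63:32] and the high word from x0[31:0], swapping the two halves. The
+  // w1 case swaps half-words in the same way.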
+ __ Poke(x0, 0);
+ __ Poke(x0, 8);
+ __ Peek(x4, 4);
+
+ __ Poke(w1, 0);
+ __ Poke(w1, 4);
+ __ Peek(w5, 2);
+
+ __ Drop(4);
+
+ END();
+ RUN();
+
+ uint64_t x0_expected = literal_base * 1;
+ uint64_t x1_expected = literal_base * 2;
+ uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
+ uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
+ ((x1_expected >> 16) & 0x0000ffff);
+
+ ASSERT_EQUAL_64(x0_expected, x0);
+ ASSERT_EQUAL_64(x1_expected, x1);
+ ASSERT_EQUAL_64(x4_expected, x4);
+ ASSERT_EQUAL_64(x5_expected, x5);
+
+ TEARDOWN();
+}
+
+
+TEST(peek_poke_mixed) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ // Initialize the registers.
+ __ Mov(x0, literal_base);
+ __ Add(x1, x0, x0);
+ __ Add(x2, x1, x0);
+ __ Add(x3, x2, x0);
+
+ __ Claim(4);
+
+ // Mix with other stack operations.
+ // After this section:
+ // x0-x3 should be unchanged.
+ // x6 should match x1[31:0]:x0[63:32]
+ // w7 should match x1[15:0]:x0[63:48]
+ __ Poke(x1, 8);
+ __ Poke(x0, 0);
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(x4, __ StackPointer());
+ __ SetStackPointer(x4);
+
+ __ Poke(wzr, 0); // Clobber the space we're about to drop.
+ __ Drop(1, kWRegSizeInBytes);
+ __ Peek(x6, 0);
+ __ Claim(1);
+ __ Peek(w7, 10);
+ __ Poke(x3, 28);
+ __ Poke(xzr, 0); // Clobber the space we're about to drop.
+ __ Drop(1);
+ __ Poke(x2, 12);
+ __ Push(w0);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ __ Pop(x0, x1, x2, x3);
+
+ END();
+ RUN();
+
+ uint64_t x0_expected = literal_base * 1;
+ uint64_t x1_expected = literal_base * 2;
+ uint64_t x2_expected = literal_base * 3;
+ uint64_t x3_expected = literal_base * 4;
+ uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
+ uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
+ ((x0_expected >> 48) & 0x0000ffff);
+
+ ASSERT_EQUAL_64(x0_expected, x0);
+ ASSERT_EQUAL_64(x1_expected, x1);
+ ASSERT_EQUAL_64(x2_expected, x2);
+ ASSERT_EQUAL_64(x3_expected, x3);
+ ASSERT_EQUAL_64(x6_expected, x6);
+ ASSERT_EQUAL_64(x7_expected, x7);
+
+ TEARDOWN();
+}
+
+
+// This enum is used only as an argument to the push-pop test helpers.
+enum PushPopMethod {
+ // Push or Pop using the Push and Pop methods, with blocks of up to four
+ // registers. (Smaller blocks will be used if necessary.)
+ PushPopByFour,
+
+ // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
+ PushPopRegList
+};
+
+
+// The maximum number of registers that can be used by the PushPopJssp* tests,
+// where a reg_count field is provided.
+static int const kPushPopJsspMaxRegCount = -1;
+
+// Test a simple push-pop pattern:
+// * Claim <claim> bytes to set the stack alignment.
+// * Push <reg_count> registers with size <reg_size>.
+// * Clobber the register contents.
+// * Pop <reg_count> registers to restore the original contents.
+// * Drop <claim> bytes to restore the original stack pointer.
+//
+// Different push and pop methods can be specified independently to test for
+// proper word-endian behaviour.
+static void PushPopJsspSimpleHelper(int reg_count,
+ int claim,
+ int reg_size,
+ PushPopMethod push_method,
+ PushPopMethod pop_method) {
+ SETUP();
+
+ START();
+
+ // Registers x8 and x9 are used by the macro assembler for debug code (for
+ // example in 'Pop'), so we can't use them here. We can't use jssp because it
+ // will be the stack pointer for this test.
+ static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
+ if (reg_count == kPushPopJsspMaxRegCount) {
+ reg_count = CountSetBits(allowed, kNumberOfRegisters);
+ }
+ // Work out which registers to use, based on reg_size.
+ Register r[kNumberOfRegisters];
+ Register x[kNumberOfRegisters];
+ RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
+ allowed);
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ int i;
+
+ // Initialize the registers.
+ for (i = 0; i < reg_count; i++) {
+ // Always write into the X register, to ensure that the upper word is
+ // properly ignored by Push when testing W registers.
+ if (!x[i].IsZero()) {
+ __ Mov(x[i], literal_base * i);
+ }
+ }
+
+ // Claim memory first, as requested.
+ __ Claim(claim, kByteSizeInBytes);
+
+ switch (push_method) {
+ case PushPopByFour:
+ // Push high-numbered registers first (to the highest addresses).
+ for (i = reg_count; i >= 4; i -= 4) {
+ __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
+ }
+ // Finish off the leftovers.
+ switch (i) {
+ case 3: __ Push(r[2], r[1], r[0]); break;
+ case 2: __ Push(r[1], r[0]); break;
+ case 1: __ Push(r[0]); break;
+ default: ASSERT(i == 0); break;
+ }
+ break;
+ case PushPopRegList:
+ __ PushSizeRegList(list, reg_size);
+ break;
+ }
+
+ // Clobber all the registers, to ensure that they get repopulated by Pop.
+ Clobber(&masm, list);
+
+ switch (pop_method) {
+ case PushPopByFour:
+ // Pop low-numbered registers first (from the lowest addresses).
+ for (i = 0; i <= (reg_count-4); i += 4) {
+ __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
+ }
+ // Finish off the leftovers.
+ switch (reg_count - i) {
+ case 3: __ Pop(r[i], r[i+1], r[i+2]); break;
+ case 2: __ Pop(r[i], r[i+1]); break;
+ case 1: __ Pop(r[i]); break;
+ default: ASSERT(i == reg_count); break;
+ }
+ break;
+ case PushPopRegList:
+ __ PopSizeRegList(list, reg_size);
+ break;
+ }
+
+ // Drop memory to restore jssp.
+ __ Drop(claim, kByteSizeInBytes);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ END();
+
+ RUN();
+
+ // Check that the register contents were preserved.
+ // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
+ // that the upper word was properly cleared by Pop.
+ literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+ for (int i = 0; i < reg_count; i++) {
+ if (x[i].IsZero()) {
+ ASSERT_EQUAL_64(0, x[i]);
+ } else {
+ ASSERT_EQUAL_64(literal_base * i, x[i]);
+ }
+ }
+
+ TEARDOWN();
+}
+
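A quick standalone check of the property described by the literal-base comments in the helper above: every multiple of the constant has distinct high and low 32-bit words, so a push or pop that swaps word halves can always be told apart from the correct result. This is a minimal plain-C++ sketch, independent of the test harness:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kLiteralBase = 0x0100001000100101UL;
      for (int i = 1; i <= 4; i++) {
        uint64_t value = kLiteralBase * i;
        uint32_t lo = static_cast<uint32_t>(value);
        uint32_t hi = static_cast<uint32_t>(value >> 32);
        // A word-swapped round trip (the kind of bug these push-pop tests are
        // meant to catch) never reproduces the original value.
        uint64_t swapped = (static_cast<uint64_t>(lo) << 32) | hi;
        std::printf("%d: 0x%016llx swapped: 0x%016llx (%s)\n", i,
                    static_cast<unsigned long long>(value),
                    static_cast<unsigned long long>(swapped),
                    value == swapped ? "not detectable" : "detectable");
      }
      return 0;
    }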
+
+TEST(push_pop_jssp_simple_32) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 0; count <= 8; count++) {
+ PushPopJsspSimpleHelper(count, claim, kWRegSize,
+ PushPopByFour, PushPopByFour);
+ PushPopJsspSimpleHelper(count, claim, kWRegSize,
+ PushPopByFour, PushPopRegList);
+ PushPopJsspSimpleHelper(count, claim, kWRegSize,
+ PushPopRegList, PushPopByFour);
+ PushPopJsspSimpleHelper(count, claim, kWRegSize,
+ PushPopRegList, PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
+ PushPopByFour, PushPopByFour);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
+ PushPopByFour, PushPopRegList);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
+ PushPopRegList, PushPopByFour);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
+ PushPopRegList, PushPopRegList);
+ }
+}
+
+
+TEST(push_pop_jssp_simple_64) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 0; count <= 8; count++) {
+ PushPopJsspSimpleHelper(count, claim, kXRegSize,
+ PushPopByFour, PushPopByFour);
+ PushPopJsspSimpleHelper(count, claim, kXRegSize,
+ PushPopByFour, PushPopRegList);
+ PushPopJsspSimpleHelper(count, claim, kXRegSize,
+ PushPopRegList, PushPopByFour);
+ PushPopJsspSimpleHelper(count, claim, kXRegSize,
+ PushPopRegList, PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
+ PushPopByFour, PushPopByFour);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
+ PushPopByFour, PushPopRegList);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
+ PushPopRegList, PushPopByFour);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
+ PushPopRegList, PushPopRegList);
+ }
+}
+
+
+// The maximum number of registers that can be used by the PushPopFPJssp* tests,
+// where a reg_count field is provided.
+static int const kPushPopFPJsspMaxRegCount = -1;
+
+// Test a simple push-pop pattern:
+// * Claim <claim> bytes to set the stack alignment.
+// * Push <reg_count> FP registers with size <reg_size>.
+// * Clobber the register contents.
+// * Pop <reg_count> FP registers to restore the original contents.
+// * Drop <claim> bytes to restore the original stack pointer.
+//
+// Different push and pop methods can be specified independently to test for
+// proper word-endian behaviour.
+static void PushPopFPJsspSimpleHelper(int reg_count,
+ int claim,
+ int reg_size,
+ PushPopMethod push_method,
+ PushPopMethod pop_method) {
+ SETUP();
+
+ START();
+
+ // We can use any floating-point register. None of them are reserved for
+ // debug code, for example.
+ static RegList const allowed = ~0;
+ if (reg_count == kPushPopFPJsspMaxRegCount) {
+ reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
+ }
+ // Work out which registers to use, based on reg_size.
+ FPRegister v[kNumberOfRegisters];
+ FPRegister d[kNumberOfRegisters];
+ RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
+ allowed);
+
+ // The literal base is chosen to have three useful properties:
+ // * When multiplied (using an integer) by small values (such as a register
+ // index), this value is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ // * It is never a floating-point NaN, and will therefore always compare
+ // equal to itself.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ int i;
+
+ // Initialize the registers, using X registers to load the literal.
+ __ Mov(x0, 0);
+ __ Mov(x1, literal_base);
+ for (i = 0; i < reg_count; i++) {
+ // Always write into the D register, to ensure that the upper word is
+ // properly ignored by Push when testing S registers.
+ __ Fmov(d[i], x0);
+ // Calculate the next literal.
+ __ Add(x0, x0, x1);
+ }
+
+ // Claim memory first, as requested.
+ __ Claim(claim, kByteSizeInBytes);
+
+ switch (push_method) {
+ case PushPopByFour:
+ // Push high-numbered registers first (to the highest addresses).
+ for (i = reg_count; i >= 4; i -= 4) {
+ __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
+ }
+ // Finish off the leftovers.
+ switch (i) {
+ case 3: __ Push(v[2], v[1], v[0]); break;
+ case 2: __ Push(v[1], v[0]); break;
+ case 1: __ Push(v[0]); break;
+ default: ASSERT(i == 0); break;
+ }
+ break;
+ case PushPopRegList:
+ __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
+ break;
+ }
+
+ // Clobber all the registers, to ensure that they get repopulated by Pop.
+ ClobberFP(&masm, list);
+
+ switch (pop_method) {
+ case PushPopByFour:
+ // Pop low-numbered registers first (from the lowest addresses).
+ for (i = 0; i <= (reg_count-4); i += 4) {
+ __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
+ }
+ // Finish off the leftovers.
+ switch (reg_count - i) {
+ case 3: __ Pop(v[i], v[i+1], v[i+2]); break;
+ case 2: __ Pop(v[i], v[i+1]); break;
+ case 1: __ Pop(v[i]); break;
+ default: ASSERT(i == reg_count); break;
+ }
+ break;
+ case PushPopRegList:
+ __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
+ break;
+ }
+
+ // Drop memory to restore jssp.
+ __ Drop(claim, kByteSizeInBytes);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ END();
+
+ RUN();
+
+ // Check that the register contents were preserved.
+ // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
+ // test that the upper word was properly cleared by Pop.
+ literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+ for (int i = 0; i < reg_count; i++) {
+ uint64_t literal = literal_base * i;
+ double expected;
+ memcpy(&expected, &literal, sizeof(expected));
+ ASSERT_EQUAL_FP64(expected, d[i]);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(push_pop_fp_jssp_simple_32) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 0; count <= 8; count++) {
+ PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
+ PushPopByFour, PushPopByFour);
+ PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
+ PushPopByFour, PushPopRegList);
+ PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
+ PushPopRegList, PushPopByFour);
+ PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
+ PushPopRegList, PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
+ PushPopByFour, PushPopByFour);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
+ PushPopByFour, PushPopRegList);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
+ PushPopRegList, PushPopByFour);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
+ PushPopRegList, PushPopRegList);
+ }
+}
+
+
+TEST(push_pop_fp_jssp_simple_64) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 0; count <= 8; count++) {
+ PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
+ PushPopByFour, PushPopByFour);
+ PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
+ PushPopByFour, PushPopRegList);
+ PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
+ PushPopRegList, PushPopByFour);
+ PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
+ PushPopRegList, PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
+ PushPopByFour, PushPopByFour);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
+ PushPopByFour, PushPopRegList);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
+ PushPopRegList, PushPopByFour);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
+ PushPopRegList, PushPopRegList);
+ }
+}
+
+
+// Push and pop data using an overlapping combination of Push/Pop and
+// RegList-based methods.
+static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
+ SETUP();
+
+ // Registers x8 and x9 are used by the macro assembler for debug code (for
+ // example in 'Pop'), so we can't use them here. We can't use jssp because it
+ // will be the stack pointer for this test.
+ static RegList const allowed =
+ ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
+ // Work out which registers to use, based on reg_size.
+ Register r[10];
+ Register x[10];
+ PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
+
+ // Calculate some handy register lists.
+ RegList r0_to_r3 = 0;
+ for (int i = 0; i <= 3; i++) {
+ r0_to_r3 |= x[i].Bit();
+ }
+ RegList r4_to_r5 = 0;
+ for (int i = 4; i <= 5; i++) {
+ r4_to_r5 |= x[i].Bit();
+ }
+ RegList r6_to_r9 = 0;
+ for (int i = 6; i <= 9; i++) {
+ r6_to_r9 |= x[i].Bit();
+ }
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ START();
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ // Claim memory first, as requested.
+ __ Claim(claim, kByteSizeInBytes);
+
+ __ Mov(x[3], literal_base * 3);
+ __ Mov(x[2], literal_base * 2);
+ __ Mov(x[1], literal_base * 1);
+ __ Mov(x[0], literal_base * 0);
+
+ __ PushSizeRegList(r0_to_r3, reg_size);
+ __ Push(r[3], r[2]);
+
+ Clobber(&masm, r0_to_r3);
+ __ PopSizeRegList(r0_to_r3, reg_size);
+
+ __ Push(r[2], r[1], r[3], r[0]);
+
+ Clobber(&masm, r4_to_r5);
+ __ Pop(r[4], r[5]);
+ Clobber(&masm, r6_to_r9);
+ __ Pop(r[6], r[7], r[8], r[9]);
+
+ // Drop memory to restore jssp.
+ __ Drop(claim, kByteSizeInBytes);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ END();
+
+ RUN();
+
+ // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
+ // that the upper word was properly cleared by Pop.
+ literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+
+ ASSERT_EQUAL_64(literal_base * 3, x[9]);
+ ASSERT_EQUAL_64(literal_base * 2, x[8]);
+ ASSERT_EQUAL_64(literal_base * 0, x[7]);
+ ASSERT_EQUAL_64(literal_base * 3, x[6]);
+ ASSERT_EQUAL_64(literal_base * 1, x[5]);
+ ASSERT_EQUAL_64(literal_base * 2, x[4]);
+
+ TEARDOWN();
+}
+
+
+TEST(push_pop_jssp_mixed_methods_64) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ PushPopJsspMixedMethodsHelper(claim, kXRegSize);
+ }
+}
+
+
+TEST(push_pop_jssp_mixed_methods_32) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ PushPopJsspMixedMethodsHelper(claim, kWRegSize);
+ }
+}
+
+
+// Push and pop data using overlapping X- and W-sized quantities.
+static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
+ // This test emits rather a lot of code.
+ SETUP_SIZE(BUF_SIZE * 2);
+
+ // Work out which registers to use, based on reg_size.
+ static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
+ if (reg_count == kPushPopJsspMaxRegCount) {
+ reg_count = CountSetBits(allowed, kNumberOfRegisters);
+ }
+ Register w[kNumberOfRegisters];
+ Register x[kNumberOfRegisters];
+ RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
+
+ // The number of W-sized slots we expect to pop. When we pop, we alternate
+ // between W and X registers, so we need reg_count*1.5 W-sized slots.
+ int const requested_w_slots = reg_count + reg_count / 2;
+
+ // Track what _should_ be on the stack, using W-sized slots.
+ static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
+ uint32_t stack[kMaxWSlots];
+ for (int i = 0; i < kMaxWSlots; i++) {
+ stack[i] = 0xdeadbeef;
+ }
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ static uint64_t const literal_base = 0x0100001000100101UL;
+ static uint64_t const literal_base_hi = literal_base >> 32;
+ static uint64_t const literal_base_lo = literal_base & 0xffffffff;
+ static uint64_t const literal_base_w = literal_base & 0xffffffff;
+
+ START();
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ // Initialize the registers.
+ for (int i = 0; i < reg_count; i++) {
+ // Always write into the X register, to ensure that the upper word is
+ // properly ignored by Push when testing W registers.
+ if (!x[i].IsZero()) {
+ __ Mov(x[i], literal_base * i);
+ }
+ }
+
+ // Claim memory first, as requested.
+ __ Claim(claim, kByteSizeInBytes);
+
+ // The push-pop pattern is as follows:
+ // Push: Pop:
+ // x[0](hi) -> w[0]
+ // x[0](lo) -> x[1](hi)
+ // w[1] -> x[1](lo)
+ // w[1] -> w[2]
+ // x[2](hi) -> x[2](hi)
+ // x[2](lo) -> x[2](lo)
+ // x[2](hi) -> w[3]
+ // x[2](lo) -> x[4](hi)
+ // x[2](hi) -> x[4](lo)
+ // x[2](lo) -> w[5]
+ // w[3] -> x[5](hi)
+ // w[3] -> x[6](lo)
+ // w[3] -> w[7]
+ // w[3] -> x[8](hi)
+ // x[4](hi) -> x[8](lo)
+ // x[4](lo) -> w[9]
+ // ... pattern continues ...
+ //
+ // That is, registers are pushed starting with the lower numbers,
+ // alternating between x and w registers, and pushing i%4+1 copies of each,
+ // where i is the register number.
+ // Registers are popped starting with the higher numbers, alternating
+ // between x and w registers and popping only one register at a time.
+ //
+ // This pattern provides a wide variety of alignment effects and overlaps.
+
+ // ---- Push ----
+
+ int active_w_slots = 0;
+ for (int i = 0; active_w_slots < requested_w_slots; i++) {
+ ASSERT(i < reg_count);
+ // In order to test various arguments to PushMultipleTimes, and to try to
+ // exercise different alignment and overlap effects, we push each
+ // register a different number of times.
+ int times = i % 4 + 1;
+ if (i & 1) {
+ // Push odd-numbered registers as W registers.
+ __ PushMultipleTimes(times, w[i]);
+ // Fill in the expected stack slots.
+ for (int j = 0; j < times; j++) {
+ if (w[i].Is(wzr)) {
+ // The zero register always writes zeroes.
+ stack[active_w_slots++] = 0;
+ } else {
+ stack[active_w_slots++] = literal_base_w * i;
+ }
+ }
+ } else {
+ // Push even-numbered registers as X registers.
+ __ PushMultipleTimes(times, x[i]);
+ // Fill in the expected stack slots.
+ for (int j = 0; j < times; j++) {
+ if (x[i].IsZero()) {
+ // The zero register always writes zeroes.
+ stack[active_w_slots++] = 0;
+ stack[active_w_slots++] = 0;
+ } else {
+ stack[active_w_slots++] = literal_base_hi * i;
+ stack[active_w_slots++] = literal_base_lo * i;
+ }
+ }
+ }
+ }
+ // Because we were pushing several registers at a time, we probably pushed
+ // more than we needed to.
+ if (active_w_slots > requested_w_slots) {
+ __ Drop(active_w_slots - requested_w_slots, kWRegSizeInBytes);
+ // Bump the number of active W-sized slots back to where it should be,
+ // and fill the empty space with a dummy value.
+ do {
+ stack[active_w_slots--] = 0xdeadbeef;
+ } while (active_w_slots > requested_w_slots);
+ }
+
+ // ---- Pop ----
+
+ Clobber(&masm, list);
+
+ // If popping an even number of registers, the first one will be X-sized.
+ // Otherwise, the first one will be W-sized.
+ bool next_is_64 = !(reg_count & 1);
+ for (int i = reg_count-1; i >= 0; i--) {
+ if (next_is_64) {
+ __ Pop(x[i]);
+ active_w_slots -= 2;
+ } else {
+ __ Pop(w[i]);
+ active_w_slots -= 1;
+ }
+ next_is_64 = !next_is_64;
+ }
+ ASSERT(active_w_slots == 0);
+
+ // Drop memory to restore jssp.
+ __ Drop(claim, kByteSizeInBytes);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ END();
+
+ RUN();
+
+ int slot = 0;
+ for (int i = 0; i < reg_count; i++) {
+ // Even-numbered registers were written as W registers.
+ // Odd-numbered registers were written as X registers.
+ bool expect_64 = (i & 1);
+ uint64_t expected;
+
+ if (expect_64) {
+ uint64_t hi = stack[slot++];
+ uint64_t lo = stack[slot++];
+ expected = (hi << 32) | lo;
+ } else {
+ expected = stack[slot++];
+ }
+
+ // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
+ // test that the upper word was properly cleared by Pop.
+ if (x[i].IsZero()) {
+ ASSERT_EQUAL_64(0, x[i]);
+ } else {
+ ASSERT_EQUAL_64(expected, x[i]);
+ }
+ }
+ ASSERT(slot == requested_w_slots);
+
+ TEARDOWN();
+}
+
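The reg_count + reg_count / 2 slot budget used by the helper above can be checked on the host: odd-indexed registers end up popped as X registers (two W-sized slots) and even-indexed ones as W registers (one slot), whichever parity reg_count has. A small sketch of that accounting:

    #include <cassert>

    int main() {
      for (int reg_count = 1; reg_count <= 28; reg_count++) {
        int requested_w_slots = reg_count + reg_count / 2;
        int popped_w_slots = 0;
        // Even-indexed registers are popped as W, odd-indexed as X.
        for (int i = 0; i < reg_count; i++) {
          popped_w_slots += (i & 1) ? 2 : 1;
        }
        assert(popped_w_slots == requested_w_slots);
      }
      return 0;
    }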
+
+TEST(push_pop_jssp_wx_overlap) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 1; count <= 8; count++) {
+ PushPopJsspWXOverlapHelper(count, claim);
+ PushPopJsspWXOverlapHelper(count, claim);
+ PushPopJsspWXOverlapHelper(count, claim);
+ PushPopJsspWXOverlapHelper(count, claim);
+ }
+ // Test with the maximum number of registers.
+ PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+ PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+ PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+ PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+ }
+}
+
+
+TEST(push_pop_csp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ ASSERT(csp.Is(__ StackPointer()));
+
+ __ Mov(x3, 0x3333333333333333UL);
+ __ Mov(x2, 0x2222222222222222UL);
+ __ Mov(x1, 0x1111111111111111UL);
+ __ Mov(x0, 0x0000000000000000UL);
+ __ Claim(2);
+ __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
+ __ Push(x3, x2);
+ __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
+ __ Push(x2, x1, x3, x0);
+ __ Pop(x4, x5);
+ __ Pop(x6, x7, x8, x9);
+
+ __ Claim(2);
+ __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
+ __ Push(w3, w1, w2, w0);
+ __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
+ __ Pop(w14, w15, w16, w17);
+
+ __ Claim(2);
+ __ Push(w2, w2, w1, w1);
+ __ Push(x3, x3);
+ __ Pop(w18, w19, w20, w21);
+ __ Pop(x22, x23);
+
+ __ Claim(2);
+ __ PushXRegList(x1.Bit() | x22.Bit());
+ __ PopXRegList(x24.Bit() | x26.Bit());
+
+ __ Claim(2);
+ __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
+ __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());
+
+ __ Claim(2);
+ __ PushXRegList(0);
+ __ PopXRegList(0);
+ __ PushXRegList(0xffffffff);
+ __ PopXRegList(0xffffffff);
+ __ Drop(12);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1111111111111111UL, x3);
+ ASSERT_EQUAL_64(0x0000000000000000UL, x2);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x1);
+ ASSERT_EQUAL_64(0x2222222222222222UL, x0);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x9);
+ ASSERT_EQUAL_64(0x2222222222222222UL, x8);
+ ASSERT_EQUAL_64(0x0000000000000000UL, x7);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x6);
+ ASSERT_EQUAL_64(0x1111111111111111UL, x5);
+ ASSERT_EQUAL_64(0x2222222222222222UL, x4);
+
+ ASSERT_EQUAL_32(0x11111111U, w13);
+ ASSERT_EQUAL_32(0x33333333U, w12);
+ ASSERT_EQUAL_32(0x00000000U, w11);
+ ASSERT_EQUAL_32(0x22222222U, w10);
+ ASSERT_EQUAL_32(0x11111111U, w17);
+ ASSERT_EQUAL_32(0x00000000U, w16);
+ ASSERT_EQUAL_32(0x33333333U, w15);
+ ASSERT_EQUAL_32(0x22222222U, w14);
+
+ ASSERT_EQUAL_32(0x11111111U, w18);
+ ASSERT_EQUAL_32(0x11111111U, w19);
+ ASSERT_EQUAL_32(0x11111111U, w20);
+ ASSERT_EQUAL_32(0x11111111U, w21);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x22);
+ ASSERT_EQUAL_64(0x0000000000000000UL, x23);
+
+ ASSERT_EQUAL_64(0x3333333333333333UL, x24);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x26);
+
+ ASSERT_EQUAL_32(0x33333333U, w25);
+ ASSERT_EQUAL_32(0x00000000U, w27);
+ ASSERT_EQUAL_32(0x22222222U, w28);
+ ASSERT_EQUAL_32(0x33333333U, w29);
+ TEARDOWN();
+}
+
+
+TEST(jump_both_smi) {
+ INIT_V8();
+ SETUP();
+
+ Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
+ Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
+ Label return1, return2, return3, done;
+
+ START();
+
+ __ Mov(x0, 0x5555555500000001UL); // A pointer.
+ __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
+ __ Mov(x2, 0x1234567800000000UL); // A smi.
+ __ Mov(x3, 0x8765432100000000UL); // A smi.
+ __ Mov(x4, 0xdead);
+ __ Mov(x5, 0xdead);
+ __ Mov(x6, 0xdead);
+ __ Mov(x7, 0xdead);
+
+ __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
+ __ Bind(&return1);
+ __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
+ __ Bind(&return2);
+ __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
+ __ Bind(&return3);
+ __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);
+
+ __ Bind(&cond_fail_00);
+ __ Mov(x4, 0);
+ __ B(&return1);
+ __ Bind(&cond_pass_00);
+ __ Mov(x4, 1);
+ __ B(&return1);
+
+ __ Bind(&cond_fail_01);
+ __ Mov(x5, 0);
+ __ B(&return2);
+ __ Bind(&cond_pass_01);
+ __ Mov(x5, 1);
+ __ B(&return2);
+
+ __ Bind(&cond_fail_10);
+ __ Mov(x6, 0);
+ __ B(&return3);
+ __ Bind(&cond_pass_10);
+ __ Mov(x6, 1);
+ __ B(&return3);
+
+ __ Bind(&cond_fail_11);
+ __ Mov(x7, 0);
+ __ B(&done);
+ __ Bind(&cond_pass_11);
+ __ Mov(x7, 1);
+
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x5555555500000001UL, x0);
+ ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+ ASSERT_EQUAL_64(0x1234567800000000UL, x2);
+ ASSERT_EQUAL_64(0x8765432100000000UL, x3);
+ ASSERT_EQUAL_64(0, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0, x6);
+ ASSERT_EQUAL_64(1, x7);
+
+ TEARDOWN();
+}
+
+
+TEST(jump_either_smi) {
+ INIT_V8();
+ SETUP();
+
+ Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
+ Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
+ Label return1, return2, return3, done;
+
+ START();
+
+ __ Mov(x0, 0x5555555500000001UL); // A pointer.
+ __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
+ __ Mov(x2, 0x1234567800000000UL); // A smi.
+ __ Mov(x3, 0x8765432100000000UL); // A smi.
+ __ Mov(x4, 0xdead);
+ __ Mov(x5, 0xdead);
+ __ Mov(x6, 0xdead);
+ __ Mov(x7, 0xdead);
+
+ __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
+ __ Bind(&return1);
+ __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
+ __ Bind(&return2);
+ __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
+ __ Bind(&return3);
+ __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);
+
+ __ Bind(&cond_fail_00);
+ __ Mov(x4, 0);
+ __ B(&return1);
+ __ Bind(&cond_pass_00);
+ __ Mov(x4, 1);
+ __ B(&return1);
+
+ __ Bind(&cond_fail_01);
+ __ Mov(x5, 0);
+ __ B(&return2);
+ __ Bind(&cond_pass_01);
+ __ Mov(x5, 1);
+ __ B(&return2);
+
+ __ Bind(&cond_fail_10);
+ __ Mov(x6, 0);
+ __ B(&return3);
+ __ Bind(&cond_pass_10);
+ __ Mov(x6, 1);
+ __ B(&return3);
+
+ __ Bind(&cond_fail_11);
+ __ Mov(x7, 0);
+ __ B(&done);
+ __ Bind(&cond_pass_11);
+ __ Mov(x7, 1);
+
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x5555555500000001UL, x0);
+ ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+ ASSERT_EQUAL_64(0x1234567800000000UL, x2);
+ ASSERT_EQUAL_64(0x8765432100000000UL, x3);
+ ASSERT_EQUAL_64(0, x4);
+ ASSERT_EQUAL_64(1, x5);
+ ASSERT_EQUAL_64(1, x6);
+ ASSERT_EQUAL_64(1, x7);
+
+ TEARDOWN();
+}
+
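The constants in these two tests follow V8's 64-bit smi layout: the payload sits in the upper 32 bits and tag bit 0 is clear for smis and set for heap pointers. As a rough host-side model (the OR/AND trick below illustrates what the JumpIfBothSmi / JumpIfEitherSmi macros need to decide; it is an assumption, not a quote of the macro assembler):

    #include <cstdint>
    #include <cassert>

    constexpr uint64_t kSmiTagMask = 1;  // assumed tag bit

    bool BothSmi(uint64_t a, uint64_t b) { return ((a | b) & kSmiTagMask) == 0; }
    bool EitherSmi(uint64_t a, uint64_t b) { return ((a & b) & kSmiTagMask) == 0; }

    int main() {
      uint64_t pointer = 0x5555555500000001UL;  // tag bit set
      uint64_t smi = 0x1234567800000000UL;      // tag bit clear
      assert(!BothSmi(pointer, smi));    // jump_both_smi: x5 ends up 0
      assert(EitherSmi(pointer, smi));   // jump_either_smi: x5 ends up 1
      assert(BothSmi(smi, smi));
      assert(!EitherSmi(pointer, pointer));
      return 0;
    }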
+
+TEST(noreg) {
+ // This test doesn't generate any code, but it verifies some invariants
+ // related to NoReg.
+ CHECK(NoReg.Is(NoFPReg));
+ CHECK(NoFPReg.Is(NoReg));
+ CHECK(NoReg.Is(NoCPUReg));
+ CHECK(NoCPUReg.Is(NoReg));
+ CHECK(NoFPReg.Is(NoCPUReg));
+ CHECK(NoCPUReg.Is(NoFPReg));
+
+ CHECK(NoReg.IsNone());
+ CHECK(NoFPReg.IsNone());
+ CHECK(NoCPUReg.IsNone());
+}
+
+
+TEST(isvalid) {
+ // This test doesn't generate any code, but it verifies some invariants
+ // related to IsValid().
+ CHECK(!NoReg.IsValid());
+ CHECK(!NoFPReg.IsValid());
+ CHECK(!NoCPUReg.IsValid());
+
+ CHECK(x0.IsValid());
+ CHECK(w0.IsValid());
+ CHECK(x30.IsValid());
+ CHECK(w30.IsValid());
+ CHECK(xzr.IsValid());
+ CHECK(wzr.IsValid());
+
+ CHECK(csp.IsValid());
+ CHECK(wcsp.IsValid());
+
+ CHECK(d0.IsValid());
+ CHECK(s0.IsValid());
+ CHECK(d31.IsValid());
+ CHECK(s31.IsValid());
+
+ CHECK(x0.IsValidRegister());
+ CHECK(w0.IsValidRegister());
+ CHECK(xzr.IsValidRegister());
+ CHECK(wzr.IsValidRegister());
+ CHECK(csp.IsValidRegister());
+ CHECK(wcsp.IsValidRegister());
+ CHECK(!x0.IsValidFPRegister());
+ CHECK(!w0.IsValidFPRegister());
+ CHECK(!xzr.IsValidFPRegister());
+ CHECK(!wzr.IsValidFPRegister());
+ CHECK(!csp.IsValidFPRegister());
+ CHECK(!wcsp.IsValidFPRegister());
+
+ CHECK(d0.IsValidFPRegister());
+ CHECK(s0.IsValidFPRegister());
+ CHECK(!d0.IsValidRegister());
+ CHECK(!s0.IsValidRegister());
+
+ // Test the same as before, but using CPURegister types. This shouldn't make
+ // any difference.
+ CHECK(static_cast<CPURegister>(x0).IsValid());
+ CHECK(static_cast<CPURegister>(w0).IsValid());
+ CHECK(static_cast<CPURegister>(x30).IsValid());
+ CHECK(static_cast<CPURegister>(w30).IsValid());
+ CHECK(static_cast<CPURegister>(xzr).IsValid());
+ CHECK(static_cast<CPURegister>(wzr).IsValid());
+
+ CHECK(static_cast<CPURegister>(csp).IsValid());
+ CHECK(static_cast<CPURegister>(wcsp).IsValid());
+
+ CHECK(static_cast<CPURegister>(d0).IsValid());
+ CHECK(static_cast<CPURegister>(s0).IsValid());
+ CHECK(static_cast<CPURegister>(d31).IsValid());
+ CHECK(static_cast<CPURegister>(s31).IsValid());
+
+ CHECK(static_cast<CPURegister>(x0).IsValidRegister());
+ CHECK(static_cast<CPURegister>(w0).IsValidRegister());
+ CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
+ CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
+ CHECK(static_cast<CPURegister>(csp).IsValidRegister());
+ CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
+ CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());
+
+ CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
+ CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
+ CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
+}
+
+
+TEST(cpureglist_utils_x) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test a list of X registers.
+ CPURegList test(x0, x1, x2, x3);
+
+ CHECK(test.IncludesAliasOf(x0));
+ CHECK(test.IncludesAliasOf(x1));
+ CHECK(test.IncludesAliasOf(x2));
+ CHECK(test.IncludesAliasOf(x3));
+ CHECK(test.IncludesAliasOf(w0));
+ CHECK(test.IncludesAliasOf(w1));
+ CHECK(test.IncludesAliasOf(w2));
+ CHECK(test.IncludesAliasOf(w3));
+
+ CHECK(!test.IncludesAliasOf(x4));
+ CHECK(!test.IncludesAliasOf(x30));
+ CHECK(!test.IncludesAliasOf(xzr));
+ CHECK(!test.IncludesAliasOf(csp));
+ CHECK(!test.IncludesAliasOf(w4));
+ CHECK(!test.IncludesAliasOf(w30));
+ CHECK(!test.IncludesAliasOf(wzr));
+ CHECK(!test.IncludesAliasOf(wcsp));
+
+ CHECK(!test.IncludesAliasOf(d0));
+ CHECK(!test.IncludesAliasOf(d1));
+ CHECK(!test.IncludesAliasOf(d2));
+ CHECK(!test.IncludesAliasOf(d3));
+ CHECK(!test.IncludesAliasOf(s0));
+ CHECK(!test.IncludesAliasOf(s1));
+ CHECK(!test.IncludesAliasOf(s2));
+ CHECK(!test.IncludesAliasOf(s3));
+
+ CHECK(!test.IsEmpty());
+
+ CHECK(test.type() == x0.type());
+
+ CHECK(test.PopHighestIndex().Is(x3));
+ CHECK(test.PopLowestIndex().Is(x0));
+
+ CHECK(test.IncludesAliasOf(x1));
+ CHECK(test.IncludesAliasOf(x2));
+ CHECK(test.IncludesAliasOf(w1));
+ CHECK(test.IncludesAliasOf(w2));
+ CHECK(!test.IncludesAliasOf(x0));
+ CHECK(!test.IncludesAliasOf(x3));
+ CHECK(!test.IncludesAliasOf(w0));
+ CHECK(!test.IncludesAliasOf(w3));
+
+ CHECK(test.PopHighestIndex().Is(x2));
+ CHECK(test.PopLowestIndex().Is(x1));
+
+ CHECK(!test.IncludesAliasOf(x1));
+ CHECK(!test.IncludesAliasOf(x2));
+ CHECK(!test.IncludesAliasOf(w1));
+ CHECK(!test.IncludesAliasOf(w2));
+
+ CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_w) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test a list of W registers.
+ CPURegList test(w10, w11, w12, w13);
+
+ CHECK(test.IncludesAliasOf(x10));
+ CHECK(test.IncludesAliasOf(x11));
+ CHECK(test.IncludesAliasOf(x12));
+ CHECK(test.IncludesAliasOf(x13));
+ CHECK(test.IncludesAliasOf(w10));
+ CHECK(test.IncludesAliasOf(w11));
+ CHECK(test.IncludesAliasOf(w12));
+ CHECK(test.IncludesAliasOf(w13));
+
+ CHECK(!test.IncludesAliasOf(x0));
+ CHECK(!test.IncludesAliasOf(x9));
+ CHECK(!test.IncludesAliasOf(x14));
+ CHECK(!test.IncludesAliasOf(x30));
+ CHECK(!test.IncludesAliasOf(xzr));
+ CHECK(!test.IncludesAliasOf(csp));
+ CHECK(!test.IncludesAliasOf(w0));
+ CHECK(!test.IncludesAliasOf(w9));
+ CHECK(!test.IncludesAliasOf(w14));
+ CHECK(!test.IncludesAliasOf(w30));
+ CHECK(!test.IncludesAliasOf(wzr));
+ CHECK(!test.IncludesAliasOf(wcsp));
+
+ CHECK(!test.IncludesAliasOf(d10));
+ CHECK(!test.IncludesAliasOf(d11));
+ CHECK(!test.IncludesAliasOf(d12));
+ CHECK(!test.IncludesAliasOf(d13));
+ CHECK(!test.IncludesAliasOf(s10));
+ CHECK(!test.IncludesAliasOf(s11));
+ CHECK(!test.IncludesAliasOf(s12));
+ CHECK(!test.IncludesAliasOf(s13));
+
+ CHECK(!test.IsEmpty());
+
+ CHECK(test.type() == w10.type());
+
+ CHECK(test.PopHighestIndex().Is(w13));
+ CHECK(test.PopLowestIndex().Is(w10));
+
+ CHECK(test.IncludesAliasOf(x11));
+ CHECK(test.IncludesAliasOf(x12));
+ CHECK(test.IncludesAliasOf(w11));
+ CHECK(test.IncludesAliasOf(w12));
+ CHECK(!test.IncludesAliasOf(x10));
+ CHECK(!test.IncludesAliasOf(x13));
+ CHECK(!test.IncludesAliasOf(w10));
+ CHECK(!test.IncludesAliasOf(w13));
+
+ CHECK(test.PopHighestIndex().Is(w12));
+ CHECK(test.PopLowestIndex().Is(w11));
+
+ CHECK(!test.IncludesAliasOf(x11));
+ CHECK(!test.IncludesAliasOf(x12));
+ CHECK(!test.IncludesAliasOf(w11));
+ CHECK(!test.IncludesAliasOf(w12));
+
+ CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_d) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test a list of D registers.
+ CPURegList test(d20, d21, d22, d23);
+
+ CHECK(test.IncludesAliasOf(d20));
+ CHECK(test.IncludesAliasOf(d21));
+ CHECK(test.IncludesAliasOf(d22));
+ CHECK(test.IncludesAliasOf(d23));
+ CHECK(test.IncludesAliasOf(s20));
+ CHECK(test.IncludesAliasOf(s21));
+ CHECK(test.IncludesAliasOf(s22));
+ CHECK(test.IncludesAliasOf(s23));
+
+ CHECK(!test.IncludesAliasOf(d0));
+ CHECK(!test.IncludesAliasOf(d19));
+ CHECK(!test.IncludesAliasOf(d24));
+ CHECK(!test.IncludesAliasOf(d31));
+ CHECK(!test.IncludesAliasOf(s0));
+ CHECK(!test.IncludesAliasOf(s19));
+ CHECK(!test.IncludesAliasOf(s24));
+ CHECK(!test.IncludesAliasOf(s31));
+
+ CHECK(!test.IncludesAliasOf(x20));
+ CHECK(!test.IncludesAliasOf(x21));
+ CHECK(!test.IncludesAliasOf(x22));
+ CHECK(!test.IncludesAliasOf(x23));
+ CHECK(!test.IncludesAliasOf(w20));
+ CHECK(!test.IncludesAliasOf(w21));
+ CHECK(!test.IncludesAliasOf(w22));
+ CHECK(!test.IncludesAliasOf(w23));
+
+ CHECK(!test.IncludesAliasOf(xzr));
+ CHECK(!test.IncludesAliasOf(wzr));
+ CHECK(!test.IncludesAliasOf(csp));
+ CHECK(!test.IncludesAliasOf(wcsp));
+
+ CHECK(!test.IsEmpty());
+
+ CHECK(test.type() == d20.type());
+
+ CHECK(test.PopHighestIndex().Is(d23));
+ CHECK(test.PopLowestIndex().Is(d20));
+
+ CHECK(test.IncludesAliasOf(d21));
+ CHECK(test.IncludesAliasOf(d22));
+ CHECK(test.IncludesAliasOf(s21));
+ CHECK(test.IncludesAliasOf(s22));
+ CHECK(!test.IncludesAliasOf(d20));
+ CHECK(!test.IncludesAliasOf(d23));
+ CHECK(!test.IncludesAliasOf(s20));
+ CHECK(!test.IncludesAliasOf(s23));
+
+ CHECK(test.PopHighestIndex().Is(d22));
+ CHECK(test.PopLowestIndex().Is(d21));
+
+ CHECK(!test.IncludesAliasOf(d21));
+ CHECK(!test.IncludesAliasOf(d22));
+ CHECK(!test.IncludesAliasOf(s21));
+ CHECK(!test.IncludesAliasOf(s22));
+
+ CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_s) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test a list of S registers.
+ CPURegList test(s20, s21, s22, s23);
+
+ // The type and size mechanisms are already covered, so here we just test
+ // that lists of S registers alias individual D registers.
+
+ CHECK(test.IncludesAliasOf(d20));
+ CHECK(test.IncludesAliasOf(d21));
+ CHECK(test.IncludesAliasOf(d22));
+ CHECK(test.IncludesAliasOf(d23));
+ CHECK(test.IncludesAliasOf(s20));
+ CHECK(test.IncludesAliasOf(s21));
+ CHECK(test.IncludesAliasOf(s22));
+ CHECK(test.IncludesAliasOf(s23));
+}
+
+
+TEST(cpureglist_utils_empty) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test an empty list.
+ // Empty lists can have type and size properties. Check that we can create
+ // them, and that they are empty.
+ CPURegList reg32(CPURegister::kRegister, kWRegSize, 0);
+ CPURegList reg64(CPURegister::kRegister, kXRegSize, 0);
+ CPURegList fpreg32(CPURegister::kFPRegister, kSRegSize, 0);
+ CPURegList fpreg64(CPURegister::kFPRegister, kDRegSize, 0);
+
+ CHECK(reg32.IsEmpty());
+ CHECK(reg64.IsEmpty());
+ CHECK(fpreg32.IsEmpty());
+ CHECK(fpreg64.IsEmpty());
+
+ CHECK(reg32.PopLowestIndex().IsNone());
+ CHECK(reg64.PopLowestIndex().IsNone());
+ CHECK(fpreg32.PopLowestIndex().IsNone());
+ CHECK(fpreg64.PopLowestIndex().IsNone());
+
+ CHECK(reg32.PopHighestIndex().IsNone());
+ CHECK(reg64.PopHighestIndex().IsNone());
+ CHECK(fpreg32.PopHighestIndex().IsNone());
+ CHECK(fpreg64.PopHighestIndex().IsNone());
+
+ CHECK(reg32.IsEmpty());
+ CHECK(reg64.IsEmpty());
+ CHECK(fpreg32.IsEmpty());
+ CHECK(fpreg64.IsEmpty());
+}
+
+
+TEST(printf) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ char const * test_plain_string = "Printf with no arguments.\n";
+ char const * test_substring = "'This is a substring.'";
+ RegisterDump before;
+
+ // Initialize x29 to the value of the stack pointer. We will use x29 as a
+ // temporary stack pointer later, and initializing it in this way allows the
+ // RegisterDump check to pass.
+ __ Mov(x29, __ StackPointer());
+
+ // Test simple integer arguments.
+ __ Mov(x0, 1234);
+ __ Mov(x1, 0x1234);
+
+ // Test simple floating-point arguments.
+ __ Fmov(d0, 1.234);
+
+ // Test pointer (string) arguments.
+ __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
+
+ // Test the maximum number of arguments, and sign extension.
+ __ Mov(w3, 0xffffffff);
+ __ Mov(w4, 0xffffffff);
+ __ Mov(x5, 0xffffffffffffffff);
+ __ Mov(x6, 0xffffffffffffffff);
+ __ Fmov(s1, 1.234);
+ __ Fmov(s2, 2.345);
+ __ Fmov(d3, 3.456);
+ __ Fmov(d4, 4.567);
+
+ // Test printing callee-saved registers.
+ __ Mov(x28, 0x123456789abcdef);
+ __ Fmov(d10, 42.0);
+
+ // Test with three arguments.
+ __ Mov(x10, 3);
+ __ Mov(x11, 40);
+ __ Mov(x12, 500);
+
+ // x8 and x9 are used by debug code in part of the macro assembler. However,
+ // Printf guarantees to preserve them (so we can use Printf in debug code),
+ // and we need to test that they are properly preserved. The above code
+ // shouldn't need to use them, but we initialize x8 and x9 last to be on the
+ // safe side. This test still assumes that none of the code from
+ // before->Dump() to the end of the test can clobber x8 or x9, so where
+ // possible we use the Assembler directly to be safe.
+ __ orr(x8, xzr, 0x8888888888888888);
+ __ orr(x9, xzr, 0x9999999999999999);
+
+ // Check that we don't clobber any registers, except those that we explicitly
+ // write results into.
+ before.Dump(&masm);
+
+ __ Printf(test_plain_string); // NOLINT(runtime/printf)
+ __ Printf("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
+ __ Printf("d0: %f\n", d0);
+ __ Printf("Test %%s: %s\n", x2);
+ __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+ "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
+ w3, w4, x5, x6);
+ __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
+ __ Printf("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
+ __ Printf("%g\n", d10);
+
+ // Test with a different stack pointer.
+ const Register old_stack_pointer = __ StackPointer();
+ __ mov(x29, old_stack_pointer);
+ __ SetStackPointer(x29);
+ __ Printf("old_stack_pointer: 0x%016" PRIx64 "\n", old_stack_pointer);
+ __ mov(old_stack_pointer, __ StackPointer());
+ __ SetStackPointer(old_stack_pointer);
+
+ __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
+
+ END();
+ RUN();
+
+ // We cannot easily test the output of the Printf sequences, and because
+ // Printf preserves all registers by default, we can't look at the number of
+ // bytes that were printed. However, the printf_no_preserve test should check
+ // that, and here we just test that we didn't clobber any registers.
+ ASSERT_EQUAL_REGISTERS(before);
+
+ TEARDOWN();
+}
+
+
+TEST(printf_no_preserve) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ char const * test_plain_string = "Printf with no arguments.\n";
+ char const * test_substring = "'This is a substring.'";
+
+ __ PrintfNoPreserve(test_plain_string); // NOLINT(runtime/printf)
+ __ Mov(x19, x0);
+
+ // Test simple integer arguments.
+ __ Mov(x0, 1234);
+ __ Mov(x1, 0x1234);
+ __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
+ __ Mov(x20, x0);
+
+ // Test simple floating-point arguments.
+ __ Fmov(d0, 1.234);
+ __ PrintfNoPreserve("d0: %f\n", d0);
+ __ Mov(x21, x0);
+
+ // Test pointer (string) arguments.
+ __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
+ __ PrintfNoPreserve("Test %%s: %s\n", x2);
+ __ Mov(x22, x0);
+
+ // Test the maximum number of arguments, and sign extension.
+ __ Mov(w3, 0xffffffff);
+ __ Mov(w4, 0xffffffff);
+ __ Mov(x5, 0xffffffffffffffff);
+ __ Mov(x6, 0xffffffffffffffff);
+ __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+ "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
+ w3, w4, x5, x6);
+ __ Mov(x23, x0);
+
+ __ Fmov(s1, 1.234);
+ __ Fmov(s2, 2.345);
+ __ Fmov(d3, 3.456);
+ __ Fmov(d4, 4.567);
+ __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
+ __ Mov(x24, x0);
+
+ // Test printing callee-saved registers.
+ __ Mov(x28, 0x123456789abcdef);
+ __ PrintfNoPreserve("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
+ __ Mov(x25, x0);
+
+ __ Fmov(d10, 42.0);
+ __ PrintfNoPreserve("%g\n", d10);
+ __ Mov(x26, x0);
+
+ // Test with a different stack pointer.
+ const Register old_stack_pointer = __ StackPointer();
+ __ Mov(x29, old_stack_pointer);
+ __ SetStackPointer(x29);
+
+ __ PrintfNoPreserve("old_stack_pointer: 0x%016" PRIx64 "\n",
+ old_stack_pointer);
+ __ Mov(x27, x0);
+
+ __ Mov(old_stack_pointer, __ StackPointer());
+ __ SetStackPointer(old_stack_pointer);
+
+ // Test with three arguments.
+ __ Mov(x3, 3);
+ __ Mov(x4, 40);
+ __ Mov(x5, 500);
+ __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
+ __ Mov(x28, x0);
+
+ END();
+ RUN();
+
+ // We cannot easily test the exact output of the Printf sequences, but we can
+ // use the return code to check that the string length was correct.
+
+ // Printf with no arguments.
+ ASSERT_EQUAL_64(strlen(test_plain_string), x19);
+ // x0: 1234, x1: 0x00001234
+ ASSERT_EQUAL_64(25, x20);
+ // d0: 1.234000
+ ASSERT_EQUAL_64(13, x21);
+ // Test %s: 'This is a substring.'
+ ASSERT_EQUAL_64(32, x22);
+ // w3(uint32): 4294967295
+ // w4(int32): -1
+ // x5(uint64): 18446744073709551615
+ // x6(int64): -1
+ ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
+ // %f: 1.234000
+ // %g: 2.345
+ // %e: 3.456000e+00
+ // %E: 4.567000E+00
+ ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
+ // 0x89abcdef, 0x0123456789abcdef
+ ASSERT_EQUAL_64(31, x25);
+ // 42
+ ASSERT_EQUAL_64(3, x26);
+ // old_stack_pointer: 0x00007fb037ae2370
+ // Note: This is an example value, but the field width is fixed here so the
+ // string length is still predictable.
+ ASSERT_EQUAL_64(38, x27);
+ // 3=3, 4=40, 5=500
+ ASSERT_EQUAL_64(17, x28);
+
+ TEARDOWN();
+}
+
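The byte counts asserted above follow directly from the format strings, since PrintfNoPreserve leaves printf's return value (the number of characters written) in x0. A few of them can be reproduced with the host C library; a small standalone check:

    #include <cinttypes>
    #include <cstdio>
    #include <cassert>

    int main() {
      // snprintf(nullptr, 0, ...) returns the length that would be printed.
      int n1 = std::snprintf(nullptr, 0, "x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n",
                             static_cast<int64_t>(1234),
                             static_cast<uint64_t>(0x1234));
      assert(n1 == 25);   // "x0: 1234, x1: 0x00001234\n"
      int n2 = std::snprintf(nullptr, 0, "d0: %f\n", 1.234);
      assert(n2 == 13);   // "d0: 1.234000\n"
      int n3 = std::snprintf(nullptr, 0, "3=%u, 4=%u, 5=%u\n", 3u, 40u, 500u);
      assert(n3 == 17);   // "3=3, 4=40, 5=500\n"
      return 0;
    }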
+
+// This is a V8-specific test.
+static void CopyFieldsHelper(CPURegList temps) {
+ static const uint64_t kLiteralBase = 0x0100001000100101UL;
+ static const uint64_t src[] = {kLiteralBase * 1,
+ kLiteralBase * 2,
+ kLiteralBase * 3,
+ kLiteralBase * 4,
+ kLiteralBase * 5,
+ kLiteralBase * 6,
+ kLiteralBase * 7,
+ kLiteralBase * 8,
+ kLiteralBase * 9,
+ kLiteralBase * 10,
+ kLiteralBase * 11};
+ static const uint64_t src_tagged =
+ reinterpret_cast<uint64_t>(src) + kHeapObjectTag;
+
+ static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
+ uint64_t* dst[kTestCount];
+ uint64_t dst_tagged[kTestCount];
+
+ // The first test will be to copy 0 fields. The destination (and source)
+ // should not be accessed in any way.
+ dst[0] = NULL;
+ dst_tagged[0] = kHeapObjectTag;
+
+ // Allocate memory for each other test. Each test <n> will have <n> fields.
+ // This is intended to exercise as many paths in CopyFields as possible.
+ for (unsigned i = 1; i < kTestCount; i++) {
+ dst[i] = new uint64_t[i];
+ memset(dst[i], 0, i * sizeof(kLiteralBase));
+ dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
+ }
+
+ SETUP();
+ START();
+
+ __ Mov(x0, dst_tagged[0]);
+ __ Mov(x1, 0);
+ __ CopyFields(x0, x1, temps, 0);
+ for (unsigned i = 1; i < kTestCount; i++) {
+ __ Mov(x0, dst_tagged[i]);
+ __ Mov(x1, src_tagged);
+ __ CopyFields(x0, x1, temps, i);
+ }
+
+ END();
+ RUN();
+ TEARDOWN();
+
+ for (unsigned i = 1; i < kTestCount; i++) {
+ for (unsigned j = 0; j < i; j++) {
+ CHECK(src[j] == dst[i][j]);
+ }
+ delete [] dst[i];
+ }
+}
+
+
+// This is a V8-specific test.
+TEST(copyfields) {
+ INIT_V8();
+ CopyFieldsHelper(CPURegList(x10));
+ CopyFieldsHelper(CPURegList(x10, x11));
+ CopyFieldsHelper(CPURegList(x10, x11, x12));
+ CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
+}
+
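CopyFieldsHelper hands CopyFields tagged pointers by adding kHeapObjectTag to the raw addresses. Assuming the usual V8 value of 1 for that tag (the low pointer bit), tagging and untagging are just an add and a subtract; a minimal sketch:

    #include <cstdint>
    #include <cassert>

    int main() {
      const uintptr_t kHeapObjectTag = 1;  // assumed value of the V8 constant
      uint64_t fields[4] = {0, 0, 0, 0};
      uintptr_t tagged = reinterpret_cast<uintptr_t>(fields) + kHeapObjectTag;
      // Untagging is the inverse subtraction, performed before any field
      // access so loads and stores see the real, aligned address.
      uint64_t* untagged = reinterpret_cast<uint64_t*>(tagged - kHeapObjectTag);
      assert(untagged == fields);
      return 0;
    }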
+
+static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
+ SETUP();
+
+ START();
+ Label end, slow;
+ __ Mov(x2, 0xc001c0de);
+ __ Mov(x1, value);
+ __ SmiTag(x1);
+ __ SmiAbs(x1, &slow);
+ __ SmiUntag(x1);
+ __ B(&end);
+
+ __ Bind(&slow);
+ __ Mov(x2, 0xbad);
+
+ __ Bind(&end);
+ END();
+
+ RUN();
+
+ if (must_fail) {
+ // We tested an invalid conversion. The code must have jumped to the slow path.
+ ASSERT_EQUAL_64(0xbad, x2);
+ } else {
+ // The conversion is valid, check the result.
+ int32_t result = (value >= 0) ? value : -value;
+ ASSERT_EQUAL_64(result, x1);
+
+ // Check that we didn't jump to the slow path.
+ ASSERT_EQUAL_64(0xc001c0de, x2);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(smi_abs) {
+ INIT_V8();
+ // Simple and edge cases.
+ DoSmiAbsTest(0);
+ DoSmiAbsTest(0x12345);
+ DoSmiAbsTest(0x40000000);
+ DoSmiAbsTest(0x7fffffff);
+ DoSmiAbsTest(-1);
+ DoSmiAbsTest(-12345);
+ DoSmiAbsTest(0x80000001);
+
+ // Check that the most negative SMI is detected.
+ DoSmiAbsTest(0x80000000, true);
+}
+
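The single expected failure above, 0x80000000, is the smi payload whose absolute value does not fit back into 32 bits, so SmiAbs has to branch to the slow path. A short host-side illustration of the overflow (the negation is widened first, since negating INT32_MIN as a 32-bit value is undefined in C++):

    #include <cstdint>
    #include <cinttypes>
    #include <climits>
    #include <cstdio>

    int main() {
      int32_t smi_min = INT32_MIN;                       // 0x80000000
      int64_t negated = -static_cast<int64_t>(smi_min);  // +2147483648
      std::printf("-INT32_MIN = %" PRId64 ", fits in int32_t: %s\n",
                  negated, negated <= INT32_MAX ? "yes" : "no");  // prints "no"
      return 0;
    }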
+
+TEST(blr_lr) {
+ // A simple test to check that the simulator correctly handles "blr lr".
+ INIT_V8();
+ SETUP();
+
+ START();
+ Label target;
+ Label end;
+
+ __ Mov(x0, 0x0);
+ __ Adr(lr, &target);
+
+ __ Blr(lr);
+ __ Mov(x0, 0xdeadbeef);
+ __ B(&end);
+
+ __ Bind(&target);
+ __ Mov(x0, 0xc001c0de);
+
+ __ Bind(&end);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xc001c0de, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(barriers) {
+ // Generate all supported barriers. This is just a smoke test.
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ // DMB
+ __ Dmb(FullSystem, BarrierAll);
+ __ Dmb(FullSystem, BarrierReads);
+ __ Dmb(FullSystem, BarrierWrites);
+ __ Dmb(FullSystem, BarrierOther);
+
+ __ Dmb(InnerShareable, BarrierAll);
+ __ Dmb(InnerShareable, BarrierReads);
+ __ Dmb(InnerShareable, BarrierWrites);
+ __ Dmb(InnerShareable, BarrierOther);
+
+ __ Dmb(NonShareable, BarrierAll);
+ __ Dmb(NonShareable, BarrierReads);
+ __ Dmb(NonShareable, BarrierWrites);
+ __ Dmb(NonShareable, BarrierOther);
+
+ __ Dmb(OuterShareable, BarrierAll);
+ __ Dmb(OuterShareable, BarrierReads);
+ __ Dmb(OuterShareable, BarrierWrites);
+ __ Dmb(OuterShareable, BarrierOther);
+
+ // DSB
+ __ Dsb(FullSystem, BarrierAll);
+ __ Dsb(FullSystem, BarrierReads);
+ __ Dsb(FullSystem, BarrierWrites);
+ __ Dsb(FullSystem, BarrierOther);
+
+ __ Dsb(InnerShareable, BarrierAll);
+ __ Dsb(InnerShareable, BarrierReads);
+ __ Dsb(InnerShareable, BarrierWrites);
+ __ Dsb(InnerShareable, BarrierOther);
+
+ __ Dsb(NonShareable, BarrierAll);
+ __ Dsb(NonShareable, BarrierReads);
+ __ Dsb(NonShareable, BarrierWrites);
+ __ Dsb(NonShareable, BarrierOther);
+
+ __ Dsb(OuterShareable, BarrierAll);
+ __ Dsb(OuterShareable, BarrierReads);
+ __ Dsb(OuterShareable, BarrierWrites);
+ __ Dsb(OuterShareable, BarrierOther);
+
+ // ISB
+ __ Isb();
+
+ END();
+
+ RUN();
+
+ TEARDOWN();
+}
+
+
+TEST(call_no_relocation) {
+ Address call_start;
+ Address return_address;
+
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ Label function;
+ Label test;
+
+ __ B(&test);
+
+ __ Bind(&function);
+ __ Mov(x0, 0x1);
+ __ Ret();
+
+ __ Bind(&test);
+ __ Mov(x0, 0x0);
+ __ Push(lr, xzr);
+ {
+ Assembler::BlockConstPoolScope scope(&masm);
+ call_start = buf + __ pc_offset();
+ __ Call(buf + function.pos(), RelocInfo::NONE64);
+ return_address = buf + __ pc_offset();
+ }
+ __ Pop(xzr, lr);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+
+ // The return_address_from_call_start function doesn't currently encounter any
+ // non-relocatable sequences, so we check it here to make sure it works.
+ // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
+ // non-relocatable calls at all.
+ CHECK(return_address ==
+ Assembler::return_address_from_call_start(call_start));
+
+ TEARDOWN();
+}
+
+
+static void AbsHelperX(int64_t value) {
+ int64_t expected;
+
+ SETUP();
+ START();
+
+ Label fail;
+ Label done;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, value);
+
+ if (value != kXMinInt) {
+ expected = labs(value);
+
+ Label next;
+ // The result is representable.
+ __ Abs(x10, x1);
+ __ Abs(x11, x1, &fail);
+ __ Abs(x12, x1, &fail, &next);
+ __ Bind(&next);
+ __ Abs(x13, x1, NULL, &done);
+ } else {
+ // labs is undefined for kXMinInt but our implementation in the
+ // MacroAssembler will return kXMinInt in such a case.
+ expected = kXMinInt;
+
+ Label next;
+ // The result is not representable.
+ __ Abs(x10, x1);
+ __ Abs(x11, x1, NULL, &fail);
+ __ Abs(x12, x1, &next, &fail);
+ __ Bind(&next);
+ __ Abs(x13, x1, &done);
+ }
+
+ __ Bind(&fail);
+ __ Mov(x0, -1);
+
+ __ Bind(&done);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(value, x1);
+ ASSERT_EQUAL_64(expected, x10);
+ ASSERT_EQUAL_64(expected, x11);
+ ASSERT_EQUAL_64(expected, x12);
+ ASSERT_EQUAL_64(expected, x13);
+
+ TEARDOWN();
+}
+
+
+static void AbsHelperW(int32_t value) {
+ int32_t expected;
+
+ SETUP();
+ START();
+
+ Label fail;
+ Label done;
+
+ __ Mov(w0, 0);
+ // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
+ // Once it is fixed, we should remove the cast.
+ __ Mov(w1, static_cast<uint32_t>(value));
+
+ if (value != kWMinInt) {
+ expected = abs(value);
+
+ Label next;
+ // The result is representable.
+ __ Abs(w10, w1);
+ __ Abs(w11, w1, &fail);
+ __ Abs(w12, w1, &fail, &next);
+ __ Bind(&next);
+ __ Abs(w13, w1, NULL, &done);
+ } else {
+ // abs is undefined for kWMinInt but our implementation in the
+ // MacroAssembler will return kWMinInt in such a case.
+ expected = kWMinInt;
+
+ Label next;
+ // The result is not representable.
+ __ Abs(w10, w1);
+ __ Abs(w11, w1, NULL, &fail);
+ __ Abs(w12, w1, &next, &fail);
+ __ Bind(&next);
+ __ Abs(w13, w1, &done);
+ }
+
+ __ Bind(&fail);
+ __ Mov(w0, -1);
+
+ __ Bind(&done);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_32(0, w0);
+ ASSERT_EQUAL_32(value, w1);
+ ASSERT_EQUAL_32(expected, w10);
+ ASSERT_EQUAL_32(expected, w11);
+ ASSERT_EQUAL_32(expected, w12);
+ ASSERT_EQUAL_32(expected, w13);
+
+ TEARDOWN();
+}
+
+
+TEST(abs) {
+ INIT_V8();
+ AbsHelperX(0);
+ AbsHelperX(42);
+ AbsHelperX(-42);
+ AbsHelperX(kXMinInt);
+ AbsHelperX(kXMaxInt);
+
+ AbsHelperW(0);
+ AbsHelperW(42);
+ AbsHelperW(-42);
+ AbsHelperW(kWMinInt);
+ AbsHelperW(kWMaxInt);
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 69ea6f4742..b21dc34dc4 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1439,6 +1439,72 @@ TEST(17) {
}
+#define TEST_SDIV(expected_, dividend_, divisor_) \
+ t.dividend = dividend_; \
+ t.divisor = divisor_; \
+ t.result = 0; \
+ dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); \
+ CHECK_EQ(expected_, t.result);
+
+
+TEST(18) {
+ // Test the sdiv instruction.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ uint32_t dividend;
+ uint32_t divisor;
+ uint32_t result;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(&assm, SUDIV);
+
+ __ mov(r3, Operand(r0));
+
+ __ ldr(r0, MemOperand(r3, OFFSET_OF(T, dividend)));
+ __ ldr(r1, MemOperand(r3, OFFSET_OF(T, divisor)));
+
+ __ sdiv(r2, r0, r1);
+ __ str(r2, MemOperand(r3, OFFSET_OF(T, result)));
+
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ Object* dummy;
+ TEST_SDIV(1073741824, kMinInt, -2);
+ TEST_SDIV(kMinInt, kMinInt, -1);
+ TEST_SDIV(5, 10, 2);
+ TEST_SDIV(3, 10, 3);
+ TEST_SDIV(-5, 10, -2);
+ TEST_SDIV(-3, 10, -3);
+ TEST_SDIV(-5, -10, 2);
+ TEST_SDIV(-3, -10, 3);
+ TEST_SDIV(5, -10, -2);
+ TEST_SDIV(3, -10, -3);
+ USE(dummy);
+ }
+}
+
+
+#undef TEST_SDIV
+
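A host-side model of the sdiv cases exercised by TEST_SDIV above: results truncate toward zero, and kMinInt / -1 wraps back to kMinInt. The wrapping behaviour is taken from the expectations in the test itself rather than re-derived from the architecture manual:

    #include <cstdint>
    #include <cassert>

    int32_t ModelSdiv(int32_t dividend, int32_t divisor) {
      // The only 32-bit signed division that overflows; sdiv wraps it.
      if (dividend == INT32_MIN && divisor == -1) return INT32_MIN;
      return dividend / divisor;  // C++ division also truncates toward zero.
    }

    int main() {
      assert(ModelSdiv(INT32_MIN, -2) == 1073741824);
      assert(ModelSdiv(INT32_MIN, -1) == INT32_MIN);
      assert(ModelSdiv(10, 3) == 3);
      assert(ModelSdiv(10, -3) == -3);
      assert(ModelSdiv(-10, 3) == -3);
      assert(ModelSdiv(-10, -3) == 3);
      return 0;
    }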
+
TEST(code_relative_offset) {
// Test extracting the offset of a label from the beginning of the code
// in a register.
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index d40156841e..db28231d63 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -522,7 +522,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
F0 f = FUNCTION_CAST<F0>(Code::cast(code)->entry());
int res = f();
- args.GetReturnValue().Set(v8::Integer::New(res));
+ args.GetReturnValue().Set(v8::Integer::New(CcTest::isolate(), res));
}
@@ -534,8 +534,10 @@ TEST(StackAlignmentForSSE2) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
- global_template->Set(v8_str("do_sse2"), v8::FunctionTemplate::New(DoSSE2));
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ global_template->Set(v8_str("do_sse2"),
+ v8::FunctionTemplate::New(isolate, DoSSE2));
LocalContext env(NULL, global_template);
CompileRun(
@@ -548,7 +550,7 @@ TEST(StackAlignmentForSSE2) {
v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
- v8::Local<v8::Array> v8_vec = v8::Array::New(ELEMENT_COUNT);
+ v8::Local<v8::Array> v8_vec = v8::Array::New(isolate, ELEMENT_COUNT);
for (int i = 0; i < ELEMENT_COUNT; i++) {
v8_vec->Set(i, v8_num(vec[i]));
}
@@ -599,4 +601,44 @@ TEST(AssemblerIa32Extractps) {
}
+typedef int (*F8)(float x, float y);
+TEST(AssemblerIa32SSE) {
+ CcTest::InitializeVM();
+ if (!CpuFeatures::IsSupported(SSE2)) return;
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[256];
+ MacroAssembler assm(isolate, buffer, sizeof buffer);
+ {
+ CpuFeatureScope fscope(&assm, SSE2);
+ __ movss(xmm0, Operand(esp, kPointerSize));
+ __ movss(xmm1, Operand(esp, 2 * kPointerSize));
+ __ shufps(xmm0, xmm0, 0x0);
+ __ shufps(xmm1, xmm1, 0x0);
+ __ movaps(xmm2, xmm1);
+ __ addps(xmm2, xmm0);
+ __ mulps(xmm2, xmm1);
+ __ subps(xmm2, xmm0);
+ __ divps(xmm2, xmm1);
+ __ cvttss2si(eax, xmm2);
+ __ ret(0);
+ }
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Code* code = Code::cast(isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked());
+ CHECK(code->IsCode());
+#ifdef OBJECT_PRINT
+ Code::cast(code)->Print();
+#endif
+
+ F8 f = FUNCTION_CAST<F8>(Code::cast(code)->entry());
+ CHECK_EQ(2, f(1.0, 2.0));
+}
+
+
#undef __
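The expected return value of 2 in AssemblerIa32SSE (and in the matching AssemblerX64SSE test further down) follows from the packed-float sequence: with x = 1.0 and y = 2.0 it computes ((y + x) * y - x) / y = 2.5 per lane, and cvttss2si truncates toward zero. A scalar model of the same arithmetic:

    #include <cassert>

    int ModelSSE(float x, float y) {
      float t = y;   // movaps
      t += x;        // addps
      t *= y;        // mulps
      t -= x;        // subps
      t /= y;        // divps
      return static_cast<int>(t);  // cvttss2si truncates toward zero
    }

    int main() {
      assert(ModelSSE(1.0f, 2.0f) == 2);  // ((2 + 1) * 2 - 1) / 2 = 2.5 -> 2
      return 0;
    }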
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index e8e724c052..534c4cf0ae 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -765,10 +765,8 @@ TEST(MIPS10) {
double b;
int32_t dbl_mant;
int32_t dbl_exp;
- int32_t long_hi;
- int32_t long_lo;
- int32_t b_long_hi;
- int32_t b_long_lo;
+ int32_t word;
+ int32_t b_word;
} T;
T t;
@@ -786,18 +784,14 @@ TEST(MIPS10) {
__ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
// Convert double in f0 to long, save hi/lo parts.
- __ cvt_l_d(f0, f0);
- __ mfc1(t0, f0); // f0 has LS 32 bits of long.
- __ mfc1(t1, f1); // f1 has MS 32 bits of long.
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, long_lo)));
- __ sw(t1, MemOperand(a0, OFFSET_OF(T, long_hi)));
+ __ cvt_w_d(f0, f0);
+ __ mfc1(t0, f0); // f0 has a 32-bit word.
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
// Convert the b long integers to double b.
- __ lw(t0, MemOperand(a0, OFFSET_OF(T, b_long_lo)));
- __ lw(t1, MemOperand(a0, OFFSET_OF(T, b_long_hi)));
- __ mtc1(t0, f8); // f8 has LS 32-bits.
- __ mtc1(t1, f9); // f9 has MS 32-bits.
- __ cvt_d_l(f10, f8);
+ __ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
+ __ mtc1(t0, f8); // f8 has a 32-bit word.
+ __ cvt_d_w(f10, f8);
__ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
__ jr(ra);
@@ -811,18 +805,16 @@ TEST(MIPS10) {
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
- t.b_long_hi = 0x000000ff; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
- t.b_long_lo = 0x00ff00ff;
+ t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
+ t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x41AFE01FE0000000 as double.
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(0x41DFFFFF, t.dbl_exp);
- CHECK_EQ(0xFFC00000, t.dbl_mant);
- CHECK_EQ(0, t.long_hi);
- CHECK_EQ(0x7fffffff, t.long_lo);
- // 0xFF00FF00FF -> 1.095233372415e12.
- CHECK_EQ(1.095233372415e12, t.b);
+ CHECK_EQ(0xFF800000, t.dbl_mant);
+ CHECK_EQ(0X7FFFFFFE, t.word);
+ // 0x0FF00FF0 -> 2.6739096e+08.
+ CHECK_EQ(2.6739096e08, t.b);
}
}
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index cd1ed2823b..4fa5ffecb4 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -179,10 +179,10 @@ TEST(AssemblerX64XchglOperations) {
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
__ movq(rax, Operand(arg1, 0));
- __ movq(rbx, Operand(arg2, 0));
- __ xchgl(rax, rbx);
+ __ movq(r11, Operand(arg2, 0));
+ __ xchgl(rax, r11);
__ movq(Operand(arg1, 0), rax);
- __ movq(Operand(arg2, 0), rbx);
+ __ movq(Operand(arg2, 0), r11);
__ ret(0);
CodeDesc desc;
@@ -279,8 +279,8 @@ TEST(AssemblerX64TestlOperations) {
// Set rax with the ZF flag of the testl instruction.
Label done;
__ movq(rax, Immediate(1));
- __ movq(rbx, Operand(arg2, 0));
- __ testl(Operand(arg1, 0), rbx);
+ __ movq(r11, Operand(arg2, 0));
+ __ testl(Operand(arg1, 0), r11);
__ j(zero, &done, Label::kNear);
__ movq(rax, Immediate(0));
__ bind(&done);
@@ -604,7 +604,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
- args.GetReturnValue().Set(v8::Integer::New(res));
+ args.GetReturnValue().Set(v8::Integer::New(CcTest::isolate(), res));
}
@@ -614,8 +614,10 @@ TEST(StackAlignmentForSSE2) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
- global_template->Set(v8_str("do_sse2"), v8::FunctionTemplate::New(DoSSE2));
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ global_template->Set(v8_str("do_sse2"),
+ v8::FunctionTemplate::New(isolate, DoSSE2));
LocalContext env(NULL, global_template);
CompileRun(
@@ -628,7 +630,7 @@ TEST(StackAlignmentForSSE2) {
v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
- v8::Local<v8::Array> v8_vec = v8::Array::New(ELEMENT_COUNT);
+ v8::Local<v8::Array> v8_vec = v8::Array::New(isolate, ELEMENT_COUNT);
for (int i = 0; i < ELEMENT_COUNT; i++) {
v8_vec->Set(i, v8_num(vec[i]));
}
@@ -676,4 +678,38 @@ TEST(AssemblerX64Extractps) {
}
+typedef int (*F6)(float x, float y);
+TEST(AssemblerX64SSE) {
+ CcTest::InitializeVM();
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[256];
+ MacroAssembler assm(isolate, buffer, sizeof buffer);
+ {
+ __ shufps(xmm0, xmm0, 0x0); // broadcast first argument
+ __ shufps(xmm1, xmm1, 0x0); // broadcast second argument
+ __ movaps(xmm2, xmm1);
+ __ addps(xmm2, xmm0);
+ __ mulps(xmm2, xmm1);
+ __ subps(xmm2, xmm0);
+ __ divps(xmm2, xmm1);
+ __ cvttss2si(rax, xmm2);
+ __ ret(0);
+ }
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Code* code = Code::cast(isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked());
+ CHECK(code->IsCode());
+#ifdef OBJECT_PRINT
+ Code::cast(code)->Print();
+#endif
+
+ F6 f = FUNCTION_CAST<F6>(Code::cast(code)->entry());
+ CHECK_EQ(2, f(1.0, 2.0));
+}
#undef __
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 299f2a8960..d6431371aa 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -41,7 +41,7 @@ TEST(List) {
Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate);
- AstNodeFactory<AstNullVisitor> factory(isolate, &zone);
+ AstNodeFactory<AstNullVisitor> factory(&zone);
AstNode* node = factory.NewEmptyStatement(RelocInfo::kNoPosition);
list->Add(node);
CHECK_EQ(1, list->length());
diff --git a/deps/v8/test/cctest/test-code-stubs-a64.cc b/deps/v8/test/cctest/test-code-stubs-a64.cc
new file mode 100644
index 0000000000..9416b19dda
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-stubs-a64.cc
@@ -0,0 +1,188 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+#include "simulator.h"
+
+using namespace v8::internal;
+
+#define __ masm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg,
+ bool inline_fastpath) {
+ // Allocate an executable page of memory.
+ size_t actual_size = 2 * Assembler::kMinimalBufferSize;
+ byte* buffer = static_cast<byte*>(OS::Allocate(actual_size,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ DoubleToIStub stub(source_reg, destination_reg, 0, true, inline_fastpath);
+
+ byte* start = stub.GetCode(isolate)->instruction_start();
+ Label done;
+
+ __ SetStackPointer(csp);
+ __ PushCalleeSavedRegisters();
+ __ Mov(jssp, csp);
+ __ SetStackPointer(jssp);
+
+ // Push the double argument.
+ __ Push(d0);
+ if (!source_reg.is(jssp)) {
+ __ Mov(source_reg, jssp);
+ }
+
+ // Save registers to make sure they don't get clobbered.
+ int source_reg_offset = kDoubleSize;
+ int reg_num = 0;
+ for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ __ Push(reg);
+ source_reg_offset += kPointerSize;
+ }
+ }
+
+ // Re-push the double argument.
+ __ Push(d0);
+
+ // Call through to the actual stub
+ if (inline_fastpath) {
+ __ Ldr(d0, MemOperand(source_reg));
+ __ TryInlineTruncateDoubleToI(destination_reg, d0, &done);
+ if (destination_reg.is(source_reg)) {
+ // Restore clobbered source_reg.
+ __ add(source_reg, jssp, Operand(source_reg_offset));
+ }
+ }
+ __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
+ __ bind(&done);
+
+ __ Drop(1, kDoubleSize);
+
+ // Make sure no registers have been unexpectedly clobbered
+ for (--reg_num; reg_num >= 0; --reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ __ Pop(ip0);
+ __ cmp(reg, ip0);
+ __ Assert(eq, kRegisterWasClobbered);
+ }
+ }
+
+ __ Drop(1, kDoubleSize);
+
+ if (!destination_reg.is(x0))
+ __ Mov(x0, destination_reg);
+
+ // Restore callee save registers.
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+ __ PopCalleeSavedRegisters();
+
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ CPU::FlushICache(buffer, actual_size);
+ return (reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer)));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
+ double from) {
+#ifdef USE_SIMULATOR
+ Simulator::CallArgument args[] = {
+ Simulator::CallArgument(from),
+ Simulator::CallArgument::End()
+ };
+ return Simulator::current(Isolate::Current())->CallInt64(
+ FUNCTION_ADDR(func), args);
+#else
+ return (*func)(from);
+#endif
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In the release
+ // code, the compiler optimizes it away because it's all constant, but does it
+ // wrong, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ Register source_registers[] = {jssp, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9,
+ x10, x11, x12, x13, x14, x15, x18, x19, x20,
+ x21, x22, x23, x24};
+ Register dest_registers[] = {x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,
+ x12, x13, x14, x15, x18, x19, x20, x21, x22, x23,
+ x24};
+
+ for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ false));
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ true));
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index 54eaa58318..53cdd16130 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -53,7 +53,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
- masm.set_allow_stub_calls(false);
DoubleToIStub stub(source_reg, destination_reg, 0, true, inline_fastpath);
byte* start = stub.GetCode(isolate)->instruction_start();
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
index 3f621758e3..c206a0102a 100644
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -53,7 +53,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
- assm.set_allow_stub_calls(false);
int offset =
source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
DoubleToIStub stub(source_reg, destination_reg, offset, true);
@@ -73,7 +72,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers to make sure they don't get clobbered.
int reg_num = 0;
for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
- Register reg = Register::from_code(reg_num);
+ Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ push(reg);
param_offset += kPointerSize;
@@ -91,7 +90,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
- Register reg = Register::from_code(reg_num);
+ Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ cmp(reg, MemOperand(esp, 0));
__ Assert(equal, kRegisterWasClobbered);
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
new file mode 100644
index 0000000000..8dce89694e
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-stubs-mips.cc
@@ -0,0 +1,188 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "mips/constants-mips.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+#include "simulator.h"
+
+using namespace v8::internal;
+
+#define __ masm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg,
+ bool inline_fastpath) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ DoubleToIStub stub(source_reg, destination_reg, 0, true, inline_fastpath);
+
+ byte* start = stub.GetCode(isolate)->instruction_start();
+ Label done;
+
+ // Save callee save registers.
+ __ MultiPush(kCalleeSaved | ra.bit());
+
+ // For softfp, move the input value into f12.
+ if (IsMipsSoftFloatABI) {
+ __ Move(f12, a0, a1);
+ }
+ // Push the double argument.
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ __ sdc1(f12, MemOperand(sp));
+ __ Move(source_reg, sp);
+
+ // Save registers to make sure they don't get clobbered.
+ int source_reg_offset = kDoubleSize;
+ int reg_num = 2;
+ for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ __ push(reg);
+ source_reg_offset += kPointerSize;
+ }
+ }
+
+ // Re-push the double argument.
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ __ sdc1(f12, MemOperand(sp));
+
+ // Call through to the actual stub
+ if (inline_fastpath) {
+ __ ldc1(f12, MemOperand(source_reg));
+ __ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
+ if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
+ // Restore clobbered source_reg.
+ __ Addu(source_reg, sp, Operand(source_reg_offset));
+ }
+ }
+ __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
+ __ bind(&done);
+
+ __ Addu(sp, sp, Operand(kDoubleSize));
+
+ // Make sure no registers have been unexpectedly clobbered
+ for (--reg_num; reg_num >= 2; --reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ __ lw(at, MemOperand(sp, 0));
+ __ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
+ __ Addu(sp, sp, Operand(kPointerSize));
+ }
+ }
+
+ __ Addu(sp, sp, Operand(kDoubleSize));
+
+ __ Move(v0, destination_reg);
+ Label ok;
+ __ Branch(&ok, eq, v0, Operand(zero_reg));
+ __ bind(&ok);
+
+ // Restore callee save registers.
+ __ MultiPop(kCalleeSaved | ra.bit());
+
+ Label ok1;
+ __ Branch(&ok1, eq, v0, Operand(zero_reg));
+ __ bind(&ok1);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ CPU::FlushICache(buffer, actual_size);
+ return (reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer)));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
+ double from) {
+#ifdef USE_SIMULATOR
+ Simulator::current(Isolate::Current())->CallFP(FUNCTION_ADDR(func), from, 0.);
+ return Simulator::current(Isolate::Current())->get_register(v0.code());
+#else
+ return (*func)(from);
+#endif
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In the release
+ // code, the compiler optimizes it away because it's all constant, but does it
+ // wrong, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ Register source_registers[] = {
+ sp, v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5};
+ Register dest_registers[] = {
+ v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5};
+
+ for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ false));
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ true));
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index 4af5b45d7c..6b3a12ccc8 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -52,7 +52,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
- assm.set_allow_stub_calls(false);
int offset =
source_reg.is(rsp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
DoubleToIStub stub(source_reg, destination_reg, offset, true);
@@ -65,21 +64,25 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ push(rdi);
if (!source_reg.is(rsp)) {
- __ lea(source_reg, MemOperand(rsp, -8 * kPointerSize - offset));
+ // The argument we pass to the stub is not a heap number, but instead
+ // stack-allocated and offset-wise made to look like a heap number for
+ // the stub. We create that "heap number" after pushing all allocatable
+ // registers.
+ int double_argument_slot =
+ (Register::NumAllocatableRegisters() - 1) * kPointerSize + kDoubleSize;
+ __ lea(source_reg, MemOperand(rsp, -double_argument_slot - offset));
}
- int param_offset = 7 * kPointerSize;
// Save registers to make sure they don't get clobbered.
int reg_num = 0;
for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
- Register reg = Register::from_code(reg_num);
+ Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
__ push(reg);
- param_offset += kPointerSize;
}
}
- // Re-push the double argument
+ // Put the double argument into the designated double argument slot.
__ subq(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), xmm0);
@@ -90,7 +93,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
- Register reg = Register::from_code(reg_num);
+ Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
__ cmpq(reg, MemOperand(rsp, 0));
__ Assert(equal, kRegisterWasClobbered);
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
index db00e9ac5a..999febf777 100644
--- a/deps/v8/test/cctest/test-code-stubs.cc
+++ b/deps/v8/test/cctest/test-code-stubs.cc
@@ -49,6 +49,9 @@ int STDCALL ConvertDToICVersion(double d) {
int32_t exponent = (((exponent_bits & shifted_mask) >>
(Double::kPhysicalSignificandSize - 32)) -
HeapNumber::kExponentBias);
+ if (exponent < 0) {
+ return 0;
+ }
uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
int result = 0;
uint32_t max_exponent =
@@ -113,10 +116,27 @@ void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
RunOneTruncationTest(Infinity, 0);
RunOneTruncationTest(-NaN, 0);
RunOneTruncationTest(-Infinity, 0);
-
- RunOneTruncationTest(4.5036e+15, 0x1635E000);
+ RunOneTruncationTest(4.94065645841e-324, 0);
+ RunOneTruncationTest(-4.94065645841e-324, 0);
+
+ RunOneTruncationTest(0.9999999999999999, 0);
+ RunOneTruncationTest(-0.9999999999999999, 0);
+ RunOneTruncationTest(4294967296.0, 0);
+ RunOneTruncationTest(-4294967296.0, 0);
+ RunOneTruncationTest(9223372036854775000.0, 4294966272.0);
+ RunOneTruncationTest(-9223372036854775000.0, -4294966272.0);
+ RunOneTruncationTest(4.5036e+15, 372629504);
RunOneTruncationTest(-4.5036e+15, -372629504);
+ RunOneTruncationTest(287524199.5377777, 0x11234567);
+ RunOneTruncationTest(-287524199.5377777, -0x11234567);
+ RunOneTruncationTest(2300193596.302222, 2300193596.0);
+ RunOneTruncationTest(-2300193596.302222, -2300193596.0);
+ RunOneTruncationTest(4600387192.604444, 305419896);
+ RunOneTruncationTest(-4600387192.604444, -305419896);
+ RunOneTruncationTest(4823855600872397.0, 1737075661);
+ RunOneTruncationTest(-4823855600872397.0, -1737075661);
+
RunOneTruncationTest(4503603922337791.0, -1);
RunOneTruncationTest(-4503603922337791.0, 1);
RunOneTruncationTest(4503601774854143.0, 2147483647);
@@ -134,10 +154,19 @@ void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
+ RunOneTruncationTest(2147483647.0, 2147483647.0);
+ RunOneTruncationTest(-2147483648.0, -2147483648.0);
RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
+ RunOneTruncationTest(1.9342813113834065e+25, 2147483648.0);
+ RunOneTruncationTest(-1.9342813113834065e+25, 2147483648.0);
+
+ RunOneTruncationTest(3.868562622766813e+25, 0);
+ RunOneTruncationTest(-3.868562622766813e+25, 0);
+ RunOneTruncationTest(1.7976931348623157e+308, 0);
+ RunOneTruncationTest(-1.7976931348623157e+308, 0);
}
#undef NaN
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 9fd68e5222..ae414d7843 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -32,52 +32,10 @@
#include "compiler.h"
#include "disasm.h"
-#include "disassembler.h"
-#include "execution.h"
-#include "factory.h"
-#include "platform.h"
#include "cctest.h"
using namespace v8::internal;
-// --- P r i n t E x t e n s i o n ---
-
-class PrintExtension : public v8::Extension {
- public:
- PrintExtension() : v8::Extension("v8/print", kSource) { }
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
- static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
- private:
- static const char* kSource;
-};
-
-
-const char* PrintExtension::kSource = "native function print();";
-
-
-v8::Handle<v8::FunctionTemplate> PrintExtension::GetNativeFunction(
- v8::Handle<v8::String> str) {
- return v8::FunctionTemplate::New(PrintExtension::Print);
-}
-
-
-void PrintExtension::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
- for (int i = 0; i < args.Length(); i++) {
- if (i != 0) printf(" ");
- v8::HandleScope scope(args.GetIsolate());
- v8::String::Utf8Value str(args[i]);
- if (*str == NULL) return;
- printf("%s", *str);
- }
- printf("\n");
-}
-
-
-static PrintExtension kPrintExtension;
-v8::DeclareExtension kPrintExtensionDeclaration(&kPrintExtension);
-
-
static MaybeObject* GetGlobalProperty(const char* name) {
Isolate* isolate = CcTest::i_isolate();
Handle<String> internalized_name =
@@ -92,7 +50,8 @@ static void SetGlobalProperty(const char* name, Object* value) {
Handle<String> internalized_name =
isolate->factory()->InternalizeUtf8String(name);
Handle<JSObject> global(isolate->context()->global_object());
- SetProperty(isolate, global, internalized_name, object, NONE, kNonStrictMode);
+ Runtime::SetObjectProperty(isolate, global, internalized_name, object, NONE,
+ kNonStrictMode);
}
@@ -101,16 +60,15 @@ static Handle<JSFunction> Compile(const char* source) {
Handle<String> source_code(
isolate->factory()->NewStringFromUtf8(CStrVector(source)));
Handle<SharedFunctionInfo> shared_function =
- Compiler::Compile(source_code,
- Handle<String>(),
- 0,
- 0,
- false,
- Handle<Context>(isolate->native_context()),
- NULL,
- NULL,
- Handle<String>::null(),
- NOT_NATIVES_CODE);
+ Compiler::CompileScript(source_code,
+ Handle<String>(),
+ 0,
+ 0,
+ false,
+ Handle<Context>(isolate->native_context()),
+ NULL, NULL,
+ Handle<String>::null(),
+ NOT_NATIVES_CODE);
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_function, isolate->native_context());
}
@@ -274,6 +232,7 @@ TEST(UncaughtThrow) {
// | JS |
// | C-to-JS |
TEST(C2JSFrames) {
+ FLAG_expose_gc = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context =
CcTest::NewContext(PRINT_EXTENSION | GC_EXTENSION);
@@ -330,7 +289,8 @@ TEST(Regression236) {
TEST(GetScriptLineNumber) {
LocalContext context;
v8::HandleScope scope(CcTest::isolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(CcTest::isolate(), "test"));
const char function_f[] = "function f() {}";
const int max_rows = 1000;
const int buffer_size = max_rows + sizeof(function_f);
@@ -342,10 +302,12 @@ TEST(GetScriptLineNumber) {
if (i > 0)
buffer[i - 1] = '\n';
OS::MemCopy(&buffer[i], function_f, sizeof(function_f) - 1);
- v8::Handle<v8::String> script_body = v8::String::New(buffer.start());
+ v8::Handle<v8::String> script_body =
+ v8::String::NewFromUtf8(CcTest::isolate(), buffer.start());
v8::Script::Compile(script_body, &origin)->Run();
- v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- context->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> f =
+ v8::Local<v8::Function>::Cast(context->Global()->Get(
+ v8::String::NewFromUtf8(CcTest::isolate(), "f")));
CHECK_EQ(i, f->GetScriptLineNumber());
}
}
@@ -363,7 +325,8 @@ TEST(OptimizedCodeSharing) {
v8::HandleScope scope(CcTest::isolate());
for (int i = 0; i < 10; i++) {
LocalContext env;
- env->Global()->Set(v8::String::New("x"), v8::Integer::New(i));
+ env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
+ v8::Integer::New(CcTest::isolate(), i));
CompileRun("function MakeClosure() {"
" return function() { return x; };"
"}"
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 9ef307c6f4..3bba514391 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -31,6 +31,7 @@
#include "cpu-profiler-inl.h"
#include "cctest.h"
#include "platform.h"
+#include "profiler-extension.h"
#include "smart-pointers.h"
#include "utils.h"
#include "../include/v8-profiler.h"
@@ -59,10 +60,6 @@ TEST(StartStop) {
}
-static inline i::Address ToAddress(int n) {
- return reinterpret_cast<i::Address>(n);
-}
-
static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
i::Address frame1,
i::Address frame2 = NULL,
@@ -142,12 +139,12 @@ TEST(CodeEvents) {
i::Code* args4_code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
- profiles->StartProfiling("", 1, false);
+ profiles->StartProfiling("", false);
ProfileGenerator generator(profiles);
SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
&generator, NULL, TimeDelta::FromMicroseconds(100)));
processor->Start();
- CpuProfiler profiler(isolate, profiles, &generator, *processor);
+ CpuProfiler profiler(isolate, profiles, &generator, processor.get());
// Enqueue code creation events.
const char* aaa_str = "aaa";
@@ -162,7 +159,7 @@ TEST(CodeEvents) {
profiler.CodeCreateEvent(i::Logger::STUB_TAG, args4_code, 4);
// Enqueue a tick event to enable code events processing.
- EnqueueTickSampleEvent(*processor, aaa_code->address());
+ EnqueueTickSampleEvent(processor.get(), aaa_code->address());
processor->StopSynchronously();
@@ -204,24 +201,24 @@ TEST(TickEvents) {
i::Code* frame3_code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
- profiles->StartProfiling("", 1, false);
+ profiles->StartProfiling("", false);
ProfileGenerator generator(profiles);
SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
&generator, NULL, TimeDelta::FromMicroseconds(100)));
processor->Start();
- CpuProfiler profiler(isolate, profiles, &generator, *processor);
+ CpuProfiler profiler(isolate, profiles, &generator, processor.get());
profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame1_code, "bbb");
profiler.CodeCreateEvent(i::Logger::STUB_TAG, frame2_code, 5);
profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame3_code, "ddd");
- EnqueueTickSampleEvent(*processor, frame1_code->instruction_start());
+ EnqueueTickSampleEvent(processor.get(), frame1_code->instruction_start());
EnqueueTickSampleEvent(
- *processor,
+ processor.get(),
frame2_code->instruction_start() + frame2_code->ExecutableSize() / 2,
frame1_code->instruction_start() + frame2_code->ExecutableSize() / 2);
EnqueueTickSampleEvent(
- *processor,
+ processor.get(),
frame3_code->instruction_end() - 1,
frame2_code->instruction_end() - 1,
frame1_code->instruction_end() - 1);
@@ -273,12 +270,12 @@ TEST(Issue1398) {
i::Code* code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
- profiles->StartProfiling("", 1, false);
+ profiles->StartProfiling("", false);
ProfileGenerator generator(profiles);
SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
&generator, NULL, TimeDelta::FromMicroseconds(100)));
processor->Start();
- CpuProfiler profiler(isolate, profiles, &generator, *processor);
+ CpuProfiler profiler(isolate, profiles, &generator, processor.get());
profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, code, "bbb");
@@ -336,16 +333,17 @@ TEST(DeleteAllCpuProfiles) {
}
-static const v8::CpuProfile* FindCpuProfile(v8::CpuProfiler* profiler,
- unsigned uid) {
- int length = profiler->GetProfileCount();
+static bool FindCpuProfile(v8::CpuProfiler* v8profiler,
+ const v8::CpuProfile* v8profile) {
+ i::CpuProfiler* profiler = reinterpret_cast<i::CpuProfiler*>(v8profiler);
+ const i::CpuProfile* profile =
+ reinterpret_cast<const i::CpuProfile*>(v8profile);
+ int length = profiler->GetProfilesCount();
for (int i = 0; i < length; i++) {
- const v8::CpuProfile* profile = profiler->GetCpuProfile(i);
- if (profile->GetUid() == uid) {
- return profile;
- }
+ if (profile == profiler->GetProfile(i))
+ return true;
}
- return NULL;
+ return false;
}
@@ -353,46 +351,38 @@ TEST(DeleteCpuProfile) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(cpu_profiler);
- CHECK_EQ(0, cpu_profiler->GetProfileCount());
- v8::Local<v8::String> name1 = v8::String::New("1");
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
+ v8::Local<v8::String> name1 = v8::String::NewFromUtf8(env->GetIsolate(), "1");
cpu_profiler->StartCpuProfiling(name1);
const v8::CpuProfile* p1 = cpu_profiler->StopCpuProfiling(name1);
CHECK_NE(NULL, p1);
- CHECK_EQ(1, cpu_profiler->GetProfileCount());
- unsigned uid1 = p1->GetUid();
- CHECK_EQ(p1, FindCpuProfile(cpu_profiler, uid1));
+ CHECK_EQ(1, iprofiler->GetProfilesCount());
+ CHECK(FindCpuProfile(cpu_profiler, p1));
const_cast<v8::CpuProfile*>(p1)->Delete();
- CHECK_EQ(0, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid1));
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
- v8::Local<v8::String> name2 = v8::String::New("2");
+ v8::Local<v8::String> name2 = v8::String::NewFromUtf8(env->GetIsolate(), "2");
cpu_profiler->StartCpuProfiling(name2);
const v8::CpuProfile* p2 = cpu_profiler->StopCpuProfiling(name2);
CHECK_NE(NULL, p2);
- CHECK_EQ(1, cpu_profiler->GetProfileCount());
- unsigned uid2 = p2->GetUid();
- CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid2));
- CHECK_EQ(p2, FindCpuProfile(cpu_profiler, uid2));
- CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid1));
- v8::Local<v8::String> name3 = v8::String::New("3");
+ CHECK_EQ(1, iprofiler->GetProfilesCount());
+ CHECK(FindCpuProfile(cpu_profiler, p2));
+ v8::Local<v8::String> name3 = v8::String::NewFromUtf8(env->GetIsolate(), "3");
cpu_profiler->StartCpuProfiling(name3);
const v8::CpuProfile* p3 = cpu_profiler->StopCpuProfiling(name3);
CHECK_NE(NULL, p3);
- CHECK_EQ(2, cpu_profiler->GetProfileCount());
- unsigned uid3 = p3->GetUid();
- CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid3));
- CHECK_EQ(p3, FindCpuProfile(cpu_profiler, uid3));
- CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid1));
+ CHECK_EQ(2, iprofiler->GetProfilesCount());
+ CHECK_NE(p2, p3);
+ CHECK(FindCpuProfile(cpu_profiler, p3));
+ CHECK(FindCpuProfile(cpu_profiler, p2));
const_cast<v8::CpuProfile*>(p2)->Delete();
- CHECK_EQ(1, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid2));
- CHECK_EQ(p3, FindCpuProfile(cpu_profiler, uid3));
+ CHECK_EQ(1, iprofiler->GetProfilesCount());
+ CHECK(!FindCpuProfile(cpu_profiler, p2));
+ CHECK(FindCpuProfile(cpu_profiler, p3));
const_cast<v8::CpuProfile*>(p3)->Delete();
- CHECK_EQ(0, cpu_profiler->GetProfileCount());
- CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid3));
- CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid2));
- CHECK_EQ(NULL, FindCpuProfile(cpu_profiler, uid1));
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
}
@@ -401,7 +391,8 @@ TEST(ProfileStartEndTime) {
v8::HandleScope scope(env->GetIsolate());
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("test");
+ v8::Local<v8::String> profile_name =
+ v8::String::NewFromUtf8(env->GetIsolate(), "test");
cpu_profiler->StartCpuProfiling(profile_name);
const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
CHECK(profile->GetStartTime() <= profile->GetEndTime());
@@ -409,11 +400,12 @@ TEST(ProfileStartEndTime) {
static const v8::CpuProfile* RunProfiler(
- LocalContext& env, v8::Handle<v8::Function> function,
+ v8::Handle<v8::Context> env, v8::Handle<v8::Function> function,
v8::Handle<v8::Value> argv[], int argc,
unsigned min_js_samples) {
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+ v8::Local<v8::String> profile_name =
+ v8::String::NewFromUtf8(env->GetIsolate(), "my_profile");
cpu_profiler->StartCpuProfiling(profile_name);
@@ -460,10 +452,11 @@ static void CheckChildrenNames(const v8::CpuProfileNode* node,
}
-static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
+static const v8::CpuProfileNode* FindChild(v8::Isolate* isolate,
+ const v8::CpuProfileNode* node,
const char* name) {
int count = node->GetChildrenCount();
- v8::Handle<v8::String> nameHandle = v8::String::New(name);
+ v8::Handle<v8::String> nameHandle = v8::String::NewFromUtf8(isolate, name);
for (int i = 0; i < count; i++) {
const v8::CpuProfileNode* child = node->GetChild(i);
if (nameHandle->Equals(child->GetFunctionName())) return child;
@@ -472,9 +465,10 @@ static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
}
-static const v8::CpuProfileNode* GetChild(const v8::CpuProfileNode* node,
+static const v8::CpuProfileNode* GetChild(v8::Isolate* isolate,
+ const v8::CpuProfileNode* node,
const char* name) {
- const v8::CpuProfileNode* result = FindChild(node, name);
+ const v8::CpuProfileNode* result = FindChild(isolate, node, name);
if (!result) {
char buffer[100];
i::OS::SNPrintF(Vector<char>(buffer, ARRAY_SIZE(buffer)),
@@ -485,11 +479,12 @@ static const v8::CpuProfileNode* GetChild(const v8::CpuProfileNode* node,
}
-static void CheckSimpleBranch(const v8::CpuProfileNode* node,
+static void CheckSimpleBranch(v8::Isolate* isolate,
+ const v8::CpuProfileNode* node,
const char* names[], int length) {
for (int i = 0; i < length; i++) {
const char* name = names[i];
- node = GetChild(node, name);
+ node = GetChild(isolate, node, name);
int expectedChildrenCount = (i == length - 1) ? 0 : 1;
CHECK_EQ(expectedChildrenCount, node->GetChildrenCount());
}
@@ -549,39 +544,48 @@ TEST(CollectCpuProfile) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(v8::String::New(cpu_profiler_test_source))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ cpu_profiler_test_source))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
int32_t profiling_interval_ms = 200;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(profiling_interval_ms) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
+ };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 200);
+ RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200);
function->Call(env->Global(), ARRAY_SIZE(args), args);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::New("start");
+ names[0] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ const v8::CpuProfileNode* startNode =
+ GetChild(env->GetIsolate(), root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* fooNode = GetChild(startNode, "foo");
+ const v8::CpuProfileNode* fooNode =
+ GetChild(env->GetIsolate(), startNode, "foo");
CHECK_EQ(3, fooNode->GetChildrenCount());
const char* barBranch[] = { "bar", "delay", "loop" };
- CheckSimpleBranch(fooNode, barBranch, ARRAY_SIZE(barBranch));
+ CheckSimpleBranch(env->GetIsolate(), fooNode, barBranch,
+ ARRAY_SIZE(barBranch));
const char* bazBranch[] = { "baz", "delay", "loop" };
- CheckSimpleBranch(fooNode, bazBranch, ARRAY_SIZE(bazBranch));
+ CheckSimpleBranch(env->GetIsolate(), fooNode, bazBranch,
+ ARRAY_SIZE(bazBranch));
const char* delayBranch[] = { "delay", "loop" };
- CheckSimpleBranch(fooNode, delayBranch, ARRAY_SIZE(delayBranch));
+ CheckSimpleBranch(env->GetIsolate(), fooNode, delayBranch,
+ ARRAY_SIZE(delayBranch));
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -610,41 +614,47 @@ TEST(SampleWhenFrameIsNotSetup) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(v8::String::New(cpu_profiler_test_source2))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(
+ env->GetIsolate(), cpu_profiler_test_source2))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
int32_t repeat_count = 100;
#if defined(USE_SIMULATOR)
// Simulators are much slower.
repeat_count = 1;
#endif
- v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), repeat_count)
+ };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::New("start");
+ names[0] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode = FindChild(root, "start");
+ const v8::CpuProfileNode* startNode =
+ FindChild(env->GetIsolate(), root, "start");
// On slow machines there may be no meaningful samples at all, skip the
// check there.
if (startNode && startNode->GetChildrenCount() > 0) {
CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* delayNode = GetChild(startNode, "delay");
+ const v8::CpuProfileNode* delayNode =
+ GetChild(env->GetIsolate(), startNode, "delay");
if (delayNode->GetChildrenCount() > 0) {
CHECK_EQ(1, delayNode->GetChildrenCount());
- GetChild(delayNode, "loop");
+ GetChild(env->GetIsolate(), delayNode, "loop");
}
}
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -710,38 +720,43 @@ class TestApiCallbacks {
// code.
TEST(NativeAccessorUninitializedIC) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
-
- v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(isolate);
v8::Local<v8::ObjectTemplate> instance_template =
func_template->InstanceTemplate();
TestApiCallbacks accessors(100);
- v8::Local<v8::External> data = v8::External::New(&accessors);
+ v8::Local<v8::External> data =
+ v8::External::New(isolate, &accessors);
instance_template->SetAccessor(
- v8::String::New("foo"), &TestApiCallbacks::Getter,
- &TestApiCallbacks::Setter, data);
+ v8::String::NewFromUtf8(isolate, "foo"),
+ &TestApiCallbacks::Getter, &TestApiCallbacks::Setter, data);
v8::Local<v8::Function> func = func_template->GetFunction();
v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8::String::New("instance"), instance);
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
+ instance);
- v8::Script::Compile(v8::String::New(native_accessor_test_source))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate, native_accessor_test_source))
+ ->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(isolate, "start")));
int32_t repeat_count = 1;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 180);
+ RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 180);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "get foo");
- GetChild(startNode, "set foo");
+ const v8::CpuProfileNode* startNode =
+ GetChild(isolate, root, "start");
+ GetChild(isolate, startNode, "get foo");
+ GetChild(isolate, startNode, "set foo");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -750,48 +765,55 @@ TEST(NativeAccessorUninitializedIC) {
// hot and to trigger optimizations.
TEST(NativeAccessorMonomorphicIC) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
-
- v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(isolate);
v8::Local<v8::ObjectTemplate> instance_template =
func_template->InstanceTemplate();
TestApiCallbacks accessors(1);
- v8::Local<v8::External> data = v8::External::New(&accessors);
+ v8::Local<v8::External> data =
+ v8::External::New(isolate, &accessors);
instance_template->SetAccessor(
- v8::String::New("foo"), &TestApiCallbacks::Getter,
- &TestApiCallbacks::Setter, data);
+ v8::String::NewFromUtf8(isolate, "foo"),
+ &TestApiCallbacks::Getter, &TestApiCallbacks::Setter, data);
v8::Local<v8::Function> func = func_template->GetFunction();
v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8::String::New("instance"), instance);
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
+ instance);
- v8::Script::Compile(v8::String::New(native_accessor_test_source))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate, native_accessor_test_source))
+ ->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(isolate, "start")));
{
// Make sure accessors ICs are in monomorphic state before starting
// profiling.
accessors.set_warming_up(true);
int32_t warm_up_iterations = 3;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(warm_up_iterations) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(isolate, warm_up_iterations)
+ };
function->Call(env->Global(), ARRAY_SIZE(args), args);
accessors.set_warming_up(false);
}
int32_t repeat_count = 100;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 200);
+ RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "get foo");
- GetChild(startNode, "set foo");
+ const v8::CpuProfileNode* startNode =
+ GetChild(isolate, root, "start");
+ GetChild(isolate, startNode, "get foo");
+ GetChild(isolate, startNode, "set foo");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -804,85 +826,105 @@ static const char* native_method_test_source = "function start(count) {\n"
TEST(NativeMethodUninitializedIC) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
TestApiCallbacks callbacks(100);
- v8::Local<v8::External> data = v8::External::New(&callbacks);
+ v8::Local<v8::External> data =
+ v8::External::New(isolate, &callbacks);
- v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
- func_template->SetClassName(v8::String::New("Test_InstanceCostructor"));
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(isolate);
+ func_template->SetClassName(
+ v8::String::NewFromUtf8(isolate, "Test_InstanceCostructor"));
v8::Local<v8::ObjectTemplate> proto_template =
func_template->PrototypeTemplate();
- v8::Local<v8::Signature> signature = v8::Signature::New(func_template);
- proto_template->Set(v8::String::New("fooMethod"), v8::FunctionTemplate::New(
- &TestApiCallbacks::Callback, data, signature, 0));
+ v8::Local<v8::Signature> signature =
+ v8::Signature::New(isolate, func_template);
+ proto_template->Set(v8::String::NewFromUtf8(isolate, "fooMethod"),
+ v8::FunctionTemplate::New(isolate,
+ &TestApiCallbacks::Callback,
+ data, signature, 0));
v8::Local<v8::Function> func = func_template->GetFunction();
v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8::String::New("instance"), instance);
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
+ instance);
- v8::Script::Compile(v8::String::New(native_method_test_source))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(
+ isolate, native_method_test_source))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(isolate, "start")));
int32_t repeat_count = 1;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "fooMethod");
+ const v8::CpuProfileNode* startNode =
+ GetChild(isolate, root, "start");
+ GetChild(isolate, startNode, "fooMethod");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
TEST(NativeMethodMonomorphicIC) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
TestApiCallbacks callbacks(1);
- v8::Local<v8::External> data = v8::External::New(&callbacks);
+ v8::Local<v8::External> data =
+ v8::External::New(isolate, &callbacks);
- v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
- func_template->SetClassName(v8::String::New("Test_InstanceCostructor"));
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(isolate);
+ func_template->SetClassName(
+ v8::String::NewFromUtf8(isolate, "Test_InstanceCostructor"));
v8::Local<v8::ObjectTemplate> proto_template =
func_template->PrototypeTemplate();
- v8::Local<v8::Signature> signature = v8::Signature::New(func_template);
- proto_template->Set(v8::String::New("fooMethod"), v8::FunctionTemplate::New(
- &TestApiCallbacks::Callback, data, signature, 0));
+ v8::Local<v8::Signature> signature =
+ v8::Signature::New(isolate, func_template);
+ proto_template->Set(v8::String::NewFromUtf8(isolate, "fooMethod"),
+ v8::FunctionTemplate::New(isolate,
+ &TestApiCallbacks::Callback,
+ data, signature, 0));
v8::Local<v8::Function> func = func_template->GetFunction();
v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8::String::New("instance"), instance);
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
+ instance);
- v8::Script::Compile(v8::String::New(native_method_test_source))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(
+ isolate, native_method_test_source))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(isolate, "start")));
{
// Make sure method ICs are in monomorphic state before starting
// profiling.
callbacks.set_warming_up(true);
int32_t warm_up_iterations = 3;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(warm_up_iterations) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(isolate, warm_up_iterations)
+ };
function->Call(env->Global(), ARRAY_SIZE(args), args);
callbacks.set_warming_up(false);
}
int32_t repeat_count = 100;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- GetChild(root, "start");
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "fooMethod");
+ GetChild(isolate, root, "start");
+ const v8::CpuProfileNode* startNode =
+ GetChild(isolate, root, "start");
+ GetChild(isolate, startNode, "fooMethod");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -904,28 +946,34 @@ TEST(BoundFunctionCall) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(v8::String::New(bound_function_test_source))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), bound_function_test_source))
+ ->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
int32_t duration_ms = 100;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), duration_ms)
+ };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::New("start");
+ names[0] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
// Don't allow |foo| node to be at the top level.
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "foo");
+ const v8::CpuProfileNode* startNode =
+ GetChild(env->GetIsolate(), root, "start");
+ GetChild(env->GetIsolate(), startNode, "foo");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -960,22 +1008,28 @@ TEST(FunctionCallSample) {
// Collect garbage that might have been generated while installing extensions.
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
- v8::Script::Compile(v8::String::New(call_function_test_source))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(
+ env->GetIsolate(), call_function_test_source))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
int32_t duration_ms = 100;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), duration_ms)
+ };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
ScopedVector<v8::Handle<v8::String> > names(4);
- names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::New("start");
- names[3] = v8::String::New(i::ProfileGenerator::kUnresolvedFunctionName);
+ names[0] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[3] = v8::String::NewFromUtf8(
+ env->GetIsolate(), i::ProfileGenerator::kUnresolvedFunctionName);
// Don't allow |bar| and |call| nodes to be at the top level.
CheckChildrenNames(root, names);
}
@@ -984,25 +1038,25 @@ TEST(FunctionCallSample) {
// won't be |start| node in the profiles.
bool is_gc_stress_testing =
(i::FLAG_gc_interval != -1) || i::FLAG_stress_compaction;
- const v8::CpuProfileNode* startNode = FindChild(root, "start");
+ const v8::CpuProfileNode* startNode =
+ FindChild(env->GetIsolate(), root, "start");
CHECK(is_gc_stress_testing || startNode);
if (startNode) {
ScopedVector<v8::Handle<v8::String> > names(2);
- names[0] = v8::String::New("bar");
- names[1] = v8::String::New("call");
+ names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "bar");
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(), "call");
CheckChildrenNames(startNode, names);
}
- const v8::CpuProfileNode* unresolvedNode =
- FindChild(root, i::ProfileGenerator::kUnresolvedFunctionName);
+ const v8::CpuProfileNode* unresolvedNode = FindChild(
+ env->GetIsolate(), root, i::ProfileGenerator::kUnresolvedFunctionName);
if (unresolvedNode) {
ScopedVector<v8::Handle<v8::String> > names(1);
- names[0] = v8::String::New("call");
+ names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "call");
CheckChildrenNames(unresolvedNode, names);
}
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -1035,54 +1089,63 @@ TEST(FunctionApplySample) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(v8::String::New(function_apply_test_source))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), function_apply_test_source))
+ ->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
int32_t duration_ms = 100;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), duration_ms)
+ };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 100);
+ RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::New("start");
+ names[0] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
// Don't allow |test|, |bar| and |apply| nodes to be at the top level.
CheckChildrenNames(root, names);
}
- const v8::CpuProfileNode* startNode = FindChild(root, "start");
+ const v8::CpuProfileNode* startNode =
+ FindChild(env->GetIsolate(), root, "start");
if (startNode) {
{
ScopedVector<v8::Handle<v8::String> > names(2);
- names[0] = v8::String::New("test");
- names[1] = v8::String::New(ProfileGenerator::kUnresolvedFunctionName);
+ names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "test");
+ names[1] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kUnresolvedFunctionName);
CheckChildrenNames(startNode, names);
}
- const v8::CpuProfileNode* testNode = FindChild(startNode, "test");
+ const v8::CpuProfileNode* testNode =
+ FindChild(env->GetIsolate(), startNode, "test");
if (testNode) {
ScopedVector<v8::Handle<v8::String> > names(2);
- names[0] = v8::String::New("bar");
- names[1] = v8::String::New("apply");
+ names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "bar");
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(), "apply");
CheckChildrenNames(testNode, names);
}
if (const v8::CpuProfileNode* unresolvedNode =
- FindChild(startNode, ProfileGenerator::kUnresolvedFunctionName)) {
+ FindChild(env->GetIsolate(), startNode,
+ ProfileGenerator::kUnresolvedFunctionName)) {
ScopedVector<v8::Handle<v8::String> > names(1);
- names[0] = v8::String::New("apply");
+ names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "apply");
CheckChildrenNames(unresolvedNode, names);
- GetChild(unresolvedNode, "apply");
+ GetChild(env->GetIsolate(), unresolvedNode, "apply");
}
}
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -1124,48 +1187,54 @@ static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
// 55 1 bar #16 5
// 54 54 foo #16 6
TEST(JsNativeJsSample) {
- const char* extensions[] = { "v8/profiler" };
- v8::ExtensionConfiguration config(1, extensions);
- LocalContext env(&config);
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
- CallJsFunction);
+ env->GetIsolate(), CallJsFunction);
v8::Local<v8::Function> func = func_template->GetFunction();
- func->SetName(v8::String::New("CallJsFunction"));
- env->Global()->Set(v8::String::New("CallJsFunction"), func);
+ func->SetName(v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction"));
+ env->Global()->Set(
+ v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction"), func);
- v8::Script::Compile(v8::String::New(js_native_js_test_source))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ js_native_js_test_source))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
int32_t duration_ms = 20;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), duration_ms)
+ };
const v8::CpuProfile* profile =
RunProfiler(env, function, args, ARRAY_SIZE(args), 10);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::New("start");
+ names[0] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
CheckChildrenNames(root, names);
}
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ const v8::CpuProfileNode* startNode =
+ GetChild(env->GetIsolate(), root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
const v8::CpuProfileNode* nativeFunctionNode =
- GetChild(startNode, "CallJsFunction");
+ GetChild(env->GetIsolate(), startNode, "CallJsFunction");
CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
- const v8::CpuProfileNode* barNode = GetChild(nativeFunctionNode, "bar");
+ const v8::CpuProfileNode* barNode =
+ GetChild(env->GetIsolate(), nativeFunctionNode, "bar");
CHECK_EQ(1, barNode->GetChildrenCount());
- GetChild(barNode, "foo");
+ GetChild(env->GetIsolate(), barNode, "foo");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -1202,47 +1271,53 @@ static const char* js_native_js_runtime_js_test_source =
// 51 51 foo #16 6
// 2 2 (program) #0 2
TEST(JsNativeJsRuntimeJsSample) {
- const char* extensions[] = { "v8/profiler" };
- v8::ExtensionConfiguration config(1, extensions);
- LocalContext env(&config);
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
- CallJsFunction);
+ env->GetIsolate(), CallJsFunction);
v8::Local<v8::Function> func = func_template->GetFunction();
- func->SetName(v8::String::New("CallJsFunction"));
- env->Global()->Set(v8::String::New("CallJsFunction"), func);
+ func->SetName(v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction"));
+ env->Global()->Set(
+ v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction"), func);
- v8::Script::Compile(v8::String::New(js_native_js_runtime_js_test_source))->
- Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(),
+ js_native_js_runtime_js_test_source))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
int32_t duration_ms = 20;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), duration_ms)
+ };
const v8::CpuProfile* profile =
RunProfiler(env, function, args, ARRAY_SIZE(args), 10);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::New("start");
+ names[0] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ const v8::CpuProfileNode* startNode =
+ GetChild(env->GetIsolate(), root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
const v8::CpuProfileNode* nativeFunctionNode =
- GetChild(startNode, "CallJsFunction");
+ GetChild(env->GetIsolate(), startNode, "CallJsFunction");
CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
- const v8::CpuProfileNode* barNode = GetChild(nativeFunctionNode, "bar");
+ const v8::CpuProfileNode* barNode =
+ GetChild(env->GetIsolate(), nativeFunctionNode, "bar");
CHECK_EQ(1, barNode->GetChildrenCount());
- GetChild(barNode, "foo");
+ GetChild(env->GetIsolate(), barNode, "foo");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -1284,55 +1359,63 @@ static const char* js_native1_js_native2_js_test_source =
// 54 54 foo #16 7
// 2 2 (program) #0 2
TEST(JsNative1JsNative2JsSample) {
- const char* extensions[] = { "v8/profiler" };
- v8::ExtensionConfiguration config(1, extensions);
- LocalContext env(&config);
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
- CallJsFunction);
+ env->GetIsolate(), CallJsFunction);
v8::Local<v8::Function> func1 = func_template->GetFunction();
- func1->SetName(v8::String::New("CallJsFunction1"));
- env->Global()->Set(v8::String::New("CallJsFunction1"), func1);
+ func1->SetName(v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction1"));
+ env->Global()->Set(
+ v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction1"), func1);
v8::Local<v8::Function> func2 = v8::FunctionTemplate::New(
- CallJsFunction2)->GetFunction();
- func2->SetName(v8::String::New("CallJsFunction2"));
- env->Global()->Set(v8::String::New("CallJsFunction2"), func2);
-
- v8::Script::Compile(v8::String::New(js_native1_js_native2_js_test_source))->
- Run();
+ env->GetIsolate(), CallJsFunction2)->GetFunction();
+ func2->SetName(v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction2"));
+ env->Global()->Set(
+ v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction2"), func2);
+
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(),
+ js_native1_js_native2_js_test_source))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("start")));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
int32_t duration_ms = 20;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
+ v8::Handle<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), duration_ms)
+ };
const v8::CpuProfile* profile =
RunProfiler(env, function, args, ARRAY_SIZE(args), 10);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::New("start");
+ names[0] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ const v8::CpuProfileNode* startNode =
+ GetChild(env->GetIsolate(), root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
const v8::CpuProfileNode* nativeNode1 =
- GetChild(startNode, "CallJsFunction1");
+ GetChild(env->GetIsolate(), startNode, "CallJsFunction1");
CHECK_EQ(1, nativeNode1->GetChildrenCount());
- const v8::CpuProfileNode* barNode = GetChild(nativeNode1, "bar");
+ const v8::CpuProfileNode* barNode =
+ GetChild(env->GetIsolate(), nativeNode1, "bar");
CHECK_EQ(1, barNode->GetChildrenCount());
- const v8::CpuProfileNode* nativeNode2 = GetChild(barNode, "CallJsFunction2");
+ const v8::CpuProfileNode* nativeNode2 =
+ GetChild(env->GetIsolate(), barNode, "CallJsFunction2");
CHECK_EQ(1, nativeNode2->GetChildrenCount());
- GetChild(nativeNode2, "foo");
+ GetChild(env->GetIsolate(), nativeNode2, "foo");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
@@ -1345,7 +1428,8 @@ TEST(IdleTime) {
v8::HandleScope scope(env->GetIsolate());
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+ v8::Local<v8::String> profile_name =
+ v8::String::NewFromUtf8(env->GetIsolate(), "my_profile");
cpu_profiler->StartCpuProfiling(profile_name);
i::Isolate* isolate = CcTest::i_isolate();
@@ -1370,30 +1454,36 @@ TEST(IdleTime) {
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::New(ProfileGenerator::kIdleEntryName);
+ names[0] = v8::String::NewFromUtf8(
+ env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::NewFromUtf8(env->GetIsolate(),
+ ProfileGenerator::kIdleEntryName);
CheckChildrenNames(root, names);
const v8::CpuProfileNode* programNode =
- GetChild(root, ProfileGenerator::kProgramEntryName);
+ GetChild(env->GetIsolate(), root, ProfileGenerator::kProgramEntryName);
CHECK_EQ(0, programNode->GetChildrenCount());
CHECK_GE(programNode->GetHitCount(), 3);
const v8::CpuProfileNode* idleNode =
- GetChild(root, ProfileGenerator::kIdleEntryName);
+ GetChild(env->GetIsolate(), root, ProfileGenerator::kIdleEntryName);
CHECK_EQ(0, idleNode->GetChildrenCount());
CHECK_GE(idleNode->GetHitCount(), 3);
- cpu_profiler->DeleteAllCpuProfiles();
+ const_cast<v8::CpuProfile*>(profile)->Delete();
}
-static void CheckFunctionDetails(const v8::CpuProfileNode* node,
- const char* name, const char* script_name, int script_id,
- int line, int column) {
- CHECK_EQ(v8::String::New(name), node->GetFunctionName());
- CHECK_EQ(v8::String::New(script_name), node->GetScriptResourceName());
+static void CheckFunctionDetails(v8::Isolate* isolate,
+ const v8::CpuProfileNode* node,
+ const char* name, const char* script_name,
+ int script_id, int line, int column) {
+ CHECK_EQ(v8::String::NewFromUtf8(isolate, name),
+ node->GetFunctionName());
+ CHECK_EQ(v8::String::NewFromUtf8(isolate, script_name),
+ node->GetScriptResourceName());
CHECK_EQ(script_id, node->GetScriptId());
CHECK_EQ(line, node->GetLineNumber());
CHECK_EQ(column, node->GetColumnNumber());
@@ -1401,24 +1491,26 @@ static void CheckFunctionDetails(const v8::CpuProfileNode* node,
TEST(FunctionDetails) {
- const char* extensions[] = { "v8/profiler" };
- v8::ExtensionConfiguration config(1, extensions);
- LocalContext env(&config);
- v8::HandleScope handleScope(env->GetIsolate());
-
- v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
- CHECK_EQ(0, profiler->GetProfileCount());
- v8::Handle<v8::Script> script_a = v8::Script::Compile(v8::String::New(
- " function foo\n() { try { bar(); } catch(e) {} }\n"
- " function bar() { startProfiling(); }\n"), v8::String::New("script_a"));
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+
+ v8::Handle<v8::Script> script_a = v8::Script::Compile(
+ v8::String::NewFromUtf8(
+ env->GetIsolate(),
+ " function foo\n() { try { bar(); } catch(e) {} }\n"
+ " function bar() { startProfiling(); }\n"),
+ v8::String::NewFromUtf8(env->GetIsolate(), "script_a"));
script_a->Run();
- v8::Handle<v8::Script> script_b = v8::Script::Compile(v8::String::New(
- "\n\n function baz() { try { foo(); } catch(e) {} }\n"
- "\n\nbaz();\n"
- "stopProfiling();\n"), v8::String::New("script_b"));
+ v8::Handle<v8::Script> script_b = v8::Script::Compile(
+ v8::String::NewFromUtf8(
+ env->GetIsolate(),
+ "\n\n function baz() { try { foo(); } catch(e) {} }\n"
+ "\n\nbaz();\n"
+ "stopProfiling();\n"),
+ v8::String::NewFromUtf8(env->GetIsolate(), "script_b"));
script_b->Run();
- CHECK_EQ(1, profiler->GetProfileCount());
- const v8::CpuProfile* profile = profiler->GetCpuProfile(0);
+ const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
const v8::CpuProfileNode* current = profile->GetTopDownRoot();
reinterpret_cast<ProfileNode*>(
const_cast<v8::CpuProfileNode*>(current))->Print(0);
@@ -1429,14 +1521,52 @@ TEST(FunctionDetails) {
// 0 foo 18 #4 TryCatchStatement script_a:2
// 1 bar 18 #5 no reason script_a:3
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* script = GetChild(root,
+ const v8::CpuProfileNode* script = GetChild(env->GetIsolate(), root,
ProfileGenerator::kAnonymousFunctionName);
- CheckFunctionDetails(script, ProfileGenerator::kAnonymousFunctionName,
- "script_b", script_b->GetId(), 1, 1);
- const v8::CpuProfileNode* baz = GetChild(script, "baz");
- CheckFunctionDetails(baz, "baz", "script_b", script_b->GetId(), 3, 16);
- const v8::CpuProfileNode* foo = GetChild(baz, "foo");
- CheckFunctionDetails(foo, "foo", "script_a", script_a->GetId(), 2, 1);
- const v8::CpuProfileNode* bar = GetChild(foo, "bar");
- CheckFunctionDetails(bar, "bar", "script_a", script_a->GetId(), 3, 14);
+ CheckFunctionDetails(env->GetIsolate(), script,
+ ProfileGenerator::kAnonymousFunctionName, "script_b",
+ script_b->GetId(), 1, 1);
+ const v8::CpuProfileNode* baz = GetChild(env->GetIsolate(), script, "baz");
+ CheckFunctionDetails(env->GetIsolate(), baz, "baz", "script_b",
+ script_b->GetId(), 3, 16);
+ const v8::CpuProfileNode* foo = GetChild(env->GetIsolate(), baz, "foo");
+ CheckFunctionDetails(env->GetIsolate(), foo, "foo", "script_a",
+ script_a->GetId(), 2, 1);
+ const v8::CpuProfileNode* bar = GetChild(env->GetIsolate(), foo, "bar");
+ CheckFunctionDetails(env->GetIsolate(), bar, "bar", "script_a",
+ script_a->GetId(), 3, 14);
+}
+
+
+TEST(DontStopOnFinishedProfileDelete) {
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+ v8::Isolate* isolate = env->GetIsolate();
+
+ v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
+ i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
+ v8::Handle<v8::String> outer = v8::String::NewFromUtf8(isolate, "outer");
+ profiler->StartCpuProfiling(outer);
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
+
+ v8::Handle<v8::String> inner = v8::String::NewFromUtf8(isolate, "inner");
+ profiler->StartCpuProfiling(inner);
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
+
+ const v8::CpuProfile* inner_profile = profiler->StopCpuProfiling(inner);
+ CHECK(inner_profile);
+ CHECK_EQ(1, iprofiler->GetProfilesCount());
+ const_cast<v8::CpuProfile*>(inner_profile)->Delete();
+ inner_profile = NULL;
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
+
+ const v8::CpuProfile* outer_profile = profiler->StopCpuProfiling(outer);
+ CHECK(outer_profile);
+ CHECK_EQ(1, iprofiler->GetProfilesCount());
+ const_cast<v8::CpuProfile*>(outer_profile)->Delete();
+ outer_profile = NULL;
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
}
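
A second recurring change in the profiler tests is profile ownership: rather than clearing state with cpu_profiler->DeleteAllCpuProfiles(), each test now deletes exactly the profile it stopped, and the new DontStopOnFinishedProfileDelete test checks that deleting a finished profile leaves a still-recording one untouched. A hedged sketch of that lifecycle, assuming a v8::Isolate* named isolate is in scope and using an illustrative profile name:

  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  v8::Handle<v8::String> name = v8::String::NewFromUtf8(isolate, "my_profile");
  profiler->StartCpuProfiling(name);
  // ... exercise the code under test ...
  const v8::CpuProfile* profile = profiler->StopCpuProfiling(name);
  // The caller owns the stopped profile; Delete() releases only this one,
  // so any profiling session that is still running keeps collecting samples.
  const_cast<v8::CpuProfile*>(profile)->Delete();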
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 1bd1dc3a0d..67ef88516a 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -70,65 +70,6 @@ using ::v8::internal::StrLength;
// Size of temp buffer for formatting small strings.
#define SMALL_STRING_BUFFER_SIZE 80
-// --- A d d i t i o n a l C h e c k H e l p e r s
-
-
-// Helper function used by the CHECK_EQ function when given Address
-// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source,
- ::v8::internal::Address expected,
- const char* value_source,
- ::v8::internal::Address value) {
- if (expected != value) {
- V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n# "
- "Expected: %i\n# Found: %i",
- expected_source, value_source, expected, value);
- }
-}
-
-
-// Helper function used by the CHECK_NE function when given Address
-// arguments. Should not be called directly.
-static inline void CheckNonEqualsHelper(const char* file, int line,
- const char* unexpected_source,
- ::v8::internal::Address unexpected,
- const char* value_source,
- ::v8::internal::Address value) {
- if (unexpected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i",
- unexpected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given code
-// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source,
- const Code* expected,
- const char* value_source,
- const Code* value) {
- if (expected != value) {
- V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n# "
- "Expected: %p\n# Found: %p",
- expected_source, value_source, expected, value);
- }
-}
-
-
-static inline void CheckNonEqualsHelper(const char* file, int line,
- const char* expected_source,
- const Code* expected,
- const char* value_source,
- const Code* value) {
- if (expected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
- expected_source, value_source, value);
- }
-}
-
-
// --- H e l p e r C l a s s e s
@@ -154,6 +95,7 @@ class DebugLocalContext {
inline v8::Local<v8::Context> context() { return context_; }
inline v8::Context* operator->() { return *context_; }
inline v8::Context* operator*() { return *context_; }
+ inline v8::Isolate* GetIsolate() { return context_->GetIsolate(); }
inline bool IsReady() { return !context_.IsEmpty(); }
void ExposeDebug() {
v8::internal::Isolate* isolate =
@@ -169,10 +111,10 @@ class DebugLocalContext {
v8::Utils::OpenHandle(*context_->Global())));
Handle<v8::internal::String> debug_string =
factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("debug"));
- SetProperty(isolate, global, debug_string,
- Handle<Object>(debug->debug_context()->global_proxy(), isolate),
- DONT_ENUM,
- ::v8::internal::kNonStrictMode);
+ v8::internal::Runtime::SetObjectProperty(isolate, global, debug_string,
+ Handle<Object>(debug->debug_context()->global_proxy(), isolate),
+ DONT_ENUM,
+ ::v8::internal::kNonStrictMode);
}
private:
@@ -188,20 +130,22 @@ class DebugLocalContext {
static v8::Local<v8::Function> CompileFunction(DebugLocalContext* env,
const char* source,
const char* function_name) {
- v8::Script::Compile(v8::String::New(source))->Run();
- return v8::Local<v8::Function>::Cast(
- (*env)->Global()->Get(v8::String::New(function_name)));
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), source))
+ ->Run();
+ return v8::Local<v8::Function>::Cast((*env)->Global()->Get(
+ v8::String::NewFromUtf8(env->GetIsolate(), function_name)));
}
// Compile and run the supplied source and return the requested function.
-static v8::Local<v8::Function> CompileFunction(const char* source,
+static v8::Local<v8::Function> CompileFunction(v8::Isolate* isolate,
+ const char* source,
const char* function_name) {
- v8::Script::Compile(v8::String::New(source))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, source))->Run();
v8::Local<v8::Object> global =
CcTest::isolate()->GetCurrentContext()->Global();
return v8::Local<v8::Function>::Cast(
- global->Get(v8::String::New(function_name)));
+ global->Get(v8::String::NewFromUtf8(isolate, function_name)));
}
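
Most of the churn in test-debug.cc follows from the same isolate threading: helpers that used to build script sources with v8::String::New now receive the isolate as an extra parameter. A small illustrative helper in that style; the name CompileRunSnippet is hypothetical and not part of the patch:

  static v8::Handle<v8::Value> CompileRunSnippet(v8::Isolate* isolate,
                                                 const char* source) {
    // Compile the snippet against the given isolate and run it in the
    // currently entered context, returning the completion value.
    v8::Handle<v8::String> src = v8::String::NewFromUtf8(isolate, source);
    return v8::Script::Compile(src)->Run();
  }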
@@ -236,20 +180,22 @@ static int SetBreakPoint(v8::Handle<v8::Function> fun, int position) {
// Set a break point in a function using the Debug object and return the
// associated break point number.
-static int SetBreakPointFromJS(const char* function_name,
+static int SetBreakPointFromJS(v8::Isolate* isolate,
+ const char* function_name,
int line, int position) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
OS::SNPrintF(buffer,
"debug.Debug.setBreakPoint(%s,%d,%d)",
function_name, line, position);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Handle<v8::String> str = v8::String::New(buffer.start());
+ v8::Handle<v8::String> str = v8::String::NewFromUtf8(isolate, buffer.start());
return v8::Script::Compile(str)->Run()->Int32Value();
}
// Set a break point in a script identified by id using the global Debug object.
-static int SetScriptBreakPointByIdFromJS(int script_id, int line, int column) {
+static int SetScriptBreakPointByIdFromJS(v8::Isolate* isolate, int script_id,
+ int line, int column) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
if (column >= 0) {
// Column specified set script break point on precise location.
@@ -265,7 +211,8 @@ static int SetScriptBreakPointByIdFromJS(int script_id, int line, int column) {
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
{
v8::TryCatch try_catch;
- v8::Handle<v8::String> str = v8::String::New(buffer.start());
+ v8::Handle<v8::String> str =
+ v8::String::NewFromUtf8(isolate, buffer.start());
v8::Handle<v8::Value> value = v8::Script::Compile(str)->Run();
CHECK(!try_catch.HasCaught());
return value->Int32Value();
@@ -275,8 +222,9 @@ static int SetScriptBreakPointByIdFromJS(int script_id, int line, int column) {
// Set a break point in a script identified by name using the global Debug
// object.
-static int SetScriptBreakPointByNameFromJS(const char* script_name,
- int line, int column) {
+static int SetScriptBreakPointByNameFromJS(v8::Isolate* isolate,
+ const char* script_name, int line,
+ int column) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
if (column >= 0) {
// Column specified set script break point on precise location.
@@ -292,7 +240,8 @@ static int SetScriptBreakPointByNameFromJS(const char* script_name,
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
{
v8::TryCatch try_catch;
- v8::Handle<v8::String> str = v8::String::New(buffer.start());
+ v8::Handle<v8::String> str =
+ v8::String::NewFromUtf8(isolate, buffer.start());
v8::Handle<v8::Value> value = v8::Script::Compile(str)->Run();
CHECK(!try_catch.HasCaught());
return value->Int32Value();
@@ -310,55 +259,60 @@ static void ClearBreakPoint(int break_point) {
// Clear a break point using the global Debug object.
-static void ClearBreakPointFromJS(int break_point_number) {
+static void ClearBreakPointFromJS(v8::Isolate* isolate,
+ int break_point_number) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
OS::SNPrintF(buffer,
"debug.Debug.clearBreakPoint(%d)",
break_point_number);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::New(buffer.start()))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
}
-static void EnableScriptBreakPointFromJS(int break_point_number) {
+static void EnableScriptBreakPointFromJS(v8::Isolate* isolate,
+ int break_point_number) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
OS::SNPrintF(buffer,
"debug.Debug.enableScriptBreakPoint(%d)",
break_point_number);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::New(buffer.start()))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
}
-static void DisableScriptBreakPointFromJS(int break_point_number) {
+static void DisableScriptBreakPointFromJS(v8::Isolate* isolate,
+ int break_point_number) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
OS::SNPrintF(buffer,
"debug.Debug.disableScriptBreakPoint(%d)",
break_point_number);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::New(buffer.start()))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
}
-static void ChangeScriptBreakPointConditionFromJS(int break_point_number,
+static void ChangeScriptBreakPointConditionFromJS(v8::Isolate* isolate,
+ int break_point_number,
const char* condition) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
OS::SNPrintF(buffer,
"debug.Debug.changeScriptBreakPointCondition(%d, \"%s\")",
break_point_number, condition);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::New(buffer.start()))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
}
-static void ChangeScriptBreakPointIgnoreCountFromJS(int break_point_number,
+static void ChangeScriptBreakPointIgnoreCountFromJS(v8::Isolate* isolate,
+ int break_point_number,
int ignoreCount) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
OS::SNPrintF(buffer,
"debug.Debug.changeScriptBreakPointIgnoreCount(%d, %d)",
break_point_number, ignoreCount);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::New(buffer.start()))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
}
@@ -371,20 +325,25 @@ static void ChangeBreakOnException(bool caught, bool uncaught) {
// Change break on exception using the global Debug object.
-static void ChangeBreakOnExceptionFromJS(bool caught, bool uncaught) {
+static void ChangeBreakOnExceptionFromJS(v8::Isolate* isolate, bool caught,
+ bool uncaught) {
if (caught) {
v8::Script::Compile(
- v8::String::New("debug.Debug.setBreakOnException()"))->Run();
+ v8::String::NewFromUtf8(isolate, "debug.Debug.setBreakOnException()"))
+ ->Run();
} else {
v8::Script::Compile(
- v8::String::New("debug.Debug.clearBreakOnException()"))->Run();
+ v8::String::NewFromUtf8(isolate, "debug.Debug.clearBreakOnException()"))
+ ->Run();
}
if (uncaught) {
v8::Script::Compile(
- v8::String::New("debug.Debug.setBreakOnUncaughtException()"))->Run();
+ v8::String::NewFromUtf8(
+ isolate, "debug.Debug.setBreakOnUncaughtException()"))->Run();
} else {
v8::Script::Compile(
- v8::String::New("debug.Debug.clearBreakOnUncaughtException()"))->Run();
+ v8::String::NewFromUtf8(
+ isolate, "debug.Debug.clearBreakOnUncaughtException()"))->Run();
}
}
@@ -429,12 +388,6 @@ Handle<FixedArray> GetDebuggedFunctions() {
}
-static Handle<Code> ComputeCallDebugBreak(int argc) {
- return CcTest::i_isolate()->stub_cache()->ComputeCallDebugBreak(argc,
- Code::CALL_IC);
-}
-
-
// Check that the debugger has been fully unloaded.
void CheckDebuggerUnloaded(bool check_functions) {
// Check that the debugger context is cleared and that there is no debug
@@ -685,7 +638,9 @@ static void DebugEventBreakPointHitCount(
if (!frame_function_name.IsEmpty()) {
// Get the name of the function.
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
+ v8::Handle<v8::Value> argv[argc] = {
+ exec_state, v8::Integer::New(CcTest::isolate(), 0)
+ };
v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
argc, argv);
if (result->IsUndefined()) {
@@ -799,7 +754,8 @@ static void DebugEventCounter(
exception_hit_count++;
// Check whether the exception was uncaught.
- v8::Local<v8::String> fun_name = v8::String::New("uncaught");
+ v8::Local<v8::String> fun_name =
+ v8::String::NewFromUtf8(CcTest::isolate(), "uncaught");
v8::Local<v8::Function> fun =
v8::Local<v8::Function>::Cast(event_data->Get(fun_name));
v8::Local<v8::Value> result = fun->Call(event_data, 0, NULL);
@@ -856,9 +812,10 @@ static void DebugEventEvaluate(
if (event == v8::Break) {
for (int i = 0; checks[i].expr != NULL; i++) {
const int argc = 3;
- v8::Handle<v8::Value> argv[argc] = { exec_state,
- v8::String::New(checks[i].expr),
- checks[i].expected };
+ v8::Handle<v8::Value> argv[argc] = {
+ exec_state,
+ v8::String::NewFromUtf8(CcTest::isolate(), checks[i].expr),
+ checks[i].expected};
v8::Handle<v8::Value> result =
evaluate_check_function->Call(exec_state, argc, argv);
if (!result->IsTrue()) {
@@ -930,7 +887,9 @@ static void DebugEventStepSequence(
CHECK(break_point_hit_count <
StrLength(expected_step_sequence));
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
+ v8::Handle<v8::Value> argv[argc] = {
+ exec_state, v8::Integer::New(CcTest::isolate(), 0)
+ };
v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
argc, argv);
CHECK(result->IsString());
@@ -1075,13 +1034,13 @@ TEST(DebugStub) {
CheckDebugBreakFunction(&env,
"function f2(){x=1;}", "f2",
0,
- v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
+ v8::internal::RelocInfo::CODE_TARGET,
CcTest::i_isolate()->builtins()->builtin(
Builtins::kStoreIC_DebugBreak));
CheckDebugBreakFunction(&env,
"function f3(){var a=x;}", "f3",
0,
- v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
+ v8::internal::RelocInfo::CODE_TARGET,
CcTest::i_isolate()->builtins()->builtin(
Builtins::kLoadIC_DebugBreak));
@@ -1119,27 +1078,28 @@ TEST(DebugStub) {
// Check the debug break code stubs for call ICs with different number of
// parameters.
- Handle<Code> debug_break_0 = v8::internal::ComputeCallDebugBreak(0);
- Handle<Code> debug_break_1 = v8::internal::ComputeCallDebugBreak(1);
- Handle<Code> debug_break_4 = v8::internal::ComputeCallDebugBreak(4);
+ // TODO(verwaest): XXX update test.
+ // Handle<Code> debug_break_0 = v8::internal::ComputeCallDebugBreak(0);
+ // Handle<Code> debug_break_1 = v8::internal::ComputeCallDebugBreak(1);
+ // Handle<Code> debug_break_4 = v8::internal::ComputeCallDebugBreak(4);
- CheckDebugBreakFunction(&env,
- "function f4_0(){x();}", "f4_0",
- 0,
- v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
- *debug_break_0);
+ // CheckDebugBreakFunction(&env,
+ // "function f4_0(){x();}", "f4_0",
+ // 0,
+ // v8::internal::RelocInfo::CODE_TARGET,
+ // *debug_break_0);
- CheckDebugBreakFunction(&env,
- "function f4_1(){x(1);}", "f4_1",
- 0,
- v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
- *debug_break_1);
+ // CheckDebugBreakFunction(&env,
+ // "function f4_1(){x(1);}", "f4_1",
+ // 0,
+ // v8::internal::RelocInfo::CODE_TARGET,
+ // *debug_break_1);
- CheckDebugBreakFunction(&env,
- "function f4_4(){x(1,2,3,4);}", "f4_4",
- 0,
- v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
- *debug_break_4);
+ // CheckDebugBreakFunction(&env,
+ // "function f4_4(){x(1,2,3,4);}", "f4_4",
+ // 0,
+ // v8::internal::RelocInfo::CODE_TARGET,
+ // *debug_break_4);
}
@@ -1187,9 +1147,10 @@ TEST(BreakPointICStore) {
v8::HandleScope scope(env->GetIsolate());
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Script::Compile(v8::String::New("function foo(){bar=0;}"))->Run();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "function foo(){bar=0;}"))->Run();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
// Run without breakpoints.
foo->Call(env->Global(), 0, NULL);
@@ -1218,10 +1179,13 @@ TEST(BreakPointICLoad) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Script::Compile(v8::String::New("bar=1"))->Run();
- v8::Script::Compile(v8::String::New("function foo(){var x=bar;}"))->Run();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "bar=1"))
+ ->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "function foo(){var x=bar;}"))
+ ->Run();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
// Run without breakpoints.
foo->Call(env->Global(), 0, NULL);
@@ -1250,10 +1214,12 @@ TEST(BreakPointICCall) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Script::Compile(v8::String::New("function bar(){}"))->Run();
- v8::Script::Compile(v8::String::New("function foo(){bar();}"))->Run();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "function bar(){}"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "function foo(){bar();}"))->Run();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
// Run without breakpoints.
foo->Call(env->Global(), 0, NULL);
@@ -1282,10 +1248,14 @@ TEST(BreakPointICCallWithGC) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Debug::SetDebugEventListener2(DebugEventBreakPointCollectGarbage);
- v8::Script::Compile(v8::String::New("function bar(){return 1;}"))->Run();
- v8::Script::Compile(v8::String::New("function foo(){return bar();}"))->Run();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "function bar(){return 1;}"))
+ ->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "function foo(){return bar();}"))
+ ->Run();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
// Run without breakpoints.
CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
@@ -1314,11 +1284,14 @@ TEST(BreakPointConstructCallWithGC) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Debug::SetDebugEventListener2(DebugEventBreakPointCollectGarbage);
- v8::Script::Compile(v8::String::New("function bar(){ this.x = 1;}"))->Run();
- v8::Script::Compile(v8::String::New(
- "function foo(){return new bar(1).x;}"))->Run();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "function bar(){ this.x = 1;}"))
+ ->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(),
+ "function foo(){return new bar(1).x;}"))->Run();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
// Run without breakpoints.
CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
@@ -1358,9 +1331,10 @@ TEST(BreakPointReturn) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Script::Compile(v8::String::New("function foo(){}"))->Run();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "function foo(){}"))->Run();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
// Run without breakpoints.
foo->Call(env->Global(), 0, NULL);
@@ -1527,40 +1501,44 @@ TEST(BreakPointThroughJavaScript) {
env.ExposeDebug();
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Script::Compile(v8::String::New("function bar(){}"))->Run();
- v8::Script::Compile(v8::String::New("function foo(){bar();bar();}"))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "function bar(){}"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "function foo(){bar();bar();}"))
+ ->Run();
// 012345678901234567890
// 1 2
  // Break points are set at positions 3 and 9
- v8::Local<v8::Script> foo = v8::Script::Compile(v8::String::New("foo()"));
+ v8::Local<v8::Script> foo =
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "foo()"));
// Run without breakpoints.
foo->Run();
CHECK_EQ(0, break_point_hit_count);
// Run with one breakpoint
- int bp1 = SetBreakPointFromJS("foo", 0, 3);
+ int bp1 = SetBreakPointFromJS(env->GetIsolate(), "foo", 0, 3);
foo->Run();
CHECK_EQ(1, break_point_hit_count);
foo->Run();
CHECK_EQ(2, break_point_hit_count);
// Run with two breakpoints
- int bp2 = SetBreakPointFromJS("foo", 0, 9);
+ int bp2 = SetBreakPointFromJS(env->GetIsolate(), "foo", 0, 9);
foo->Run();
CHECK_EQ(4, break_point_hit_count);
foo->Run();
CHECK_EQ(6, break_point_hit_count);
// Run with one breakpoint
- ClearBreakPointFromJS(bp2);
+ ClearBreakPointFromJS(env->GetIsolate(), bp2);
foo->Run();
CHECK_EQ(7, break_point_hit_count);
foo->Run();
CHECK_EQ(8, break_point_hit_count);
// Run without breakpoints.
- ClearBreakPointFromJS(bp1);
+ ClearBreakPointFromJS(env->GetIsolate(), bp1);
foo->Run();
CHECK_EQ(8, break_point_hit_count);
@@ -1583,7 +1561,8 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Local<v8::String> script = v8::String::New(
+ v8::Local<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(),
"function f() {\n"
" function h() {\n"
" a = 0; // line 2\n"
@@ -1604,12 +1583,12 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
// Compile the script and get the two functions.
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::New("test"));
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
v8::Script::Compile(script, &origin)->Run();
- v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
- v8::Local<v8::Function> g =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
// Call f and g without break points.
break_point_hit_count = 0;
@@ -1619,7 +1598,7 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 12.
- int sbp1 = SetScriptBreakPointByNameFromJS("test", 12, 0);
+ int sbp1 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 12, 0);
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
@@ -1628,14 +1607,14 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
// Remove the break point again.
break_point_hit_count = 0;
- ClearBreakPointFromJS(sbp1);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp1);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
g->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 2.
- int sbp2 = SetScriptBreakPointByNameFromJS("test", 2, 0);
+ int sbp2 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 2, 0);
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
@@ -1643,10 +1622,10 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
CHECK_EQ(2, break_point_hit_count);
// Call f and g with break point on line 2, 4, 12, 14 and 15.
- int sbp3 = SetScriptBreakPointByNameFromJS("test", 4, 0);
- int sbp4 = SetScriptBreakPointByNameFromJS("test", 12, 0);
- int sbp5 = SetScriptBreakPointByNameFromJS("test", 14, 0);
- int sbp6 = SetScriptBreakPointByNameFromJS("test", 15, 0);
+ int sbp3 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 4, 0);
+ int sbp4 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 12, 0);
+ int sbp5 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 14, 0);
+ int sbp6 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 15, 0);
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
@@ -1655,11 +1634,11 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
// Remove all the break points again.
break_point_hit_count = 0;
- ClearBreakPointFromJS(sbp2);
- ClearBreakPointFromJS(sbp3);
- ClearBreakPointFromJS(sbp4);
- ClearBreakPointFromJS(sbp5);
- ClearBreakPointFromJS(sbp6);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp2);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp3);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp4);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp5);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp6);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
g->Call(env->Global(), 0, NULL);
@@ -1686,7 +1665,8 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Local<v8::String> source = v8::String::New(
+ v8::Local<v8::String> source = v8::String::NewFromUtf8(
+ env->GetIsolate(),
"function f() {\n"
" function h() {\n"
" a = 0; // line 2\n"
@@ -1707,16 +1687,16 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
// Compile the script and get the two functions.
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::New("test"));
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
v8::Local<v8::Script> script = v8::Script::Compile(source, &origin);
script->Run();
- v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
- v8::Local<v8::Function> g =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
// Get the script id knowing that internally it is a 32 integer.
- uint32_t script_id = script->Id()->Uint32Value();
+ int script_id = script->GetId();
// Call f and g without break points.
break_point_hit_count = 0;
@@ -1726,7 +1706,7 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 12.
- int sbp1 = SetScriptBreakPointByIdFromJS(script_id, 12, 0);
+ int sbp1 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 12, 0);
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
@@ -1735,14 +1715,14 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
// Remove the break point again.
break_point_hit_count = 0;
- ClearBreakPointFromJS(sbp1);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp1);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
g->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 2.
- int sbp2 = SetScriptBreakPointByIdFromJS(script_id, 2, 0);
+ int sbp2 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 2, 0);
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
@@ -1750,10 +1730,10 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
CHECK_EQ(2, break_point_hit_count);
// Call f and g with break point on line 2, 4, 12, 14 and 15.
- int sbp3 = SetScriptBreakPointByIdFromJS(script_id, 4, 0);
- int sbp4 = SetScriptBreakPointByIdFromJS(script_id, 12, 0);
- int sbp5 = SetScriptBreakPointByIdFromJS(script_id, 14, 0);
- int sbp6 = SetScriptBreakPointByIdFromJS(script_id, 15, 0);
+ int sbp3 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 4, 0);
+ int sbp4 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 12, 0);
+ int sbp5 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 14, 0);
+ int sbp6 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 15, 0);
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
@@ -1762,11 +1742,11 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
// Remove all the break points again.
break_point_hit_count = 0;
- ClearBreakPointFromJS(sbp2);
- ClearBreakPointFromJS(sbp3);
- ClearBreakPointFromJS(sbp4);
- ClearBreakPointFromJS(sbp5);
- ClearBreakPointFromJS(sbp6);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp2);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp3);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp4);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp5);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp6);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
g->Call(env->Global(), 0, NULL);
@@ -1794,45 +1774,47 @@ TEST(EnableDisableScriptBreakPoint) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Local<v8::String> script = v8::String::New(
+ v8::Local<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(),
"function f() {\n"
" a = 0; // line 1\n"
"};");
// Compile the script and get function f.
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::New("test"));
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
v8::Script::Compile(script, &origin)->Run();
- v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
// Set script break point on line 1 (in function f).
- int sbp = SetScriptBreakPointByNameFromJS("test", 1, 0);
+ int sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
  // Call f while enabling and disabling the script break point.
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
- DisableScriptBreakPointFromJS(sbp);
+ DisableScriptBreakPointFromJS(env->GetIsolate(), sbp);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
- EnableScriptBreakPointFromJS(sbp);
+ EnableScriptBreakPointFromJS(env->GetIsolate(), sbp);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
- DisableScriptBreakPointFromJS(sbp);
+ DisableScriptBreakPointFromJS(env->GetIsolate(), sbp);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
  // Reload the script and get f again checking that the disabling survives.
v8::Script::Compile(script, &origin)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
- EnableScriptBreakPointFromJS(sbp);
+ EnableScriptBreakPointFromJS(env->GetIsolate(), sbp);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(3, break_point_hit_count);
@@ -1850,7 +1832,8 @@ TEST(ConditionalScriptBreakPoint) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Local<v8::String> script = v8::String::New(
+ v8::Local<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(),
"count = 0;\n"
"function f() {\n"
" g(count++); // line 2\n"
@@ -1861,26 +1844,26 @@ TEST(ConditionalScriptBreakPoint) {
// Compile the script and get function f.
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::New("test"));
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
v8::Script::Compile(script, &origin)->Run();
- v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
// Set script break point on line 5 (in function g).
- int sbp1 = SetScriptBreakPointByNameFromJS("test", 5, 0);
+ int sbp1 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 5, 0);
// Call f with different conditions on the script break point.
break_point_hit_count = 0;
- ChangeScriptBreakPointConditionFromJS(sbp1, "false");
+ ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "false");
f->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
- ChangeScriptBreakPointConditionFromJS(sbp1, "true");
+ ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "true");
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
- ChangeScriptBreakPointConditionFromJS(sbp1, "x % 2 == 0");
+ ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "x % 2 == 0");
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
f->Call(env->Global(), 0, NULL);
@@ -1889,7 +1872,8 @@ TEST(ConditionalScriptBreakPoint) {
// Reload the script and get f again checking that the condition survives.
v8::Script::Compile(script, &origin)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
@@ -1911,30 +1895,31 @@ TEST(ScriptBreakPointIgnoreCount) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Local<v8::String> script = v8::String::New(
+ v8::Local<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(),
"function f() {\n"
" a = 0; // line 1\n"
"};");
// Compile the script and get function f.
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::New("test"));
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
v8::Script::Compile(script, &origin)->Run();
- v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
// Set script break point on line 1 (in function f).
- int sbp = SetScriptBreakPointByNameFromJS("test", 1, 0);
+ int sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
// Call f with different ignores on the script break point.
break_point_hit_count = 0;
- ChangeScriptBreakPointIgnoreCountFromJS(sbp, 1);
+ ChangeScriptBreakPointIgnoreCountFromJS(env->GetIsolate(), sbp, 1);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
- ChangeScriptBreakPointIgnoreCountFromJS(sbp, 5);
+ ChangeScriptBreakPointIgnoreCountFromJS(env->GetIsolate(), sbp, 5);
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
f->Call(env->Global(), 0, NULL);
@@ -1943,7 +1928,8 @@ TEST(ScriptBreakPointIgnoreCount) {
// Reload the script and get f again checking that the ignore survives.
v8::Script::Compile(script, &origin)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
@@ -1966,7 +1952,8 @@ TEST(ScriptBreakPointReload) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
v8::Local<v8::Function> f;
- v8::Local<v8::String> script = v8::String::New(
+ v8::Local<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(),
"function f() {\n"
" function h() {\n"
" a = 0; // line 2\n"
@@ -1975,15 +1962,18 @@ TEST(ScriptBreakPointReload) {
" return h();\n"
"}");
- v8::ScriptOrigin origin_1 = v8::ScriptOrigin(v8::String::New("1"));
- v8::ScriptOrigin origin_2 = v8::ScriptOrigin(v8::String::New("2"));
+ v8::ScriptOrigin origin_1 =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "1"));
+ v8::ScriptOrigin origin_2 =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "2"));
// Set a script break point before the script is loaded.
- SetScriptBreakPointByNameFromJS("1", 2, 0);
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "1", 2, 0);
// Compile the script and get the function.
v8::Script::Compile(script, &origin_1)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
// Call f and check that the script break point is active.
break_point_hit_count = 0;
@@ -1993,7 +1983,8 @@ TEST(ScriptBreakPointReload) {
  // Compile the script again with different script data and get the
// function.
v8::Script::Compile(script, &origin_2)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
// Call f and check that no break points are set.
break_point_hit_count = 0;
@@ -2002,7 +1993,8 @@ TEST(ScriptBreakPointReload) {
// Compile the script again and get the function.
v8::Script::Compile(script, &origin_1)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
// Call f and check that the script break point is active.
break_point_hit_count = 0;
@@ -2024,28 +2016,32 @@ TEST(ScriptBreakPointMultiple) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
v8::Local<v8::Function> f;
- v8::Local<v8::String> script_f = v8::String::New(
- "function f() {\n"
- " a = 0; // line 1\n"
- "}");
+ v8::Local<v8::String> script_f =
+ v8::String::NewFromUtf8(env->GetIsolate(),
+ "function f() {\n"
+ " a = 0; // line 1\n"
+ "}");
v8::Local<v8::Function> g;
- v8::Local<v8::String> script_g = v8::String::New(
- "function g() {\n"
- " b = 0; // line 1\n"
- "}");
+ v8::Local<v8::String> script_g =
+ v8::String::NewFromUtf8(env->GetIsolate(),
+ "function g() {\n"
+ " b = 0; // line 1\n"
+ "}");
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::New("test"));
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
// Set a script break point before the scripts are loaded.
- int sbp = SetScriptBreakPointByNameFromJS("test", 1, 0);
+ int sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
  // Compile the scripts with the same script data and get the functions.
v8::Script::Compile(script_f, &origin)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
v8::Script::Compile(script_g, &origin)->Run();
- g = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
+ g = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
// Call f and g and check that the script break point is active.
break_point_hit_count = 0;
@@ -2055,7 +2051,7 @@ TEST(ScriptBreakPointMultiple) {
CHECK_EQ(2, break_point_hit_count);
// Clear the script break point.
- ClearBreakPointFromJS(sbp);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp);
// Call f and g and check that the script break point is no longer active.
break_point_hit_count = 0;
@@ -2065,7 +2061,7 @@ TEST(ScriptBreakPointMultiple) {
CHECK_EQ(0, break_point_hit_count);
// Set script break point with the scripts loaded.
- sbp = SetScriptBreakPointByNameFromJS("test", 1, 0);
+ sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
// Call f and g and check that the script break point is active.
break_point_hit_count = 0;
@@ -2089,23 +2085,28 @@ TEST(ScriptBreakPointLineOffset) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
v8::Local<v8::Function> f;
- v8::Local<v8::String> script = v8::String::New(
- "function f() {\n"
- " a = 0; // line 8 as this script has line offset 7\n"
- " b = 0; // line 9 as this script has line offset 7\n"
- "}");
+ v8::Local<v8::String> script = v8::String::NewFromUtf8(
+ env->GetIsolate(),
+ "function f() {\n"
+ " a = 0; // line 8 as this script has line offset 7\n"
+ " b = 0; // line 9 as this script has line offset 7\n"
+ "}");
  // Create a script origin with both a name and a line offset.
- v8::ScriptOrigin origin(v8::String::New("test.html"),
- v8::Integer::New(7));
+ v8::ScriptOrigin origin(
+ v8::String::NewFromUtf8(env->GetIsolate(), "test.html"),
+ v8::Integer::New(env->GetIsolate(), 7));
// Set two script break points before the script is loaded.
- int sbp1 = SetScriptBreakPointByNameFromJS("test.html", 8, 0);
- int sbp2 = SetScriptBreakPointByNameFromJS("test.html", 9, 0);
+ int sbp1 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 8, 0);
+ int sbp2 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 9, 0);
// Compile the script and get the function.
v8::Script::Compile(script, &origin)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
// Call f and check that the script break point is active.
break_point_hit_count = 0;
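The hunk above also threads the isolate into v8::Integer::New, which v8::ScriptOrigin uses for the line offset. A short sketch of compiling a script whose reported line numbers start at an offset, under the same API version this diff targets (the helper name is illustrative):

    #include <v8.h>

    // Compile `source` as if it started at line `line_offset` of `name`.
    // Breakpoints set by script name then use the offset line numbers.
    static v8::Local<v8::Script> CompileWithOffset(v8::Isolate* isolate,
                                                   const char* name,
                                                   const char* source,
                                                   int line_offset) {
      v8::ScriptOrigin origin(v8::String::NewFromUtf8(isolate, name),
                              v8::Integer::New(isolate, line_offset));
      return v8::Script::Compile(v8::String::NewFromUtf8(isolate, source),
                                 &origin);
    }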
@@ -2113,8 +2114,8 @@ TEST(ScriptBreakPointLineOffset) {
CHECK_EQ(2, break_point_hit_count);
// Clear the script break points.
- ClearBreakPointFromJS(sbp1);
- ClearBreakPointFromJS(sbp2);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp1);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp2);
// Call f and check that no script break points are active.
break_point_hit_count = 0;
@@ -2122,7 +2123,7 @@ TEST(ScriptBreakPointLineOffset) {
CHECK_EQ(0, break_point_hit_count);
// Set a script break point with the script loaded.
- sbp1 = SetScriptBreakPointByNameFromJS("test.html", 9, 0);
+ sbp1 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 9, 0);
// Call f and check that the script break point is active.
break_point_hit_count = 0;
@@ -2149,32 +2150,40 @@ TEST(ScriptBreakPointLine) {
v8::Local<v8::Function> f;
v8::Local<v8::Function> g;
- v8::Local<v8::String> script = v8::String::New(
- "a = 0 // line 0\n"
- "function f() {\n"
- " a = 1; // line 2\n"
- "}\n"
- " a = 2; // line 4\n"
- " /* xx */ function g() { // line 5\n"
- " function h() { // line 6\n"
- " a = 3; // line 7\n"
- " }\n"
- " h(); // line 9\n"
- " a = 4; // line 10\n"
- " }\n"
- " a=5; // line 12");
+ v8::Local<v8::String> script =
+ v8::String::NewFromUtf8(env->GetIsolate(),
+ "a = 0 // line 0\n"
+ "function f() {\n"
+ " a = 1; // line 2\n"
+ "}\n"
+ " a = 2; // line 4\n"
+ " /* xx */ function g() { // line 5\n"
+ " function h() { // line 6\n"
+ " a = 3; // line 7\n"
+ " }\n"
+ " h(); // line 9\n"
+ " a = 4; // line 10\n"
+ " }\n"
+ " a=5; // line 12");
  // Set a couple of script break points before the script is loaded.
- int sbp1 = SetScriptBreakPointByNameFromJS("test.html", 0, -1);
- int sbp2 = SetScriptBreakPointByNameFromJS("test.html", 1, -1);
- int sbp3 = SetScriptBreakPointByNameFromJS("test.html", 5, -1);
+ int sbp1 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 0, -1);
+ int sbp2 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 1, -1);
+ int sbp3 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 5, -1);
// Compile the script and get the function.
break_point_hit_count = 0;
- v8::ScriptOrigin origin(v8::String::New("test.html"), v8::Integer::New(0));
+ v8::ScriptOrigin origin(
+ v8::String::NewFromUtf8(env->GetIsolate(), "test.html"),
+ v8::Integer::New(env->GetIsolate(), 0));
v8::Script::Compile(script, &origin)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
- g = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ g = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
// Check that a break point was hit when the script was run.
CHECK_EQ(1, break_point_hit_count);
@@ -2191,8 +2200,9 @@ TEST(ScriptBreakPointLine) {
CHECK_EQ("g", last_function_hit);
// Clear the script break point on g and set one on h.
- ClearBreakPointFromJS(sbp3);
- int sbp4 = SetScriptBreakPointByNameFromJS("test.html", 6, -1);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp3);
+ int sbp4 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 6, -1);
// Call g and check that the script break point in h is hit.
g->Call(env->Global(), 0, NULL);
@@ -2202,9 +2212,10 @@ TEST(ScriptBreakPointLine) {
// Clear break points in f and h. Set a new one in the script between
  // functions f and g and test that there are no break points in f and g any
// more.
- ClearBreakPointFromJS(sbp2);
- ClearBreakPointFromJS(sbp4);
- int sbp5 = SetScriptBreakPointByNameFromJS("test.html", 4, -1);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp2);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp4);
+ int sbp5 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 4, -1);
break_point_hit_count = 0;
f->Call(env->Global(), 0, NULL);
g->Call(env->Global(), 0, NULL);
@@ -2217,7 +2228,8 @@ TEST(ScriptBreakPointLine) {
CHECK_EQ(0, StrLength(last_function_hit));
  // Set a break point in the code after the last function declaration.
- int sbp6 = SetScriptBreakPointByNameFromJS("test.html", 12, -1);
+ int sbp6 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 12, -1);
// Reload the script which should hit three break points.
break_point_hit_count = 0;
@@ -2227,9 +2239,9 @@ TEST(ScriptBreakPointLine) {
// Clear the last break points, and reload the script which should not hit any
// break points.
- ClearBreakPointFromJS(sbp1);
- ClearBreakPointFromJS(sbp5);
- ClearBreakPointFromJS(sbp6);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp1);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp5);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp6);
break_point_hit_count = 0;
v8::Script::Compile(script, &origin)->Run();
CHECK_EQ(0, break_point_hit_count);
@@ -2247,21 +2259,24 @@ TEST(ScriptBreakPointLineTopLevel) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Local<v8::String> script = v8::String::New(
- "function f() {\n"
- " a = 1; // line 1\n"
- "}\n"
- "a = 2; // line 3\n");
+ v8::Local<v8::String> script =
+ v8::String::NewFromUtf8(env->GetIsolate(),
+ "function f() {\n"
+ " a = 1; // line 1\n"
+ "}\n"
+ "a = 2; // line 3\n");
v8::Local<v8::Function> f;
{
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(script, v8::String::New("test.html"))->Run();
+ v8::Script::Compile(
+ script, v8::String::NewFromUtf8(env->GetIsolate(), "test.html"))->Run();
}
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
- SetScriptBreakPointByNameFromJS("test.html", 3, -1);
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 3, -1);
  // Call f and check that there were no break points.
break_point_hit_count = 0;
@@ -2270,12 +2285,14 @@ TEST(ScriptBreakPointLineTopLevel) {
// Recompile and run script and check that break point was hit.
break_point_hit_count = 0;
- v8::Script::Compile(script, v8::String::New("test.html"))->Run();
+ v8::Script::Compile(
+ script, v8::String::NewFromUtf8(env->GetIsolate(), "test.html"))->Run();
CHECK_EQ(1, break_point_hit_count);
// Call f and check that there are still no break points.
break_point_hit_count = 0;
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
CHECK_EQ(0, break_point_hit_count);
v8::Debug::SetDebugEventListener2(NULL);
@@ -2292,23 +2309,28 @@ TEST(ScriptBreakPointTopLevelCrash) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Local<v8::String> script_source = v8::String::New(
- "function f() {\n"
- " return 0;\n"
- "}\n"
- "f()");
+ v8::Local<v8::String> script_source =
+ v8::String::NewFromUtf8(env->GetIsolate(),
+ "function f() {\n"
+ " return 0;\n"
+ "}\n"
+ "f()");
- int sbp1 = SetScriptBreakPointByNameFromJS("test.html", 3, -1);
+ int sbp1 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 3, -1);
{
v8::HandleScope scope(env->GetIsolate());
break_point_hit_count = 0;
- v8::Script::Compile(script_source, v8::String::New("test.html"))->Run();
+ v8::Script::Compile(script_source,
+ v8::String::NewFromUtf8(env->GetIsolate(), "test.html"))
+ ->Run();
CHECK_EQ(1, break_point_hit_count);
}
- int sbp2 = SetScriptBreakPointByNameFromJS("test.html", 3, -1);
- ClearBreakPointFromJS(sbp1);
- ClearBreakPointFromJS(sbp2);
+ int sbp2 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 3, -1);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp1);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp2);
v8::Debug::SetDebugEventListener2(NULL);
CheckDebuggerUnloaded();
@@ -2347,13 +2369,16 @@ TEST(DebuggerStatement) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Script::Compile(v8::String::New("function bar(){debugger}"))->Run();
- v8::Script::Compile(v8::String::New(
- "function foo(){debugger;debugger;}"))->Run();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
- v8::Local<v8::Function> bar =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("bar")));
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "function bar(){debugger}"))
+ ->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(),
+ "function foo(){debugger;debugger;}"))->Run();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
+ v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "bar")));
// Run function with debugger statement
bar->Call(env->Global(), 0, NULL);
@@ -2374,9 +2399,11 @@ TEST(DebuggerStatementBreakpoint) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
- v8::Script::Compile(v8::String::New("function foo(){debugger;}"))->Run();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "function foo(){debugger;}"))
+ ->Run();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
  // The debugger statement triggers a breakpoint hit
foo->Call(env->Global(), 0, NULL);
@@ -2417,13 +2444,13 @@ TEST(DebugEvaluate) {
{NULL, v8::Handle<v8::Value>()}
};
struct EvaluateCheck checks_hu[] = {
- {"x", v8::String::New("Hello, world!")},
+ {"x", v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")},
{"a", v8::Undefined(isolate)},
{NULL, v8::Handle<v8::Value>()}
};
struct EvaluateCheck checks_hh[] = {
- {"x", v8::String::New("Hello, world!")},
- {"a", v8::String::New("Hello, world!")},
+ {"x", v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")},
+ {"a", v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")},
{NULL, v8::Handle<v8::Value>()}
};
@@ -2443,7 +2470,8 @@ TEST(DebugEvaluate) {
const int foo_break_position_2 = 29;
// Arguments with one parameter "Hello, world!"
- v8::Handle<v8::Value> argv_foo[1] = { v8::String::New("Hello, world!") };
+ v8::Handle<v8::Value> argv_foo[1] = {
+ v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")};
// Call foo with breakpoint set before a=x and undefined as parameter.
int bp = SetBreakPoint(foo, foo_break_position_1);
@@ -2485,7 +2513,7 @@ TEST(DebugEvaluate) {
checks = checks_uu;
v8::Handle<v8::Value> argv_bar_1[2] = {
v8::Undefined(isolate),
- v8::Number::New(barbar_break_position)
+ v8::Number::New(isolate, barbar_break_position)
};
bar->Call(env->Global(), 2, argv_bar_1);
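Call arguments built from v8::Number::New now take the isolate as their first parameter, as in the argv_bar_* arrays above. A small sketch of invoking a compiled function with numeric arguments, assuming an entered context (the helper name is illustrative):

    #include <v8.h>

    // Call `fn` with two numeric arguments, mirroring the argv arrays above.
    static v8::Local<v8::Value> CallWithTwoNumbers(
        v8::Isolate* isolate, v8::Local<v8::Context> context,
        v8::Local<v8::Function> fn, double a, double b) {
      v8::Handle<v8::Value> argv[2] = { v8::Number::New(isolate, a),
                                        v8::Number::New(isolate, b) };
      return fn->Call(context->Global(), 2, argv);
    }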
@@ -2493,8 +2521,8 @@ TEST(DebugEvaluate) {
// "Hello, world!".
checks = checks_hu;
v8::Handle<v8::Value> argv_bar_2[2] = {
- v8::String::New("Hello, world!"),
- v8::Number::New(barbar_break_position)
+ v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!"),
+ v8::Number::New(env->GetIsolate(), barbar_break_position)
};
bar->Call(env->Global(), 2, argv_bar_2);
@@ -2502,8 +2530,8 @@ TEST(DebugEvaluate) {
// "Hello, world!".
checks = checks_hh;
v8::Handle<v8::Value> argv_bar_3[2] = {
- v8::String::New("Hello, world!"),
- v8::Number::New(barbar_break_position + 1)
+ v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!"),
+ v8::Number::New(env->GetIsolate(), barbar_break_position + 1)
};
bar->Call(env->Global(), 2, argv_bar_3);
@@ -2690,10 +2718,10 @@ DebugProcessDebugMessagesData process_debug_messages_data;
static void DebugProcessDebugMessagesHandler(
const v8::Debug::Message& message) {
v8::Handle<v8::String> json = message.GetJSON();
- v8::String::AsciiValue ascii(json);
+ v8::String::Utf8Value utf8(json);
EvaluateResult* array_item = process_debug_messages_data.current();
- bool res = GetEvaluateStringResult(*ascii,
+ bool res = GetEvaluateStringResult(*utf8,
array_item->buffer,
EvaluateResult::kBufferSize);
if (res) {
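The change above drops v8::String::AsciiValue in favour of v8::String::Utf8Value, which handles any string content. A minimal sketch of converting a debug protocol message to a C string in a message handler, assuming the handler signature used elsewhere in this test:

    #include <v8.h>
    #include <v8-debug.h>
    #include <stdio.h>

    // Print the JSON payload of a debugger message as UTF-8.
    static void PrintMessageJSON(const v8::Debug::Message& message) {
      v8::Handle<v8::String> json = message.GetJSON();
      v8::String::Utf8Value utf8(json);  // *utf8 is a NUL-terminated char*
      if (*utf8 != NULL) printf("%s\n", *utf8);
    }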
@@ -2713,7 +2741,8 @@ TEST(DebugEvaluateWithoutStack) {
const char* source =
"var v1 = 'Pinguin';\n function getAnimal() { return 'Capy' + 'bara'; }";
- v8::Script::Compile(v8::String::New(source))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), source))
+ ->Run();
v8::Debug::ProcessDebugMessages();
@@ -2833,9 +2862,10 @@ TEST(DebugStepKeyedLoadLoop) {
"foo");
// Create array [0,1,2,3,4,5,6,7,8,9]
- v8::Local<v8::Array> a = v8::Array::New(10);
+ v8::Local<v8::Array> a = v8::Array::New(env->GetIsolate(), 10);
for (int i = 0; i < 10; i++) {
- a->Set(v8::Number::New(i), v8::Number::New(i));
+ a->Set(v8::Number::New(env->GetIsolate(), i),
+ v8::Number::New(env->GetIsolate(), i));
}
// Call function without any break points to ensure inlining is in place.
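v8::Array::New and the element writes both take the isolate now, as in the loop above. A sketch that builds the same [0..n-1] array, assuming an entered context:

    #include <v8.h>

    // Build an array [0, 1, ..., n-1] using the isolate-aware factories.
    static v8::Local<v8::Array> MakeIota(v8::Isolate* isolate, int n) {
      v8::Local<v8::Array> a = v8::Array::New(isolate, n);
      for (int i = 0; i < n; i++) {
        a->Set(v8::Number::New(isolate, i), v8::Number::New(isolate, i));
      }
      return a;
    }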
@@ -2880,9 +2910,10 @@ TEST(DebugStepKeyedStoreLoop) {
"foo");
// Create array [0,1,2,3,4,5,6,7,8,9]
- v8::Local<v8::Array> a = v8::Array::New(10);
+ v8::Local<v8::Array> a = v8::Array::New(env->GetIsolate(), 10);
for (int i = 0; i < 10; i++) {
- a->Set(v8::Number::New(i), v8::Number::New(i));
+ a->Set(v8::Number::New(env->GetIsolate(), i),
+ v8::Number::New(env->GetIsolate(), i));
}
// Call function without any break points to ensure inlining is in place.
@@ -3148,7 +3179,8 @@ TEST(DebugStepIf) {
TEST(DebugStepSwitch) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
v8::Debug::SetDebugEventListener2(DebugEventStep);
@@ -3178,21 +3210,21 @@ TEST(DebugStepSwitch) {
// One case with fall-through.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_1[argc] = { v8::Number::New(1) };
+ v8::Handle<v8::Value> argv_1[argc] = { v8::Number::New(isolate, 1) };
foo->Call(env->Global(), argc, argv_1);
CHECK_EQ(6, break_point_hit_count);
// Another case.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_2[argc] = { v8::Number::New(2) };
+ v8::Handle<v8::Value> argv_2[argc] = { v8::Number::New(isolate, 2) };
foo->Call(env->Global(), argc, argv_2);
CHECK_EQ(5, break_point_hit_count);
// Last case.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_3[argc] = { v8::Number::New(3) };
+ v8::Handle<v8::Value> argv_3[argc] = { v8::Number::New(isolate, 3) };
foo->Call(env->Global(), argc, argv_3);
CHECK_EQ(7, break_point_hit_count);
@@ -3204,7 +3236,8 @@ TEST(DebugStepSwitch) {
TEST(DebugStepWhile) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
v8::Debug::SetDebugEventListener2(DebugEventStep);
@@ -3225,14 +3258,14 @@ TEST(DebugStepWhile) {
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
+ v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
foo->Call(env->Global(), argc, argv_10);
CHECK_EQ(22, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
+ v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
foo->Call(env->Global(), argc, argv_100);
CHECK_EQ(202, break_point_hit_count);
@@ -3244,7 +3277,8 @@ TEST(DebugStepWhile) {
TEST(DebugStepDoWhile) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
v8::Debug::SetDebugEventListener2(DebugEventStep);
@@ -3265,14 +3299,14 @@ TEST(DebugStepDoWhile) {
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
+ v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
foo->Call(env->Global(), argc, argv_10);
CHECK_EQ(22, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
+ v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
foo->Call(env->Global(), argc, argv_100);
CHECK_EQ(202, break_point_hit_count);
@@ -3284,7 +3318,8 @@ TEST(DebugStepDoWhile) {
TEST(DebugStepFor) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
v8::Debug::SetDebugEventListener2(DebugEventStep);
@@ -3306,14 +3341,14 @@ TEST(DebugStepFor) {
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
+ v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
foo->Call(env->Global(), argc, argv_10);
CHECK_EQ(23, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
+ v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
foo->Call(env->Global(), argc, argv_100);
CHECK_EQ(203, break_point_hit_count);
@@ -3325,7 +3360,8 @@ TEST(DebugStepFor) {
TEST(DebugStepForContinue) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
v8::Debug::SetDebugEventListener2(DebugEventStep);
@@ -3355,7 +3391,7 @@ TEST(DebugStepForContinue) {
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
+ v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
result = foo->Call(env->Global(), argc, argv_10);
CHECK_EQ(5, result->Int32Value());
CHECK_EQ(52, break_point_hit_count);
@@ -3363,7 +3399,7 @@ TEST(DebugStepForContinue) {
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
+ v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
result = foo->Call(env->Global(), argc, argv_100);
CHECK_EQ(50, result->Int32Value());
CHECK_EQ(457, break_point_hit_count);
@@ -3376,7 +3412,8 @@ TEST(DebugStepForContinue) {
TEST(DebugStepForBreak) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
v8::Debug::SetDebugEventListener2(DebugEventStep);
@@ -3407,7 +3444,7 @@ TEST(DebugStepForBreak) {
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
+ v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
result = foo->Call(env->Global(), argc, argv_10);
CHECK_EQ(9, result->Int32Value());
CHECK_EQ(55, break_point_hit_count);
@@ -3415,7 +3452,7 @@ TEST(DebugStepForBreak) {
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
+ v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
result = foo->Call(env->Global(), argc, argv_100);
CHECK_EQ(99, result->Int32Value());
CHECK_EQ(505, break_point_hit_count);
@@ -3489,7 +3526,8 @@ TEST(DebugStepWith) {
" with (b) {}"
"}"
"foo()";
- env->Global()->Set(v8::String::New("b"), v8::Object::New());
+ env->Global()->Set(v8::String::NewFromUtf8(env->GetIsolate(), "b"),
+ v8::Object::New(env->GetIsolate()));
v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
v8::Handle<v8::Value> result;
SetBreakPoint(foo, 8); // "var a = {};"
@@ -3823,12 +3861,14 @@ TEST(PauseInScript) {
const char* script_name = "StepInHandlerTest";
// Set breakpoint in the script.
- SetScriptBreakPointByNameFromJS(script_name, 0, -1);
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), script_name, 0, -1);
break_point_hit_count = 0;
- v8::ScriptOrigin origin(v8::String::New(script_name), v8::Integer::New(0));
- v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(src),
- &origin);
+ v8::ScriptOrigin origin(
+ v8::String::NewFromUtf8(env->GetIsolate(), script_name),
+ v8::Integer::New(env->GetIsolate(), 0));
+ v8::Handle<v8::Script> script = v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), src), &origin);
v8::Local<v8::Value> r = script->Run();
CHECK(r->IsFunction());
@@ -3851,8 +3891,6 @@ TEST(BreakOnException) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- CcTest::i_isolate()->TraceException(false);
-
// Create functions for testing break on exception.
CompileFunction(&env, "function throws(){throw 1;}", "throws");
v8::Local<v8::Function> caught =
@@ -3932,7 +3970,7 @@ TEST(BreakOnException) {
// No break on exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
- ChangeBreakOnExceptionFromJS(false, false);
+ ChangeBreakOnExceptionFromJS(env->GetIsolate(), false, false);
caught->Call(env->Global(), 0, NULL);
CHECK_EQ(0, exception_hit_count);
CHECK_EQ(0, uncaught_exception_hit_count);
@@ -3945,7 +3983,7 @@ TEST(BreakOnException) {
// Break on uncaught exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
- ChangeBreakOnExceptionFromJS(false, true);
+ ChangeBreakOnExceptionFromJS(env->GetIsolate(), false, true);
caught->Call(env->Global(), 0, NULL);
CHECK_EQ(0, exception_hit_count);
CHECK_EQ(0, uncaught_exception_hit_count);
@@ -3958,7 +3996,7 @@ TEST(BreakOnException) {
// Break on exception and uncaught exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
- ChangeBreakOnExceptionFromJS(true, true);
+ ChangeBreakOnExceptionFromJS(env->GetIsolate(), true, true);
caught->Call(env->Global(), 0, NULL);
CHECK_EQ(1, exception_hit_count);
CHECK_EQ(0, message_callback_count);
@@ -3971,7 +4009,7 @@ TEST(BreakOnException) {
// Break on exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
- ChangeBreakOnExceptionFromJS(true, false);
+ ChangeBreakOnExceptionFromJS(env->GetIsolate(), true, false);
caught->Call(env->Global(), 0, NULL);
CHECK_EQ(1, exception_hit_count);
CHECK_EQ(0, uncaught_exception_hit_count);
@@ -3997,8 +4035,6 @@ TEST(BreakOnCompileException) {
// For this test, we want to break on uncaught exceptions:
ChangeBreakOnException(false, true);
- CcTest::i_isolate()->TraceException(false);
-
// Create a function for checking the function when hitting a break point.
frame_count = CompileFunction(&env, frame_count_source, "frame_count");
@@ -4015,28 +4051,30 @@ TEST(BreakOnCompileException) {
CHECK_EQ(-1, last_js_stack_height);
// Throws SyntaxError: Unexpected end of input
- v8::Script::Compile(v8::String::New("+++"));
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "+++"));
CHECK_EQ(1, exception_hit_count);
CHECK_EQ(1, uncaught_exception_hit_count);
CHECK_EQ(1, message_callback_count);
CHECK_EQ(0, last_js_stack_height); // No JavaScript stack.
// Throws SyntaxError: Unexpected identifier
- v8::Script::Compile(v8::String::New("x x"));
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "x x"));
CHECK_EQ(2, exception_hit_count);
CHECK_EQ(2, uncaught_exception_hit_count);
CHECK_EQ(2, message_callback_count);
CHECK_EQ(0, last_js_stack_height); // No JavaScript stack.
// Throws SyntaxError: Unexpected end of input
- v8::Script::Compile(v8::String::New("eval('+++')"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "eval('+++')"))
+ ->Run();
CHECK_EQ(3, exception_hit_count);
CHECK_EQ(3, uncaught_exception_hit_count);
CHECK_EQ(3, message_callback_count);
CHECK_EQ(1, last_js_stack_height);
// Throws SyntaxError: Unexpected identifier
- v8::Script::Compile(v8::String::New("eval('x x')"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "eval('x x')"))
+ ->Run();
CHECK_EQ(4, exception_hit_count);
CHECK_EQ(4, uncaught_exception_hit_count);
CHECK_EQ(4, message_callback_count);
@@ -4140,7 +4178,8 @@ TEST(DebugBreak) {
i::FLAG_verify_heap = true;
#endif
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which sets the break flag and counts.
v8::Debug::SetDebugEventListener2(DebugEventBreak);
@@ -4156,10 +4195,10 @@ TEST(DebugBreak) {
v8::Local<v8::Function> f3 = CompileFunction(&env, src, "f3");
// Call the function to make sure it is compiled.
- v8::Handle<v8::Value> argv[] = { v8::Number::New(1),
- v8::Number::New(1),
- v8::Number::New(1),
- v8::Number::New(1) };
+ v8::Handle<v8::Value> argv[] = { v8::Number::New(isolate, 1),
+ v8::Number::New(isolate, 1),
+ v8::Number::New(isolate, 1),
+ v8::Number::New(isolate, 1) };
// Call all functions to make sure that they are compiled.
f0->Call(env->Global(), 0, NULL);
@@ -4262,18 +4301,22 @@ TEST(NoBreakWhenBootstrapping) {
static void NamedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(3);
- result->Set(v8::Integer::New(0), v8::String::New("a"));
- result->Set(v8::Integer::New(1), v8::String::New("b"));
- result->Set(v8::Integer::New(2), v8::String::New("c"));
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 3);
+ result->Set(v8::Integer::New(info.GetIsolate(), 0),
+ v8::String::NewFromUtf8(info.GetIsolate(), "a"));
+ result->Set(v8::Integer::New(info.GetIsolate(), 1),
+ v8::String::NewFromUtf8(info.GetIsolate(), "b"));
+ result->Set(v8::Integer::New(info.GetIsolate(), 2),
+ v8::String::NewFromUtf8(info.GetIsolate(), "c"));
info.GetReturnValue().Set(result);
}
static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(2);
- result->Set(v8::Integer::New(0), v8::Number::New(1));
- result->Set(v8::Integer::New(1), v8::Number::New(10));
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Handle<v8::Array> result = v8::Array::New(isolate, 2);
+ result->Set(v8::Integer::New(isolate, 0), v8::Number::New(isolate, 1));
+ result->Set(v8::Integer::New(isolate, 1), v8::Number::New(isolate, 10));
info.GetReturnValue().Set(result);
}
@@ -4282,13 +4325,13 @@ static void NamedGetter(v8::Local<v8::String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
v8::String::Utf8Value n(name);
if (strcmp(*n, "a") == 0) {
- info.GetReturnValue().Set(v8::String::New("AA"));
+ info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), "AA"));
return;
} else if (strcmp(*n, "b") == 0) {
- info.GetReturnValue().Set(v8::String::New("BB"));
+ info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), "BB"));
return;
} else if (strcmp(*n, "c") == 0) {
- info.GetReturnValue().Set(v8::String::New("CC"));
+ info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), "CC"));
return;
} else {
info.GetReturnValue().SetUndefined();
@@ -4307,30 +4350,35 @@ static void IndexedGetter(uint32_t index,
TEST(InterceptorPropertyMirror) {
// Create a V8 environment with debug access.
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
env.ExposeDebug();
// Create object with named interceptor.
- v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
named->SetNamedPropertyHandler(NamedGetter, NULL, NULL, NULL, NamedEnum);
- env->Global()->Set(v8::String::New("intercepted_named"),
- named->NewInstance());
+ env->Global()->Set(
+ v8::String::NewFromUtf8(isolate, "intercepted_named"),
+ named->NewInstance());
// Create object with indexed interceptor.
- v8::Handle<v8::ObjectTemplate> indexed = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> indexed = v8::ObjectTemplate::New(isolate);
indexed->SetIndexedPropertyHandler(IndexedGetter,
NULL,
NULL,
NULL,
IndexedEnum);
- env->Global()->Set(v8::String::New("intercepted_indexed"),
- indexed->NewInstance());
+ env->Global()->Set(
+ v8::String::NewFromUtf8(isolate, "intercepted_indexed"),
+ indexed->NewInstance());
// Create object with both named and indexed interceptor.
- v8::Handle<v8::ObjectTemplate> both = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> both = v8::ObjectTemplate::New(isolate);
both->SetNamedPropertyHandler(NamedGetter, NULL, NULL, NULL, NamedEnum);
both->SetIndexedPropertyHandler(IndexedGetter, NULL, NULL, NULL, IndexedEnum);
- env->Global()->Set(v8::String::New("intercepted_both"), both->NewInstance());
+ env->Global()->Set(
+ v8::String::NewFromUtf8(isolate, "intercepted_both"),
+ both->NewInstance());
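v8::ObjectTemplate::New gains an isolate parameter as well; the interceptor callbacks themselves are unchanged. A sketch of installing a named interceptor on a fresh template and exposing the instance on the global object, reusing the NamedGetter/NamedEnum callbacks defined earlier in this file (the helper name is illustrative):

    #include <v8.h>

    // Create an object with a named property interceptor and publish it
    // on the current context's global object under `js_name`.
    static void InstallNamedInterceptor(v8::Isolate* isolate,
                                        v8::Local<v8::Context> context,
                                        const char* js_name) {
      v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
      templ->SetNamedPropertyHandler(NamedGetter, NULL, NULL, NULL, NamedEnum);
      context->Global()->Set(v8::String::NewFromUtf8(isolate, js_name),
                             templ->NewInstance());
    }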
// Get mirrors for the three objects with interceptor.
CompileRun(
@@ -4438,29 +4486,34 @@ TEST(InterceptorPropertyMirror) {
TEST(HiddenPrototypePropertyMirror) {
// Create a V8 environment with debug access.
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
env.ExposeDebug();
- v8::Handle<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New();
- t0->InstanceTemplate()->Set(v8::String::New("x"), v8::Number::New(0));
- v8::Handle<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
+ t0->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "x"),
+ v8::Number::New(isolate, 0));
+ v8::Handle<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->SetHiddenPrototype(true);
- t1->InstanceTemplate()->Set(v8::String::New("y"), v8::Number::New(1));
- v8::Handle<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
+ t1->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "y"),
+ v8::Number::New(isolate, 1));
+ v8::Handle<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
t2->SetHiddenPrototype(true);
- t2->InstanceTemplate()->Set(v8::String::New("z"), v8::Number::New(2));
- v8::Handle<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New();
- t3->InstanceTemplate()->Set(v8::String::New("u"), v8::Number::New(3));
+ t2->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "z"),
+ v8::Number::New(isolate, 2));
+ v8::Handle<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
+ t3->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "u"),
+ v8::Number::New(isolate, 3));
// Create object and set them on the global object.
v8::Handle<v8::Object> o0 = t0->GetFunction()->NewInstance();
- env->Global()->Set(v8::String::New("o0"), o0);
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "o0"), o0);
v8::Handle<v8::Object> o1 = t1->GetFunction()->NewInstance();
- env->Global()->Set(v8::String::New("o1"), o1);
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "o1"), o1);
v8::Handle<v8::Object> o2 = t2->GetFunction()->NewInstance();
- env->Global()->Set(v8::String::New("o2"), o2);
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "o2"), o2);
v8::Handle<v8::Object> o3 = t3->GetFunction()->NewInstance();
- env->Global()->Set(v8::String::New("o3"), o3);
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "o3"), o3);
// Get mirrors for the four objects.
CompileRun(
@@ -4485,7 +4538,7 @@ TEST(HiddenPrototypePropertyMirror) {
// Set o1 as prototype for o0. o1 has the hidden prototype flag so all
// properties on o1 should be seen on o0.
- o0->Set(v8::String::New("__proto__"), o1);
+ o0->Set(v8::String::NewFromUtf8(isolate, "__proto__"), o1);
CHECK_EQ(2, CompileRun(
"o0_mirror.propertyNames().length")->Int32Value());
CHECK_EQ(0, CompileRun(
@@ -4496,7 +4549,7 @@ TEST(HiddenPrototypePropertyMirror) {
  // Set o2 as prototype for o0 (it will end up after o1, as o1 has the hidden
  // prototype flag). o2 also has the hidden prototype flag, so all properties
// on o2 should be seen on o0 as well as properties on o1.
- o0->Set(v8::String::New("__proto__"), o2);
+ o0->Set(v8::String::NewFromUtf8(isolate, "__proto__"), o2);
CHECK_EQ(3, CompileRun(
"o0_mirror.propertyNames().length")->Int32Value());
CHECK_EQ(0, CompileRun(
@@ -4512,7 +4565,7 @@ TEST(HiddenPrototypePropertyMirror) {
// from o1 and o2 should still be seen on o0.
// Final prototype chain: o0 -> o1 -> o2 -> o3
// Hidden prototypes: ^^ ^^
- o0->Set(v8::String::New("__proto__"), o3);
+ o0->Set(v8::String::NewFromUtf8(isolate, "__proto__"), o3);
CHECK_EQ(3, CompileRun(
"o0_mirror.propertyNames().length")->Int32Value());
CHECK_EQ(1, CompileRun(
@@ -4540,17 +4593,19 @@ static void ProtperyXNativeGetter(
TEST(NativeGetterPropertyMirror) {
// Create a V8 environment with debug access.
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
env.ExposeDebug();
- v8::Handle<v8::String> name = v8::String::New("x");
+ v8::Handle<v8::String> name = v8::String::NewFromUtf8(isolate, "x");
// Create object with named accessor.
- v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
named->SetAccessor(name, &ProtperyXNativeGetter, NULL,
v8::Handle<v8::Value>(), v8::DEFAULT, v8::None);
// Create object with named property getter.
- env->Global()->Set(v8::String::New("instance"), named->NewInstance());
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
+ named->NewInstance());
CHECK_EQ(10, CompileRun("instance.x")->Int32Value());
// Get mirror for the object with property getter.
@@ -4578,17 +4633,19 @@ static void ProtperyXNativeGetterThrowingError(
TEST(NativeGetterThrowingErrorPropertyMirror) {
// Create a V8 environment with debug access.
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
env.ExposeDebug();
- v8::Handle<v8::String> name = v8::String::New("x");
+ v8::Handle<v8::String> name = v8::String::NewFromUtf8(isolate, "x");
// Create object with named accessor.
- v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
named->SetAccessor(name, &ProtperyXNativeGetterThrowingError, NULL,
v8::Handle<v8::Value>(), v8::DEFAULT, v8::None);
// Create object with named property getter.
- env->Global()->Set(v8::String::New("instance"), named->NewInstance());
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
+ named->NewInstance());
// Get mirror for the object with property getter.
CompileRun("var instance_mirror = debug.MakeMirror(instance);");
@@ -4613,17 +4670,20 @@ TEST(NativeGetterThrowingErrorPropertyMirror) {
TEST(NoHiddenProperties) {
// Create a V8 environment with debug access.
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
env.ExposeDebug();
// Create an object in the global scope.
const char* source = "var obj = {a: 1};";
- v8::Script::Compile(v8::String::New(source))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, source))
+ ->Run();
v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(
- env->Global()->Get(v8::String::New("obj")));
+ env->Global()->Get(v8::String::NewFromUtf8(isolate, "obj")));
// Set a hidden property on the object.
- obj->SetHiddenValue(v8::String::New("v8::test-debug::a"),
- v8::Int32::New(11));
+ obj->SetHiddenValue(
+ v8::String::NewFromUtf8(isolate, "v8::test-debug::a"),
+ v8::Int32::New(isolate, 11));
// Get mirror for the object with property getter.
CompileRun("var obj_mirror = debug.MakeMirror(obj);");
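Hidden values follow the same pattern: both the key string and the Int32 now take the isolate, as shown above. A minimal sketch, assuming an object handle from the current context (the helper name is illustrative):

    #include <v8.h>

    // Attach a hidden (API-only, non-enumerable) integer to `obj`.
    static void SetHiddenInt(v8::Isolate* isolate, v8::Handle<v8::Object> obj,
                             const char* key, int value) {
      obj->SetHiddenValue(v8::String::NewFromUtf8(isolate, key),
                          v8::Int32::New(isolate, value));
    }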
@@ -4639,26 +4699,34 @@ TEST(NoHiddenProperties) {
"obj_mirror.property('a').value().value() == 1")->BooleanValue());
// Object created by t0 will become hidden prototype of object 'obj'.
- v8::Handle<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New();
- t0->InstanceTemplate()->Set(v8::String::New("b"), v8::Number::New(2));
+ v8::Handle<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
+ t0->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "b"),
+ v8::Number::New(isolate, 2));
t0->SetHiddenPrototype(true);
- v8::Handle<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
- t1->InstanceTemplate()->Set(v8::String::New("c"), v8::Number::New(3));
+ v8::Handle<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
+ t1->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "c"),
+ v8::Number::New(isolate, 3));
// Create proto objects, add hidden properties to them and set them on
// the global object.
v8::Handle<v8::Object> protoObj = t0->GetFunction()->NewInstance();
- protoObj->SetHiddenValue(v8::String::New("v8::test-debug::b"),
- v8::Int32::New(12));
- env->Global()->Set(v8::String::New("protoObj"), protoObj);
+ protoObj->SetHiddenValue(
+ v8::String::NewFromUtf8(isolate, "v8::test-debug::b"),
+ v8::Int32::New(isolate, 12));
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "protoObj"),
+ protoObj);
v8::Handle<v8::Object> grandProtoObj = t1->GetFunction()->NewInstance();
- grandProtoObj->SetHiddenValue(v8::String::New("v8::test-debug::c"),
- v8::Int32::New(13));
- env->Global()->Set(v8::String::New("grandProtoObj"), grandProtoObj);
+ grandProtoObj->SetHiddenValue(
+ v8::String::NewFromUtf8(isolate, "v8::test-debug::c"),
+ v8::Int32::New(isolate, 13));
+ env->Global()->Set(
+ v8::String::NewFromUtf8(isolate, "grandProtoObj"),
+ grandProtoObj);
// Setting prototypes: obj->protoObj->grandProtoObj
- protoObj->Set(v8::String::New("__proto__"), grandProtoObj);
- obj->Set(v8::String::New("__proto__"), protoObj);
+ protoObj->Set(v8::String::NewFromUtf8(isolate, "__proto__"),
+ grandProtoObj);
+ obj->Set(v8::String::NewFromUtf8(isolate, "__proto__"), protoObj);
// Get mirror for the object with property getter.
CompileRun("var obj_mirror = debug.MakeMirror(obj);");
@@ -4844,8 +4912,8 @@ class MessageQueueDebuggerThread : public v8::internal::Thread {
static void MessageHandler(const v8::Debug::Message& message) {
v8::Handle<v8::String> json = message.GetJSON();
- v8::String::AsciiValue ascii(json);
- if (IsBreakEventMessage(*ascii)) {
+ v8::String::Utf8Value utf8(json);
+ if (IsBreakEventMessage(*utf8)) {
    // Lets the test script wait until a break occurs before sending commands.
// Signals when a break is reported.
message_queue_barriers.semaphore_2.Signal();
@@ -5146,14 +5214,17 @@ void V8Thread::Run() {
"\n"
"foo();\n";
- v8::Isolate::Scope isolate_scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Isolate::Scope isolate_scope(isolate);
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Debug::SetMessageHandler2(&ThreadedMessageHandler);
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
- global_template->Set(v8::String::New("ThreadedAtBarrier1"),
- v8::FunctionTemplate::New(ThreadedAtBarrier1));
- v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate(),
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(env->GetIsolate());
+ global_template->Set(
+ v8::String::NewFromUtf8(env->GetIsolate(), "ThreadedAtBarrier1"),
+ v8::FunctionTemplate::New(isolate, ThreadedAtBarrier1));
+ v8::Handle<v8::Context> context = v8::Context::New(isolate,
NULL,
global_template);
v8::Context::Scope context_scope(context);
@@ -5484,7 +5555,8 @@ static void CheckSourceLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
// can throw exceptions.
static void CheckDataParameter(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Handle<v8::String> data = v8::String::New("Test");
+ v8::Handle<v8::String> data =
+ v8::String::NewFromUtf8(args.GetIsolate(), "Test");
CHECK(v8::Debug::Call(debugger_call_with_data, data)->IsString());
CHECK(v8::Debug::Call(debugger_call_with_data).IsEmpty());
@@ -5508,75 +5580,97 @@ static void CheckClosure(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(CallFunctionInDebugger) {
// Create and enter a context with the functions CheckFrameCount,
// CheckSourceLine and CheckDataParameter installed.
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
- global_template->Set(v8::String::New("CheckFrameCount"),
- v8::FunctionTemplate::New(CheckFrameCount));
- global_template->Set(v8::String::New("CheckSourceLine"),
- v8::FunctionTemplate::New(CheckSourceLine));
- global_template->Set(v8::String::New("CheckDataParameter"),
- v8::FunctionTemplate::New(CheckDataParameter));
- global_template->Set(v8::String::New("CheckClosure"),
- v8::FunctionTemplate::New(CheckClosure));
- v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate(),
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ global_template->Set(
+ v8::String::NewFromUtf8(isolate, "CheckFrameCount"),
+ v8::FunctionTemplate::New(isolate, CheckFrameCount));
+ global_template->Set(
+ v8::String::NewFromUtf8(isolate, "CheckSourceLine"),
+ v8::FunctionTemplate::New(isolate, CheckSourceLine));
+ global_template->Set(
+ v8::String::NewFromUtf8(isolate, "CheckDataParameter"),
+ v8::FunctionTemplate::New(isolate, CheckDataParameter));
+ global_template->Set(
+ v8::String::NewFromUtf8(isolate, "CheckClosure"),
+ v8::FunctionTemplate::New(isolate, CheckClosure));
+ v8::Handle<v8::Context> context = v8::Context::New(isolate,
NULL,
global_template);
v8::Context::Scope context_scope(context);
// Compile a function for checking the number of JavaScript frames.
- v8::Script::Compile(v8::String::New(frame_count_source))->Run();
- frame_count = v8::Local<v8::Function>::Cast(
- context->Global()->Get(v8::String::New("frame_count")));
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate, frame_count_source))->Run();
+ frame_count = v8::Local<v8::Function>::Cast(context->Global()->Get(
+ v8::String::NewFromUtf8(isolate, "frame_count")));
// Compile a function for returning the source line for the top frame.
- v8::Script::Compile(v8::String::New(frame_source_line_source))->Run();
- frame_source_line = v8::Local<v8::Function>::Cast(
- context->Global()->Get(v8::String::New("frame_source_line")));
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate,
+ frame_source_line_source))->Run();
+ frame_source_line = v8::Local<v8::Function>::Cast(context->Global()->Get(
+ v8::String::NewFromUtf8(isolate, "frame_source_line")));
// Compile a function returning the data parameter.
- v8::Script::Compile(v8::String::New(debugger_call_with_data_source))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate,
+ debugger_call_with_data_source))
+ ->Run();
debugger_call_with_data = v8::Local<v8::Function>::Cast(
- context->Global()->Get(v8::String::New("debugger_call_with_data")));
+ context->Global()->Get(v8::String::NewFromUtf8(
+ isolate, "debugger_call_with_data")));
// Compile a function capturing closure.
- debugger_call_with_closure = v8::Local<v8::Function>::Cast(
- v8::Script::Compile(
- v8::String::New(debugger_call_with_closure_source))->Run());
+ debugger_call_with_closure =
+ v8::Local<v8::Function>::Cast(v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate,
+ debugger_call_with_closure_source))->Run());
// Calling a function through the debugger returns 0 frames if there are
// no JavaScript frames.
- CHECK_EQ(v8::Integer::New(0), v8::Debug::Call(frame_count));
+ CHECK_EQ(v8::Integer::New(isolate, 0),
+ v8::Debug::Call(frame_count));
// Test that the number of frames can be retrieved.
- v8::Script::Compile(v8::String::New("CheckFrameCount(1)"))->Run();
- v8::Script::Compile(v8::String::New("function f() {"
- " CheckFrameCount(2);"
- "}; f()"))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate, "CheckFrameCount(1)"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate,
+ "function f() {"
+ " CheckFrameCount(2);"
+ "}; f()"))->Run();
// Test that the source line can be retrieved.
- v8::Script::Compile(v8::String::New("CheckSourceLine(0)"))->Run();
- v8::Script::Compile(v8::String::New("function f() {\n"
- " CheckSourceLine(1)\n"
- " CheckSourceLine(2)\n"
- " CheckSourceLine(3)\n"
- "}; f()"))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate, "CheckSourceLine(0)"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate,
+ "function f() {\n"
+ " CheckSourceLine(1)\n"
+ " CheckSourceLine(2)\n"
+ " CheckSourceLine(3)\n"
+ "}; f()"))->Run();
// Test that a parameter can be passed to a function called in the debugger.
- v8::Script::Compile(v8::String::New("CheckDataParameter()"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate,
+ "CheckDataParameter()"))->Run();
// Test that a function with closure can be run in the debugger.
- v8::Script::Compile(v8::String::New("CheckClosure()"))->Run();
-
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate, "CheckClosure()"))->Run();
// Test that the source line is correct when there is a line offset.
- v8::ScriptOrigin origin(v8::String::New("test"),
- v8::Integer::New(7));
- v8::Script::Compile(v8::String::New("CheckSourceLine(7)"), &origin)->Run();
- v8::Script::Compile(v8::String::New("function f() {\n"
- " CheckSourceLine(8)\n"
- " CheckSourceLine(9)\n"
- " CheckSourceLine(10)\n"
- "}; f()"), &origin)->Run();
+ v8::ScriptOrigin origin(v8::String::NewFromUtf8(isolate, "test"),
+ v8::Integer::New(isolate, 7));
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate, "CheckSourceLine(7)"), &origin)
+ ->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate,
+ "function f() {\n"
+ " CheckSourceLine(8)\n"
+ " CheckSourceLine(9)\n"
+ " CheckSourceLine(10)\n"
+ "}; f()"),
+ &origin)->Run();
}
@@ -5639,7 +5733,7 @@ TEST(DebuggerUnload) {
// Get the test functions again.
v8::Local<v8::Function> foo(v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("foo"))));
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo"))));
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
@@ -5989,7 +6083,7 @@ class DebuggerAgentProtocolServerThread : public i::Thread {
void Run();
void WaitForListening() { listening_.Wait(); }
- char* body() { return *body_; }
+ char* body() { return body_.get(); }
private:
int port_;
@@ -6113,7 +6207,8 @@ TEST(DebugGetLoadedScripts) {
env.ExposeDebug();
EmptyExternalStringResource source_ext_str;
- v8::Local<v8::String> source = v8::String::NewExternal(&source_ext_str);
+ v8::Local<v8::String> source =
+ v8::String::NewExternal(env->GetIsolate(), &source_ext_str);
v8::Handle<v8::Script> evil_script(v8::Script::Compile(source));
// "use" evil_script to make the compiler happy.
(void) evil_script;
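v8::String::NewExternal now also takes the isolate. A sketch of a minimal two-byte external string resource, similar in spirit to the EmptyExternalStringResource this test uses (the class and helper names here are illustrative; the resource must outlive the string):

    #include <v8.h>

    // A trivial external resource backing an empty two-byte string.
    class EmptyTwoByteResource : public v8::String::ExternalStringResource {
     public:
      EmptyTwoByteResource() : dummy_(0) {}
      virtual const uint16_t* data() const { return &dummy_; }
      virtual size_t length() const { return 0; }
     private:
      uint16_t dummy_;
    };

    // Static storage keeps the resource alive for the life of the process.
    static v8::Local<v8::String> NewEmptyExternal(v8::Isolate* isolate) {
      static EmptyTwoByteResource resource;
      return v8::String::NewExternal(isolate, &resource);
    }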
@@ -6135,7 +6230,11 @@ TEST(DebugGetLoadedScripts) {
i::FLAG_allow_natives_syntax = allow_natives_syntax;
// Some scripts are retrieved - at least the number of native scripts.
- CHECK_GT((*env)->Global()->Get(v8::String::New("count"))->Int32Value(), 8);
+ CHECK_GT((*env)
+ ->Global()
+ ->Get(v8::String::NewFromUtf8(env->GetIsolate(), "count"))
+ ->Int32Value(),
+ 8);
}
@@ -6160,17 +6259,19 @@ TEST(ScriptNameAndData) {
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
// Test function source.
- v8::Local<v8::String> script = v8::String::New(
- "function f() {\n"
- " debugger;\n"
- "}\n");
+ v8::Local<v8::String> script = v8::String::NewFromUtf8(env->GetIsolate(),
+ "function f() {\n"
+ " debugger;\n"
+ "}\n");
- v8::ScriptOrigin origin1 = v8::ScriptOrigin(v8::String::New("name"));
+ v8::ScriptOrigin origin1 =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "name"));
v8::Handle<v8::Script> script1 = v8::Script::Compile(script, &origin1);
- script1->SetData(v8::String::New("data"));
+ script1->SetData(v8::String::NewFromUtf8(env->GetIsolate(), "data"));
script1->Run();
v8::Local<v8::Function> f;
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
@@ -6180,34 +6281,39 @@ TEST(ScriptNameAndData) {
// Compile the same script again without setting data. As the compilation
  // cache is disabled when debugging, expect the data to be missing.
v8::Script::Compile(script, &origin1)->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ("name", last_script_name_hit);
CHECK_EQ("", last_script_data_hit); // Undefined results in empty string.
- v8::Local<v8::String> data_obj_source = v8::String::New(
- "({ a: 'abc',\n"
- " b: 123,\n"
- " toString: function() { return this.a + ' ' + this.b; }\n"
- "})\n");
+ v8::Local<v8::String> data_obj_source = v8::String::NewFromUtf8(
+ env->GetIsolate(),
+ "({ a: 'abc',\n"
+ " b: 123,\n"
+ " toString: function() { return this.a + ' ' + this.b; }\n"
+ "})\n");
v8::Local<v8::Value> data_obj = v8::Script::Compile(data_obj_source)->Run();
- v8::ScriptOrigin origin2 = v8::ScriptOrigin(v8::String::New("new name"));
+ v8::ScriptOrigin origin2 =
+ v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "new name"));
v8::Handle<v8::Script> script2 = v8::Script::Compile(script, &origin2);
script2->Run();
script2->SetData(data_obj->ToString());
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(3, break_point_hit_count);
CHECK_EQ("new name", last_script_name_hit);
CHECK_EQ("abc 123", last_script_data_hit);
- v8::Handle<v8::Script> script3 =
- v8::Script::Compile(script, &origin2, NULL,
- v8::String::New("in compile"));
+ v8::Handle<v8::Script> script3 = v8::Script::Compile(
+ script, &origin2, NULL,
+ v8::String::NewFromUtf8(env->GetIsolate(), "in compile"));
CHECK_EQ("in compile", last_script_data_hit);
script3->Run();
- f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(4, break_point_hit_count);
CHECK_EQ("in compile", last_script_data_hit);
@@ -6259,8 +6365,8 @@ TEST(ContextData) {
CHECK(context_2->GetEmbedderData(0)->IsUndefined());
// Set and check different data values.
- v8::Handle<v8::String> data_1 = v8::String::New("1");
- v8::Handle<v8::String> data_2 = v8::String::New("2");
+ v8::Handle<v8::String> data_1 = v8::String::NewFromUtf8(isolate, "1");
+ v8::Handle<v8::String> data_2 = v8::String::NewFromUtf8(isolate, "2");
context_1->SetEmbedderData(0, data_1);
context_2->SetEmbedderData(0, data_2);
CHECK(context_1->GetEmbedderData(0)->StrictEquals(data_1));
@@ -6274,7 +6380,7 @@ TEST(ContextData) {
v8::Context::Scope context_scope(context_1);
expected_context = context_1;
expected_context_data = data_1;
- v8::Local<v8::Function> f = CompileFunction(source, "f");
+ v8::Local<v8::Function> f = CompileFunction(isolate, source, "f");
f->Call(context_1->Global(), 0, NULL);
}
@@ -6284,7 +6390,7 @@ TEST(ContextData) {
v8::Context::Scope context_scope(context_2);
expected_context = context_2;
expected_context_data = data_2;
- v8::Local<v8::Function> f = CompileFunction(source, "f");
+ v8::Local<v8::Function> f = CompileFunction(isolate, source, "f");
f->Call(context_2->Global(), 0, NULL);
}
@@ -6325,10 +6431,10 @@ TEST(DebugBreakInMessageHandler) {
// Test functions.
const char* script = "function f() { debugger; g(); } function g() { }";
CompileRun(script);
- v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
- v8::Local<v8::Function> g =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
  // Call f then g. The debugger statement in f will cause a break which will
// cause another break.
@@ -6355,7 +6461,9 @@ static void DebugEventDebugBreak(
if (!frame_function_name.IsEmpty()) {
// Get the name of the function.
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
+ v8::Handle<v8::Value> argv[argc] = {
+ exec_state, v8::Integer::New(CcTest::isolate(), 0)
+ };
v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
argc, argv);
if (result->IsUndefined()) {
@@ -6391,9 +6499,10 @@ TEST(RegExpDebugBreak) {
"var sourceLineBeginningSkip = /^(?:[ \\v\\h]*(?:\\/\\*.*?\\*\\/)*)*/;\n"
"function f(s) { return s.match(sourceLineBeginningSkip)[0].length; }";
- v8::Local<v8::Function> f = CompileFunction(script, "f");
+ v8::Local<v8::Function> f = CompileFunction(env->GetIsolate(), script, "f");
const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { v8::String::New(" /* xxx */ a=0;") };
+ v8::Handle<v8::Value> argv[argc] = {
+ v8::String::NewFromUtf8(env->GetIsolate(), " /* xxx */ a=0;")};
v8::Local<v8::Value> result = f->Call(env->Global(), argc, argv);
CHECK_EQ(12, result->Int32Value());
@@ -6425,7 +6534,8 @@ static void ExecuteScriptForContextCheck(
CHECK(context_1->GetEmbedderData(0)->IsUndefined());
// Set and check a data value.
- v8::Handle<v8::String> data_1 = v8::String::New("1");
+ v8::Handle<v8::String> data_1 =
+ v8::String::NewFromUtf8(CcTest::isolate(), "1");
context_1->SetEmbedderData(0, data_1);
CHECK(context_1->GetEmbedderData(0)->StrictEquals(data_1));
@@ -6437,7 +6547,7 @@ static void ExecuteScriptForContextCheck(
v8::Context::Scope context_scope(context_1);
expected_context = context_1;
expected_context_data = data_1;
- v8::Local<v8::Function> f = CompileFunction(source, "f");
+ v8::Local<v8::Function> f = CompileFunction(CcTest::isolate(), source, "f");
f->Call(context_1->Global(), 0, NULL);
}
@@ -6558,8 +6668,10 @@ TEST(ScriptCollectedEvent) {
script_collected_count = 0;
v8::Debug::SetDebugEventListener2(DebugEventScriptCollectedEvent);
{
- v8::Script::Compile(v8::String::New("eval('a=1')"))->Run();
- v8::Script::Compile(v8::String::New("eval('a=2')"))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "eval('a=1')"))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "eval('a=2')"))->Run();
}
// Do garbage collection to collect the script above which is no longer
@@ -6618,8 +6730,8 @@ TEST(ScriptCollectedEventContext) {
CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
- v8::Script::Compile(v8::String::New("eval('a=1')"))->Run();
- v8::Script::Compile(v8::String::New("eval('a=2')"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, "eval('a=1')"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(isolate, "eval('a=2')"))->Run();
// Leave context
{
@@ -6628,7 +6740,7 @@ TEST(ScriptCollectedEventContext) {
v8::Local<v8::Context>::New(isolate, context);
local_context->Exit();
}
- context.Dispose();
+ context.Reset();
// Do garbage collection to collect the script above which is no longer
// referenced.
@@ -6663,12 +6775,14 @@ TEST(AfterCompileMessageWhenMessageHandlerIsReset) {
const char* script = "var a=1";
v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
- v8::Script::Compile(v8::String::New(script))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script))
+ ->Run();
v8::Debug::SetMessageHandler2(NULL);
v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
v8::Debug::DebugBreak(env->GetIsolate());
- v8::Script::Compile(v8::String::New(script))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script))
+ ->Run();
// Setting listener to NULL should cause debugger unload.
v8::Debug::SetMessageHandler2(NULL);
@@ -6687,13 +6801,14 @@ TEST(BreakMessageWhenMessageHandlerIsReset) {
const char* script = "function f() {};";
v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
- v8::Script::Compile(v8::String::New(script))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script))
+ ->Run();
v8::Debug::SetMessageHandler2(NULL);
v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
v8::Debug::DebugBreak(env->GetIsolate());
- v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
// Setting message handler to NULL should cause debugger unload.
@@ -6726,12 +6841,13 @@ TEST(ExceptionMessageWhenMessageHandlerIsReset) {
const char* script = "function f() {throw new Error()};";
v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
- v8::Script::Compile(v8::String::New(script))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script))
+ ->Run();
v8::Debug::SetMessageHandler2(NULL);
v8::Debug::SetMessageHandler2(ExceptionMessageHandler);
- v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
// Setting message handler to NULL should cause debugger unload.
@@ -6753,28 +6869,30 @@ TEST(ProvisionalBreakpointOnLineOutOfRange) {
// Set a couple of provisional breakpoints on lines out of the script lines
// range.
- int sbp1 = SetScriptBreakPointByNameFromJS(resource_name, 3,
- -1 /* no column */);
- int sbp2 = SetScriptBreakPointByNameFromJS(resource_name, 5, 5);
+ int sbp1 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), resource_name,
+ 3, -1 /* no column */);
+ int sbp2 =
+ SetScriptBreakPointByNameFromJS(env->GetIsolate(), resource_name, 5, 5);
after_compile_message_count = 0;
v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
v8::ScriptOrigin origin(
- v8::String::New(resource_name),
- v8::Integer::New(10),
- v8::Integer::New(1));
+ v8::String::NewFromUtf8(env->GetIsolate(), resource_name),
+ v8::Integer::New(env->GetIsolate(), 10),
+ v8::Integer::New(env->GetIsolate(), 1));
// Compile a script whose first line number is greater than the breakpoints'
// lines.
- v8::Script::Compile(v8::String::New(script), &origin)->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script),
+ &origin)->Run();
// If the script is compiled successfully there is exactly one after compile
// event. In case of an exception in debugger code, the after compile event is
// not sent.
CHECK_EQ(1, after_compile_message_count);
- ClearBreakPointFromJS(sbp1);
- ClearBreakPointFromJS(sbp2);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp1);
+ ClearBreakPointFromJS(env->GetIsolate(), sbp2);
v8::Debug::SetMessageHandler2(NULL);
}
@@ -6932,7 +7050,8 @@ TEST(Backtrace) {
v8::Debug::ProcessDebugMessages();
CHECK_EQ(BacktraceData::frame_counter, 0);
- v8::Handle<v8::String> void0 = v8::String::New("void(0)");
+ v8::Handle<v8::String> void0 =
+ v8::String::NewFromUtf8(env->GetIsolate(), "void(0)");
v8::Handle<v8::Script> script = v8::Script::Compile(void0, void0);
// Check backtrace from "void(0)" script.
@@ -6954,10 +7073,12 @@ TEST(Backtrace) {
TEST(GetMirror) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::Value> obj = v8::Debug::GetMirror(v8::String::New("hodja"));
- v8::Handle<v8::Function> run_test = v8::Handle<v8::Function>::Cast(
- v8::Script::New(
- v8::String::New(
+ v8::Handle<v8::Value> obj =
+ v8::Debug::GetMirror(v8::String::NewFromUtf8(env->GetIsolate(), "hodja"));
+ v8::Handle<v8::Function> run_test =
+ v8::Handle<v8::Function>::Cast(v8::Script::New(
+ v8::String::NewFromUtf8(
+ env->GetIsolate(),
"function runTest(mirror) {"
" return mirror.isString() && (mirror.length() == 5);"
"}"
@@ -7048,7 +7169,8 @@ TEST(CallingContextIsNotDebugContext) {
v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// Create and enter a debuggee context.
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
env.ExposeDebug();
// Save handles to the debugger and debuggee contexts to be used in
@@ -7057,10 +7179,10 @@ TEST(CallingContextIsNotDebugContext) {
debugger_context = v8::Utils::ToLocal(debug->debug_context());
// Create object with 'a' property accessor.
- v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New();
- named->SetAccessor(v8::String::New("a"),
+ v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
+ named->SetAccessor(v8::String::NewFromUtf8(isolate, "a"),
NamedGetterWithCallingContextCheck);
- env->Global()->Set(v8::String::New("obj"),
+ env->Global()->Set(v8::String::NewFromUtf8(isolate, "obj"),
named->NewInstance());
// Register the debug event listener
@@ -7103,12 +7225,13 @@ static void DebugEventContextChecker(const v8::Debug::EventDetails& details) {
TEST(DebugEventContext) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- expected_callback_data = v8::Int32::New(2010);
expected_context = v8::Context::New(isolate);
+ expected_callback_data = v8::Int32::New(isolate, 2010);
v8::Debug::SetDebugEventListener2(DebugEventContextChecker,
expected_callback_data);
v8::Context::Scope context_scope(expected_context);
- v8::Script::Compile(v8::String::New("(function(){debugger;})();"))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate, "(function(){debugger;})();"))->Run();
expected_context.Clear();
v8::Debug::SetDebugEventListener2(NULL);
expected_context_data = v8::Handle<v8::Value>();
@@ -7143,7 +7266,9 @@ TEST(DebugEventBreakData) {
was_debug_event_called = false;
was_debug_break_called = false;
v8::Debug::DebugBreakForCommand(NULL, isolate);
- v8::Script::Compile(v8::String::New("(function(x){return x;})(1);"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "(function(x){return x;})(1);"))
+ ->Run();
CHECK(was_debug_event_called);
CHECK(!was_debug_break_called);
@@ -7152,7 +7277,9 @@ TEST(DebugEventBreakData) {
was_debug_event_called = false;
was_debug_break_called = false;
v8::Debug::DebugBreakForCommand(data1, isolate);
- v8::Script::Compile(v8::String::New("(function(x){return x+1;})(1);"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "(function(x){return x+1;})(1);"))
+ ->Run();
CHECK(was_debug_event_called);
CHECK(!was_debug_break_called);
@@ -7160,7 +7287,9 @@ TEST(DebugEventBreakData) {
was_debug_event_called = false;
was_debug_break_called = false;
v8::Debug::DebugBreak(isolate);
- v8::Script::Compile(v8::String::New("(function(x){return x+2;})(1);"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "(function(x){return x+2;})(1);"))
+ ->Run();
CHECK(!was_debug_event_called);
CHECK(was_debug_break_called);
@@ -7170,7 +7299,9 @@ TEST(DebugEventBreakData) {
was_debug_break_called = false;
v8::Debug::DebugBreak(isolate);
v8::Debug::DebugBreakForCommand(data2, isolate);
- v8::Script::Compile(v8::String::New("(function(x){return x+3;})(1);"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "(function(x){return x+3;})(1);"))
+ ->Run();
CHECK(was_debug_event_called);
CHECK(was_debug_break_called);
@@ -7192,7 +7323,9 @@ static void DebugEventBreakDeoptimize(
if (!frame_function_name.IsEmpty()) {
// Get the name of the function.
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
+ v8::Handle<v8::Value> argv[argc] = {
+ exec_state, v8::Integer::New(CcTest::isolate(), 0)
+ };
v8::Handle<v8::Value> result =
frame_function_name->Call(exec_state, argc, argv);
if (!result->IsUndefined()) {
@@ -7233,11 +7366,13 @@ TEST(DeoptimizeDuringDebugBreak) {
v8::Debug::SetDebugEventListener2(DebugEventBreakDeoptimize);
// Compile and run function bar, which will be optimized for some flag settings.
- v8::Script::Compile(v8::String::New("function bar(){}; bar()"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(
+ env->GetIsolate(), "function bar(){}; bar()"))->Run();
// Set debug break and call bar again.
v8::Debug::DebugBreak(env->GetIsolate());
- v8::Script::Compile(v8::String::New("bar()"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "bar()"))
+ ->Run();
CHECK(debug_event_break_deoptimize_done);
@@ -7247,24 +7382,27 @@ TEST(DeoptimizeDuringDebugBreak) {
static void DebugEventBreakWithOptimizedStack(
const v8::Debug::EventDetails& event_details) {
+ v8::Isolate* isolate = event_details.GetEventContext()->GetIsolate();
v8::DebugEvent event = event_details.GetEvent();
v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
if (event == v8::Break) {
if (!frame_function_name.IsEmpty()) {
for (int i = 0; i < 2; i++) {
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(i) };
+ v8::Handle<v8::Value> argv[argc] = {
+ exec_state, v8::Integer::New(isolate, i)
+ };
// Get the name of the function in frame i.
v8::Handle<v8::Value> result =
frame_function_name->Call(exec_state, argc, argv);
CHECK(result->IsString());
v8::Handle<v8::String> function_name(result->ToString());
- CHECK(function_name->Equals(v8::String::New("loop")));
+ CHECK(function_name->Equals(v8::String::NewFromUtf8(isolate, "loop")));
// Get the name of the first argument in frame i.
result = frame_argument_name->Call(exec_state, argc, argv);
CHECK(result->IsString());
v8::Handle<v8::String> argument_name(result->ToString());
- CHECK(argument_name->Equals(v8::String::New("count")));
+ CHECK(argument_name->Equals(v8::String::NewFromUtf8(isolate, "count")));
// Get the value of the first argument in frame i. If the
// function is optimized the value will be undefined, otherwise
// the value will be '1 - i'.
@@ -7277,7 +7415,7 @@ static void DebugEventBreakWithOptimizedStack(
result = frame_local_name->Call(exec_state, argc, argv);
CHECK(result->IsString());
v8::Handle<v8::String> local_name(result->ToString());
- CHECK(local_name->Equals(v8::String::New("local")));
+ CHECK(local_name->Equals(v8::String::NewFromUtf8(isolate, "local")));
// Get the value of the first local variable. If the function
// is optimized the value will be undefined, otherwise it will
// be 42.
@@ -7315,7 +7453,7 @@ TEST(DebugBreakStackInspection) {
CompileFunction(&env, frame_local_value_source, "frame_local_value");
v8::Handle<v8::FunctionTemplate> schedule_break_template =
- v8::FunctionTemplate::New(ScheduleBreak);
+ v8::FunctionTemplate::New(env->GetIsolate(), ScheduleBreak);
v8::Handle<v8::Function> schedule_break =
schedule_break_template->GetFunction();
env->Global()->Set(v8_str("scheduleBreak"), schedule_break);
@@ -7326,7 +7464,7 @@ TEST(DebugBreakStackInspection) {
" if (count < 1) { scheduleBreak(); loop(count + 1); }"
"}"
"loop(0);";
- v8::Script::Compile(v8::String::New(src))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), src))->Run();
}
@@ -7340,9 +7478,9 @@ static void TestDebugBreakInLoop(const char* loop_head,
for (int i = 0; loop_bodies[i] != NULL; i++) {
// Perform a lazy deoptimization after various numbers of breaks
// have been hit.
- for (int j = 0; j < 11; j++) {
+ for (int j = 0; j < 7; j++) {
break_point_hit_count_deoptimize = j;
- if (j == 10) {
+ if (j == 6) {
break_point_hit_count_deoptimize = kBreaksPerTest;
}
@@ -7470,7 +7608,8 @@ TEST(DebugBreakInline) {
"%OptimizeFunctionOnNextCall(g); \n"
"g(true);";
v8::Debug::SetDebugEventListener2(DebugBreakInlineListener);
- inline_script = v8::Script::Compile(v8::String::New(source));
+ inline_script =
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), source));
inline_script->Run();
}
@@ -7556,7 +7695,7 @@ TEST(LiveEditDisabled) {
v8::internal::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetLiveEditEnabled(false), env->GetIsolate();
+ v8::Debug::SetLiveEditEnabled(false, env->GetIsolate());
CompileRun("%LiveEditCompareStrings('', '')");
}
diff --git a/deps/v8/test/cctest/test-declarative-accessors.cc b/deps/v8/test/cctest/test-declarative-accessors.cc
index fb22ccdbab..f2169a9fb8 100644
--- a/deps/v8/test/cctest/test-declarative-accessors.cc
+++ b/deps/v8/test/cctest/test-declarative-accessors.cc
@@ -42,8 +42,7 @@ class HandleArray : public Malloced {
void Reset() {
for (unsigned i = 0; i < kArraySize; i++) {
if (handles_[i].IsEmpty()) continue;
- handles_[i].Dispose();
- handles_[i].Clear();
+ handles_[i].Reset();
}
}
v8::Persistent<v8::Value> handles_[kArraySize];
@@ -96,7 +95,8 @@ static v8::Local<v8::ObjectTemplate> CreateConstructor(
const char* descriptor_name = NULL,
v8::Handle<v8::DeclaredAccessorDescriptor> descriptor =
v8::Handle<v8::DeclaredAccessorDescriptor>()) {
- v8::Local<v8::FunctionTemplate> constructor = v8::FunctionTemplate::New();
+ v8::Local<v8::FunctionTemplate> constructor =
+ v8::FunctionTemplate::New(context->GetIsolate());
v8::Local<v8::ObjectTemplate> obj_template = constructor->InstanceTemplate();
// Setup object template.
if (descriptor_name != NULL && !descriptor.IsEmpty()) {
@@ -147,17 +147,17 @@ static void VerifyRead(v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
static v8::Handle<v8::Value> Convert(int32_t value, v8::Isolate* isolate) {
- return v8::Integer::New(value, isolate);
+ return v8::Integer::New(isolate, value);
}
-static v8::Handle<v8::Value> Convert(float value, v8::Isolate*) {
- return v8::Number::New(value);
+static v8::Handle<v8::Value> Convert(float value, v8::Isolate* isolate) {
+ return v8::Number::New(isolate, value);
}
-static v8::Handle<v8::Value> Convert(double value, v8::Isolate*) {
- return v8::Number::New(value);
+static v8::Handle<v8::Value> Convert(double value, v8::Isolate* isolate) {
+ return v8::Number::New(isolate, value);
}
@@ -178,7 +178,7 @@ static void TestPrimitiveValue(
v8::Handle<v8::Value> expected = Convert(value, helper->isolate_);
helper->array_->Reset();
helper->array_->As<T*>()[index] = value;
- VerifyRead(descriptor, internal_field, *helper->array_, expected);
+ VerifyRead(descriptor, internal_field, helper->array_.get(), expected);
}
@@ -224,7 +224,7 @@ static void TestBitmaskCompare(T bitmask,
CHECK(false);
break;
}
- AlignedArray* array = *helper->array_;
+ AlignedArray* array = helper->array_.get();
array->Reset();
VerifyRead(descriptor, internal_field, array, v8::False(helper->isolate_));
array->As<T*>()[index] = compare_value;
@@ -252,7 +252,7 @@ TEST(PointerCompareRead) {
OOD::NewInternalFieldDereference(helper.isolate_, internal_field)
->NewRawShift(helper.isolate_, static_cast<uint16_t>(index*sizeof(ptr)))
->NewPointerCompare(helper.isolate_, ptr);
- AlignedArray* array = *helper.array_;
+ AlignedArray* array = helper.array_.get();
VerifyRead(descriptor, internal_field, array, v8::False(helper.isolate_));
array->As<uintptr_t*>()[index] = reinterpret_cast<uintptr_t>(ptr);
VerifyRead(descriptor, internal_field, array, v8::True(helper.isolate_));
@@ -274,13 +274,15 @@ TEST(PointerDereferenceRead) {
->NewRawShift(helper.isolate_,
static_cast<uint16_t>(second_index*sizeof(int16_t)))
->NewPrimitiveValue(helper.isolate_, v8::kDescriptorInt16Type, 0);
- AlignedArray* array = *helper.array_;
+ AlignedArray* array = helper.array_.get();
array->As<uintptr_t**>()[first_index] =
&array->As<uintptr_t*>()[pointed_to_index];
- VerifyRead(descriptor, internal_field, array, v8::Integer::New(0));
+ VerifyRead(descriptor, internal_field, array,
+ v8::Integer::New(helper.isolate_, 0));
second_index += pointed_to_index*sizeof(uintptr_t)/sizeof(uint16_t);
array->As<uint16_t*>()[second_index] = expected;
- VerifyRead(descriptor, internal_field, array, v8::Integer::New(expected));
+ VerifyRead(descriptor, internal_field, array,
+ v8::Integer::New(helper.isolate_, expected));
}
@@ -293,7 +295,7 @@ TEST(HandleDereferenceRead) {
OOD::NewInternalFieldDereference(helper.isolate_, internal_field)
->NewRawShift(helper.isolate_, index*kPointerSize)
->NewHandleDereference(helper.isolate_);
- HandleArray* array = *helper.handle_array_;
+ HandleArray* array = helper.handle_array_.get();
v8::Handle<v8::String> expected = v8_str("whatever");
array->handles_[index].Reset(helper.isolate_, expected);
VerifyRead(descriptor, internal_field, array, expected);
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index de27286dac..1f22c9ff3a 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -56,7 +56,7 @@ class DeclarationContext {
HandleScope scope(isolate);
Local<Context> context = Local<Context>::New(isolate, context_);
context->Exit();
- context_.Dispose();
+ context_.Reset();
}
}
@@ -96,6 +96,8 @@ class DeclarationContext {
static void HandleQuery(Local<String> key,
const v8::PropertyCallbackInfo<v8::Integer>& info);
+ v8::Isolate* isolate() const { return CcTest::isolate(); }
+
private:
bool is_initialized_;
Persistent<Context> context_;
@@ -118,8 +120,8 @@ void DeclarationContext::InitializeIfNeeded() {
if (is_initialized_) return;
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
- Local<FunctionTemplate> function = FunctionTemplate::New();
- Local<Value> data = External::New(this);
+ Local<FunctionTemplate> function = FunctionTemplate::New(isolate);
+ Local<Value> data = External::New(CcTest::isolate(), this);
GetHolder(function)->SetNamedPropertyHandler(&HandleGet,
&HandleSet,
&HandleQuery,
@@ -147,7 +149,8 @@ void DeclarationContext::Check(const char* source,
HandleScope scope(CcTest::isolate());
TryCatch catcher;
catcher.SetVerbose(true);
- Local<Script> script = Script::Compile(String::New(source));
+ Local<Script> script =
+ Script::Compile(String::NewFromUtf8(CcTest::isolate(), source));
if (expectations == EXPECT_ERROR) {
CHECK(script.IsEmpty());
return;
@@ -243,7 +246,7 @@ TEST(Unknown) {
1, // access
2, // declaration + initialization
2, // declaration + initialization
- EXPECT_RESULT, Number::New(0));
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
}
{ DeclarationContext context;
@@ -277,7 +280,7 @@ TEST(Unknown) {
class PresentPropertyContext: public DeclarationContext {
protected:
virtual v8::Handle<Integer> Query(Local<String> key) {
- return Integer::New(v8::None);
+ return Integer::New(isolate(), v8::None);
}
};
@@ -299,7 +302,7 @@ TEST(Present) {
1, // access
1, // initialization
2, // declaration + initialization
- EXPECT_RESULT, Number::New(0));
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
}
{ PresentPropertyContext context;
@@ -323,7 +326,7 @@ TEST(Present) {
1, // access
1, // initialization
1, // (re-)declaration
- EXPECT_RESULT, Number::New(0));
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
}
}
@@ -355,7 +358,7 @@ TEST(Absent) {
1, // access
2, // declaration + initialization
2, // declaration + initialization
- EXPECT_RESULT, Number::New(0));
+ EXPECT_RESULT, Number::New(isolate, 0));
}
{ AbsentPropertyContext context;
@@ -415,7 +418,7 @@ class AppearingPropertyContext: public DeclarationContext {
// Return that the property is present so we only get the
// setter called when initializing with a value.
state_ = UNKNOWN;
- return Integer::New(v8::None);
+ return Integer::New(isolate(), v8::None);
default:
CHECK(state_ == UNKNOWN);
break;
@@ -446,7 +449,7 @@ TEST(Appearing) {
1, // access
2, // declaration + initialization
2, // declaration + initialization
- EXPECT_RESULT, Number::New(0));
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
}
{ AppearingPropertyContext context;
@@ -501,7 +504,7 @@ class ReappearingPropertyContext: public DeclarationContext {
// Ignore the second declaration by returning
// that the property is already there.
state_ = INITIALIZE;
- return Integer::New(v8::None);
+ return Integer::New(isolate(), v8::None);
case INITIALIZE:
// Force an initialization by returning that
// the property is absent. This will make sure
@@ -538,10 +541,12 @@ TEST(Reappearing) {
class ExistsInPrototypeContext: public DeclarationContext {
+ public:
+ ExistsInPrototypeContext() { InitializeIfNeeded(); }
protected:
virtual v8::Handle<Integer> Query(Local<String> key) {
// Let it seem that the property exists in the prototype object.
- return Integer::New(v8::None);
+ return Integer::New(isolate(), v8::None);
}
// Use the prototype as the holder for the interceptors.
@@ -562,7 +567,7 @@ TEST(ExistsInPrototype) {
0,
0,
0,
- EXPECT_RESULT, Number::New(87));
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 87));
}
{ ExistsInPrototypeContext context;
@@ -578,7 +583,7 @@ TEST(ExistsInPrototype) {
0,
0,
0,
- EXPECT_RESULT, Number::New(0));
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
}
{ ExistsInPrototypeContext context;
@@ -594,7 +599,7 @@ TEST(ExistsInPrototype) {
0,
0,
0,
- EXPECT_RESULT, Number::New(0));
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
}
}
@@ -633,23 +638,23 @@ TEST(AbsentInPrototype) {
class ExistsInHiddenPrototypeContext: public DeclarationContext {
public:
ExistsInHiddenPrototypeContext() {
- hidden_proto_ = FunctionTemplate::New();
+ hidden_proto_ = FunctionTemplate::New(CcTest::isolate());
hidden_proto_->SetHiddenPrototype(true);
}
protected:
virtual v8::Handle<Integer> Query(Local<String> key) {
// Let it seem that the property exists in the hidden prototype object.
- return Integer::New(v8::None);
+ return Integer::New(isolate(), v8::None);
}
// Install the hidden prototype after the global object has been created.
virtual void PostInitializeContext(Handle<Context> context) {
Local<Object> global_object = context->Global();
Local<Object> hidden_proto = hidden_proto_->GetFunction()->NewInstance();
- context->DetachGlobal();
- context->Global()->SetPrototype(hidden_proto);
- context->ReattachGlobal(global_object);
+ Local<Object> inner_global =
+ Local<Object>::Cast(global_object->GetPrototype());
+ inner_global->SetPrototype(hidden_proto);
}
// Use the hidden prototype as the holder for the interceptors.
@@ -679,7 +684,7 @@ TEST(ExistsInHiddenPrototype) {
1, // access
1, // initialization
2, // declaration + initialization
- EXPECT_RESULT, Number::New(0));
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
}
{ ExistsInHiddenPrototypeContext context;
@@ -705,7 +710,7 @@ TEST(ExistsInHiddenPrototype) {
0,
0,
1, // (re-)declaration
- EXPECT_RESULT, Number::New(0));
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
}
}
@@ -729,7 +734,8 @@ class SimpleContext {
HandleScope scope(context_->GetIsolate());
TryCatch catcher;
catcher.SetVerbose(true);
- Local<Script> script = Script::Compile(String::New(source));
+ Local<Script> script =
+ Script::Compile(String::NewFromUtf8(context_->GetIsolate(), source));
if (expectations == EXPECT_ERROR) {
CHECK(script.IsEmpty());
return;
@@ -757,40 +763,41 @@ class SimpleContext {
TEST(CrossScriptReferences) {
- HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
{ SimpleContext context;
context.Check("var x = 1; x",
- EXPECT_RESULT, Number::New(1));
+ EXPECT_RESULT, Number::New(isolate, 1));
context.Check("var x = 2; x",
- EXPECT_RESULT, Number::New(2));
+ EXPECT_RESULT, Number::New(isolate, 2));
context.Check("const x = 3; x",
- EXPECT_RESULT, Number::New(3));
+ EXPECT_RESULT, Number::New(isolate, 3));
context.Check("const x = 4; x",
- EXPECT_RESULT, Number::New(4));
+ EXPECT_RESULT, Number::New(isolate, 4));
context.Check("x = 5; x",
- EXPECT_RESULT, Number::New(5));
+ EXPECT_RESULT, Number::New(isolate, 5));
context.Check("var x = 6; x",
- EXPECT_RESULT, Number::New(6));
+ EXPECT_RESULT, Number::New(isolate, 6));
context.Check("this.x",
- EXPECT_RESULT, Number::New(6));
+ EXPECT_RESULT, Number::New(isolate, 6));
context.Check("function x() { return 7 }; x()",
- EXPECT_RESULT, Number::New(7));
+ EXPECT_RESULT, Number::New(isolate, 7));
}
{ SimpleContext context;
context.Check("const x = 1; x",
- EXPECT_RESULT, Number::New(1));
+ EXPECT_RESULT, Number::New(isolate, 1));
context.Check("var x = 2; x", // assignment ignored
- EXPECT_RESULT, Number::New(1));
+ EXPECT_RESULT, Number::New(isolate, 1));
context.Check("const x = 3; x",
- EXPECT_RESULT, Number::New(1));
+ EXPECT_RESULT, Number::New(isolate, 1));
context.Check("x = 4; x", // assignment ignored
- EXPECT_RESULT, Number::New(1));
+ EXPECT_RESULT, Number::New(isolate, 1));
context.Check("var x = 5; x", // assignment ignored
- EXPECT_RESULT, Number::New(1));
+ EXPECT_RESULT, Number::New(isolate, 1));
context.Check("this.x",
- EXPECT_RESULT, Number::New(1));
+ EXPECT_RESULT, Number::New(isolate, 1));
context.Check("function x() { return 7 }; x",
EXPECT_EXCEPTION);
}
@@ -802,7 +809,8 @@ TEST(CrossScriptReferencesHarmony) {
i::FLAG_harmony_scoping = true;
i::FLAG_harmony_modules = true;
- HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
const char* decs[] = {
"var x = 1; x", "x", "this.x",
@@ -815,12 +823,14 @@ TEST(CrossScriptReferencesHarmony) {
for (int i = 0; decs[i] != NULL; i += 3) {
SimpleContext context;
- context.Check(decs[i], EXPECT_RESULT, Number::New(1));
- context.Check(decs[i+1], EXPECT_RESULT, Number::New(1));
+ context.Check(decs[i], EXPECT_RESULT, Number::New(isolate, 1));
+ context.Check(decs[i+1], EXPECT_RESULT, Number::New(isolate, 1));
// TODO(rossberg): The current ES6 draft spec does not reflect lexical
// bindings on the global object. However, this will probably change, in
// which case we reactivate the following test.
- if (i/3 < 2) context.Check(decs[i+2], EXPECT_RESULT, Number::New(1));
+ if (i/3 < 2) {
+ context.Check(decs[i+2], EXPECT_RESULT, Number::New(isolate, 1));
+ }
}
}
@@ -852,12 +862,14 @@ TEST(CrossScriptConflicts) {
for (int i = 0; firsts[i] != NULL; ++i) {
for (int j = 0; seconds[j] != NULL; ++j) {
SimpleContext context;
- context.Check(firsts[i], EXPECT_RESULT, Number::New(1));
+ context.Check(firsts[i], EXPECT_RESULT,
+ Number::New(CcTest::isolate(), 1));
// TODO(rossberg): All tests should actually be errors in Harmony,
// but we currently do not detect the cases where the first declaration
// is not lexical.
context.Check(seconds[j],
- i < 2 ? EXPECT_RESULT : EXPECT_ERROR, Number::New(2));
+ i < 2 ? EXPECT_RESULT : EXPECT_ERROR,
+ Number::New(CcTest::isolate(), 2));
}
}
}
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 765b1ce55f..dbbb3edb09 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -77,27 +77,23 @@ class AlwaysOptimizeAllowNativesSyntaxNoInlining {
// Utility class to set --allow-natives-syntax and --nouse-inlining when
// constructed and restore them to their default state when destroyed.
-class AllowNativesSyntaxNoInliningNoConcurrent {
+class AllowNativesSyntaxNoInlining {
public:
- AllowNativesSyntaxNoInliningNoConcurrent()
+ AllowNativesSyntaxNoInlining()
: allow_natives_syntax_(i::FLAG_allow_natives_syntax),
- use_inlining_(i::FLAG_use_inlining),
- concurrent_recompilation_(i::FLAG_concurrent_recompilation) {
+ use_inlining_(i::FLAG_use_inlining) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_use_inlining = false;
- i::FLAG_concurrent_recompilation = false;
}
- ~AllowNativesSyntaxNoInliningNoConcurrent() {
+ ~AllowNativesSyntaxNoInlining() {
i::FLAG_allow_natives_syntax = allow_natives_syntax_;
i::FLAG_use_inlining = use_inlining_;
- i::FLAG_concurrent_recompilation = concurrent_recompilation_;
}
private:
bool allow_natives_syntax_;
bool use_inlining_;
- bool concurrent_recompilation_;
};
@@ -239,8 +235,8 @@ TEST(DeoptimizeRecursive) {
CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
- v8::Local<v8::Function> fun =
- v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::NewFromUtf8(CcTest::isolate(), "f")));
CHECK(!fun.IsEmpty());
}
@@ -341,13 +337,14 @@ TEST(DeoptimizeConstructorMultiple) {
TEST(DeoptimizeBinaryOperationADDString) {
+ i::FLAG_concurrent_recompilation = false;
+ AllowNativesSyntaxNoInlining options;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* f_source = "function f(x, y) { return x + y; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
// Compile function f and collect type feedback to insert binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -405,7 +402,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
binary_op);
char* f_source = f_source_buffer.start();
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile function f and collect type feedback to insert binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -431,6 +428,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
TEST(DeoptimizeBinaryOperationADD) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -443,6 +441,7 @@ TEST(DeoptimizeBinaryOperationADD) {
TEST(DeoptimizeBinaryOperationSUB) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -455,6 +454,7 @@ TEST(DeoptimizeBinaryOperationSUB) {
TEST(DeoptimizeBinaryOperationMUL) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -467,6 +467,7 @@ TEST(DeoptimizeBinaryOperationMUL) {
TEST(DeoptimizeBinaryOperationDIV) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -479,6 +480,7 @@ TEST(DeoptimizeBinaryOperationDIV) {
TEST(DeoptimizeBinaryOperationMOD) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -491,13 +493,14 @@ TEST(DeoptimizeBinaryOperationMOD) {
TEST(DeoptimizeCompare) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* f_source = "function f(x, y) { return x < y; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile function f and collect type feedback to insert compare ic
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -534,6 +537,7 @@ TEST(DeoptimizeCompare) {
TEST(DeoptimizeLoadICStoreIC) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -544,7 +548,7 @@ TEST(DeoptimizeLoadICStoreIC) {
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile functions and collect type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -609,11 +613,11 @@ TEST(DeoptimizeLoadICStoreIC) {
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
TEST(DeoptimizeLoadICStoreICNested) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -624,7 +628,7 @@ TEST(DeoptimizeLoadICStoreICNested) {
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile functions and collect type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -690,5 +694,4 @@ TEST(DeoptimizeLoadICStoreICNested) {
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 44f64f7881..6e62a2243c 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -47,7 +47,7 @@ TEST(ObjectHashTable) {
Handle<ObjectHashTable> table = factory->NewObjectHashTable(23);
Handle<JSObject> a = factory->NewJSArray(7);
Handle<JSObject> b = factory->NewJSArray(11);
- table = PutIntoObjectHashTable(table, a, b);
+ table = ObjectHashTable::Put(table, a, b);
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_EQ(table->Lookup(*a), *b);
CHECK_EQ(table->Lookup(*b), CcTest::heap()->the_hole_value());
@@ -59,12 +59,12 @@ TEST(ObjectHashTable) {
CHECK_EQ(table->Lookup(*b), CcTest::heap()->the_hole_value());
// Keys that are overwritten should not change the number of elements.
- table = PutIntoObjectHashTable(table, a, factory->NewJSArray(13));
+ table = ObjectHashTable::Put(table, a, factory->NewJSArray(13));
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_NE(table->Lookup(*a), *b);
// Keys mapped to the hole should be removed permanently.
- table = PutIntoObjectHashTable(table, a, factory->the_hole_value());
+ table = ObjectHashTable::Put(table, a, factory->the_hole_value());
CHECK_EQ(table->NumberOfElements(), 0);
CHECK_EQ(table->NumberOfDeletedElements(), 1);
CHECK_EQ(table->Lookup(*a), CcTest::heap()->the_hole_value());
@@ -74,21 +74,21 @@ TEST(ObjectHashTable) {
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
Handle<JSObject> value = factory->NewJSArray(11);
- table = PutIntoObjectHashTable(table, key, value);
+ table = ObjectHashTable::Put(table, key, value);
CHECK_EQ(table->NumberOfElements(), i + 1);
CHECK_NE(table->FindEntry(*key), ObjectHashTable::kNotFound);
CHECK_EQ(table->Lookup(*key), *value);
- CHECK(key->GetIdentityHash(OMIT_CREATION)->ToObjectChecked()->IsSmi());
+ CHECK(key->GetIdentityHash()->IsSmi());
}
// Keys never added to the map which already have an identity hash
// code should not be found.
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
- CHECK(key->GetIdentityHash(ALLOW_CREATION)->ToObjectChecked()->IsSmi());
+ CHECK(JSReceiver::GetOrCreateIdentityHash(key)->IsSmi());
CHECK_EQ(table->FindEntry(*key), ObjectHashTable::kNotFound);
CHECK_EQ(table->Lookup(*key), CcTest::heap()->the_hole_value());
- CHECK(key->GetIdentityHash(OMIT_CREATION)->ToObjectChecked()->IsSmi());
+ CHECK(key->GetIdentityHash()->IsSmi());
}
// Keys that don't have an identity hash should not be found and also
@@ -96,7 +96,7 @@ TEST(ObjectHashTable) {
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
CHECK_EQ(table->Lookup(*key), CcTest::heap()->the_hole_value());
- CHECK_EQ(key->GetIdentityHash(OMIT_CREATION),
+ CHECK_EQ(key->GetIdentityHash(),
CcTest::heap()->undefined_value());
}
}
@@ -175,13 +175,17 @@ TEST(ObjectHashSetCausesGC) {
SimulateFullSpace(CcTest::heap()->old_pointer_space());
// Calling Contains() should not cause GC ever.
+ int gc_count = isolate->heap()->gc_count();
CHECK(!table->Contains(*key));
+ CHECK(gc_count == isolate->heap()->gc_count());
- // Calling Remove() should not cause GC ever.
- CHECK(!table->Remove(*key)->IsFailure());
+ // Calling Remove() will not cause GC in this case.
+ table = ObjectHashSet::Remove(table, key);
+ CHECK(gc_count == isolate->heap()->gc_count());
- // Calling Add() should request GC by returning a failure.
- CHECK(table->Add(*key)->IsRetryAfterGC());
+ // Calling Add() should cause GC.
+ table = ObjectHashSet::Add(table, key);
+ CHECK(gc_count < isolate->heap()->gc_count());
}
#endif
@@ -211,6 +215,8 @@ TEST(ObjectHashTableCausesGC) {
CHECK(table->Lookup(*key)->IsTheHole());
// Calling Put() should request GC by returning a failure.
- CHECK(table->Put(*key, *key)->IsRetryAfterGC());
+ int gc_count = isolate->heap()->gc_count();
+ ObjectHashTable::Put(table, key, key);
+ CHECK(gc_count < isolate->heap()->gc_count());
}
#endif
diff --git a/deps/v8/test/cctest/test-disasm-a64.cc b/deps/v8/test/cctest/test-disasm-a64.cc
new file mode 100644
index 0000000000..0ada0b1a56
--- /dev/null
+++ b/deps/v8/test/cctest/test-disasm-a64.cc
@@ -0,0 +1,1761 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include <cstring>
+#include "cctest.h"
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "a64/assembler-a64.h"
+#include "a64/macro-assembler-a64.h"
+#include "a64/disasm-a64.h"
+#include "a64/utils-a64.h"
+
+using namespace v8::internal;
+
+#define TEST_(name) TEST(DISASM_##name)
+
+#define EXP_SIZE (256)
+#define INSTR_SIZE (1024)
+#define SET_UP_CLASS(ASMCLASS) \
+ InitializeVM(); \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
+ uint32_t encoding = 0; \
+ ASMCLASS* assm = new ASMCLASS(isolate, buf, INSTR_SIZE); \
+ Decoder* decoder = new Decoder(); \
+ Disassembler* disasm = new Disassembler(); \
+ decoder->AppendVisitor(disasm)
+
+#define SET_UP() SET_UP_CLASS(Assembler)
+
+#define COMPARE(ASM, EXP) \
+ assm->Reset(); \
+ assm->ASM; \
+ assm->GetCode(NULL); \
+ decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
+ encoding = *reinterpret_cast<uint32_t*>(buf); \
+ if (strcmp(disasm->GetOutput(), EXP) != 0) { \
+ printf("%u : Encoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ __LINE__, encoding, EXP, disasm->GetOutput()); \
+ abort(); \
+ }
+
+#define COMPARE_PREFIX(ASM, EXP) \
+ assm->Reset(); \
+ assm->ASM; \
+ assm->GetCode(NULL); \
+ decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
+ encoding = *reinterpret_cast<uint32_t*>(buf); \
+ if (strncmp(disasm->GetOutput(), EXP, strlen(EXP)) != 0) { \
+ printf("%u : Encoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ __LINE__, encoding, EXP, disasm->GetOutput()); \
+ abort(); \
+ }
+
+#define CLEANUP() \
+ delete disasm; \
+ delete decoder; \
+ delete assm
+
+
+static bool vm_initialized = false;
+
+
+static void InitializeVM() {
+ if (!vm_initialized) {
+ CcTest::InitializeVM();
+ vm_initialized = true;
+ }
+}
+
+
+TEST_(bootstrap) {
+ SET_UP();
+
+ // Instructions generated by the C compiler, disassembled by objdump, and
+ // reformatted to suit our disassembly style.
+ COMPARE(dci(0xa9ba7bfd), "stp fp, lr, [csp, #-96]!");
+ COMPARE(dci(0x910003fd), "mov fp, csp");
+ COMPARE(dci(0x9100e3a0), "add x0, fp, #0x38 (56)");
+ COMPARE(dci(0xb900001f), "str wzr, [x0]");
+ COMPARE(dci(0x528000e1), "movz w1, #0x7");
+ COMPARE(dci(0xb9001c01), "str w1, [x0, #28]");
+ COMPARE(dci(0x390043a0), "strb w0, [fp, #16]");
+ COMPARE(dci(0x790027a0), "strh w0, [fp, #18]");
+ COMPARE(dci(0xb9400400), "ldr w0, [x0, #4]");
+ COMPARE(dci(0x0b000021), "add w1, w1, w0");
+ COMPARE(dci(0x531b6800), "lsl w0, w0, #5");
+ COMPARE(dci(0x521e0400), "eor w0, w0, #0xc");
+ COMPARE(dci(0x72af0f00), "movk w0, #0x7878, lsl #16");
+ COMPARE(dci(0xd360fc00), "lsr x0, x0, #32");
+ COMPARE(dci(0x13037c01), "asr w1, w0, #3");
+ COMPARE(dci(0x4b000021), "sub w1, w1, w0");
+ COMPARE(dci(0x2a0103e0), "mov w0, w1");
+ COMPARE(dci(0x93407c00), "sxtw x0, w0");
+ COMPARE(dci(0x2a000020), "orr w0, w1, w0");
+ COMPARE(dci(0xa8c67bfd), "ldp fp, lr, [csp], #96");
+
+ CLEANUP();
+}
+
+
+TEST_(mov_mvn) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Mov(w0, Operand(0x1234)), "movz w0, #0x1234");
+ COMPARE(Mov(x1, Operand(0x1234)), "movz x1, #0x1234");
+ COMPARE(Mov(w2, Operand(w3)), "mov w2, w3");
+ COMPARE(Mov(x4, Operand(x5)), "mov x4, x5");
+ COMPARE(Mov(w6, Operand(w7, LSL, 5)), "lsl w6, w7, #5");
+ COMPARE(Mov(x8, Operand(x9, ASR, 42)), "asr x8, x9, #42");
+ COMPARE(Mov(w10, Operand(w11, UXTB)), "uxtb w10, w11");
+ COMPARE(Mov(x12, Operand(x13, UXTB, 1)), "ubfiz x12, x13, #1, #8");
+ COMPARE(Mov(w14, Operand(w15, SXTH, 2)), "sbfiz w14, w15, #2, #16");
+ COMPARE(Mov(x16, Operand(x20, SXTW, 3)), "sbfiz x16, x20, #3, #32");
+
+ COMPARE(Mov(x0, csp), "mov x0, csp");
+ COMPARE(Mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(Mov(x0, xzr), "mov x0, xzr");
+ COMPARE(Mov(w0, wzr), "mov w0, wzr");
+ COMPARE(mov(x0, csp), "mov x0, csp");
+ COMPARE(mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(mov(x0, xzr), "mov x0, xzr");
+ COMPARE(mov(w0, wzr), "mov w0, wzr");
+
+ COMPARE(Mvn(w0, Operand(0x1)), "movn w0, #0x1");
+ COMPARE(Mvn(x1, Operand(0xfff)), "movn x1, #0xfff");
+ COMPARE(Mvn(w2, Operand(w3)), "mvn w2, w3");
+ COMPARE(Mvn(x4, Operand(x5)), "mvn x4, x5");
+ COMPARE(Mvn(w6, Operand(w7, LSL, 12)), "mvn w6, w7, lsl #12");
+ COMPARE(Mvn(x8, Operand(x9, ASR, 63)), "mvn x8, x9, asr #63");
+
+ CLEANUP();
+}
+
+
+TEST_(move_immediate) {
+ SET_UP();
+
+ COMPARE(movz(w0, 0x1234), "movz w0, #0x1234");
+ COMPARE(movz(x1, 0xabcd0000), "movz x1, #0xabcd0000");
+ COMPARE(movz(x2, 0x555500000000), "movz x2, #0x555500000000");
+ COMPARE(movz(x3, 0xaaaa000000000000), "movz x3, #0xaaaa000000000000");
+ COMPARE(movz(x4, 0xabcd, 16), "movz x4, #0xabcd0000");
+ COMPARE(movz(x5, 0x5555, 32), "movz x5, #0x555500000000");
+ COMPARE(movz(x6, 0xaaaa, 48), "movz x6, #0xaaaa000000000000");
+
+ COMPARE(movk(w7, 0x1234), "movk w7, #0x1234");
+ COMPARE(movk(x8, 0xabcd0000), "movk x8, #0xabcd, lsl #16");
+ COMPARE(movk(x9, 0x555500000000), "movk x9, #0x5555, lsl #32");
+ COMPARE(movk(x10, 0xaaaa000000000000), "movk x10, #0xaaaa, lsl #48");
+ COMPARE(movk(w11, 0xabcd, 16), "movk w11, #0xabcd, lsl #16");
+ COMPARE(movk(x12, 0x5555, 32), "movk x12, #0x5555, lsl #32");
+ COMPARE(movk(x13, 0xaaaa, 48), "movk x13, #0xaaaa, lsl #48");
+
+ COMPARE(movn(w14, 0x1234), "movn w14, #0x1234");
+ COMPARE(movn(x15, 0xabcd0000), "movn x15, #0xabcd0000");
+ COMPARE(movn(x16, 0x555500000000), "movn x16, #0x555500000000");
+ COMPARE(movn(x17, 0xaaaa000000000000), "movn x17, #0xaaaa000000000000");
+ COMPARE(movn(w18, 0xabcd, 16), "movn w18, #0xabcd0000");
+ COMPARE(movn(x19, 0x5555, 32), "movn x19, #0x555500000000");
+ COMPARE(movn(x20, 0xaaaa, 48), "movn x20, #0xaaaa000000000000");
+
+ COMPARE(movk(w21, 0), "movk w21, #0x0");
+ COMPARE(movk(x22, 0, 0), "movk x22, #0x0");
+ COMPARE(movk(w23, 0, 16), "movk w23, #0x0, lsl #16");
+ COMPARE(movk(x24, 0, 32), "movk x24, #0x0, lsl #32");
+ COMPARE(movk(x25, 0, 48), "movk x25, #0x0, lsl #48");
+
+ CLEANUP();
+}
+
+
+TEST(move_immediate_2) {
+ SET_UP_CLASS(MacroAssembler);
+
+ // Move instructions expected for certain immediates. This is really a macro
+ // assembler test, to ensure it generates immediates efficiently.
+ COMPARE(Mov(w0, 0), "movz w0, #0x0");
+ COMPARE(Mov(w0, 0x0000ffff), "movz w0, #0xffff");
+ COMPARE(Mov(w0, 0x00010000), "movz w0, #0x10000");
+ COMPARE(Mov(w0, 0xffff0000), "movz w0, #0xffff0000");
+ COMPARE(Mov(w0, 0x0001ffff), "movn w0, #0xfffe0000");
+ COMPARE(Mov(w0, 0xffff8000), "movn w0, #0x7fff");
+ COMPARE(Mov(w0, 0xfffffffe), "movn w0, #0x1");
+ COMPARE(Mov(w0, 0xffffffff), "movn w0, #0x0");
+ COMPARE(Mov(w0, 0x00ffff00), "mov w0, #0xffff00");
+ COMPARE(Mov(w0, 0xfffe7fff), "mov w0, #0xfffe7fff");
+ COMPARE(Mov(w0, 0xfffeffff), "movn w0, #0x10000");
+ COMPARE(Mov(w0, 0xffff7fff), "movn w0, #0x8000");
+
+ COMPARE(Mov(x0, 0), "movz x0, #0x0");
+ COMPARE(Mov(x0, 0x0000ffff), "movz x0, #0xffff");
+ COMPARE(Mov(x0, 0x00010000), "movz x0, #0x10000");
+ COMPARE(Mov(x0, 0xffff0000), "movz x0, #0xffff0000");
+ COMPARE(Mov(x0, 0x0001ffff), "mov x0, #0x1ffff");
+ COMPARE(Mov(x0, 0xffff8000), "mov x0, #0xffff8000");
+ COMPARE(Mov(x0, 0xfffffffe), "mov x0, #0xfffffffe");
+ COMPARE(Mov(x0, 0xffffffff), "mov x0, #0xffffffff");
+ COMPARE(Mov(x0, 0x00ffff00), "mov x0, #0xffff00");
+ COMPARE(Mov(x0, 0xffff000000000000), "movz x0, #0xffff000000000000");
+ COMPARE(Mov(x0, 0x0000ffff00000000), "movz x0, #0xffff00000000");
+ COMPARE(Mov(x0, 0x00000000ffff0000), "movz x0, #0xffff0000");
+ COMPARE(Mov(x0, 0xffffffffffff0000), "movn x0, #0xffff");
+ COMPARE(Mov(x0, 0xffffffff0000ffff), "movn x0, #0xffff0000");
+ COMPARE(Mov(x0, 0xffff0000ffffffff), "movn x0, #0xffff00000000");
+ COMPARE(Mov(x0, 0x0000ffffffffffff), "movn x0, #0xffff000000000000");
+ COMPARE(Mov(x0, 0xfffe7fffffffffff), "mov x0, #0xfffe7fffffffffff");
+ COMPARE(Mov(x0, 0xfffeffffffffffff), "movn x0, #0x1000000000000");
+ COMPARE(Mov(x0, 0xffff7fffffffffff), "movn x0, #0x800000000000");
+ COMPARE(Mov(x0, 0xfffffffe7fffffff), "mov x0, #0xfffffffe7fffffff");
+ COMPARE(Mov(x0, 0xfffffffeffffffff), "movn x0, #0x100000000");
+ COMPARE(Mov(x0, 0xffffffff7fffffff), "movn x0, #0x80000000");
+ COMPARE(Mov(x0, 0xfffffffffffe7fff), "mov x0, #0xfffffffffffe7fff");
+ COMPARE(Mov(x0, 0xfffffffffffeffff), "movn x0, #0x10000");
+ COMPARE(Mov(x0, 0xffffffffffff7fff), "movn x0, #0x8000");
+ COMPARE(Mov(x0, 0xffffffffffffffff), "movn x0, #0x0");
+
+ COMPARE(Movk(w0, 0x1234, 0), "movk w0, #0x1234");
+ COMPARE(Movk(x1, 0x2345, 0), "movk x1, #0x2345");
+ COMPARE(Movk(w2, 0x3456, 16), "movk w2, #0x3456, lsl #16");
+ COMPARE(Movk(x3, 0x4567, 16), "movk x3, #0x4567, lsl #16");
+ COMPARE(Movk(x4, 0x5678, 32), "movk x4, #0x5678, lsl #32");
+ COMPARE(Movk(x5, 0x6789, 48), "movk x5, #0x6789, lsl #48");
+
+ CLEANUP();
+}
+
+
+TEST_(add_immediate) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(0xff)), "add w0, w1, #0xff (255)");
+ COMPARE(add(x2, x3, Operand(0x3ff)), "add x2, x3, #0x3ff (1023)");
+ COMPARE(add(w4, w5, Operand(0xfff)), "add w4, w5, #0xfff (4095)");
+ COMPARE(add(x6, x7, Operand(0x1000)), "add x6, x7, #0x1000 (4096)");
+ COMPARE(add(w8, w9, Operand(0xff000)), "add w8, w9, #0xff000 (1044480)");
+ COMPARE(add(x10, x11, Operand(0x3ff000)),
+ "add x10, x11, #0x3ff000 (4190208)");
+ COMPARE(add(w12, w13, Operand(0xfff000)),
+ "add w12, w13, #0xfff000 (16773120)");
+ COMPARE(adds(w14, w15, Operand(0xff)), "adds w14, w15, #0xff (255)");
+ COMPARE(adds(x16, x17, Operand(0xaa000)),
+ "adds x16, x17, #0xaa000 (696320)");
+ COMPARE(cmn(w18, Operand(0xff)), "cmn w18, #0xff (255)");
+ COMPARE(cmn(x19, Operand(0xff000)), "cmn x19, #0xff000 (1044480)");
+ COMPARE(add(w0, wcsp, Operand(0)), "mov w0, wcsp");
+ COMPARE(add(csp, x0, Operand(0)), "mov csp, x0");
+
+ COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
+ COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
+ COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
+ COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
+ COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_immediate) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(0xff)), "sub w0, w1, #0xff (255)");
+ COMPARE(sub(x2, x3, Operand(0x3ff)), "sub x2, x3, #0x3ff (1023)");
+ COMPARE(sub(w4, w5, Operand(0xfff)), "sub w4, w5, #0xfff (4095)");
+ COMPARE(sub(x6, x7, Operand(0x1000)), "sub x6, x7, #0x1000 (4096)");
+ COMPARE(sub(w8, w9, Operand(0xff000)), "sub w8, w9, #0xff000 (1044480)");
+ COMPARE(sub(x10, x11, Operand(0x3ff000)),
+ "sub x10, x11, #0x3ff000 (4190208)");
+ COMPARE(sub(w12, w13, Operand(0xfff000)),
+ "sub w12, w13, #0xfff000 (16773120)");
+ COMPARE(subs(w14, w15, Operand(0xff)), "subs w14, w15, #0xff (255)");
+ COMPARE(subs(x16, x17, Operand(0xaa000)),
+ "subs x16, x17, #0xaa000 (696320)");
+ COMPARE(cmp(w18, Operand(0xff)), "cmp w18, #0xff (255)");
+ COMPARE(cmp(x19, Operand(0xff000)), "cmp x19, #0xff000 (1044480)");
+
+ COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
+ COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
+ COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
+ COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
+ COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
+
+ CLEANUP();
+}
+
+
+TEST_(add_shifted) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(w2)), "add w0, w1, w2");
+ COMPARE(add(x3, x4, Operand(x5)), "add x3, x4, x5");
+ COMPARE(add(w6, w7, Operand(w8, LSL, 1)), "add w6, w7, w8, lsl #1");
+ COMPARE(add(x9, x10, Operand(x11, LSL, 2)), "add x9, x10, x11, lsl #2");
+ COMPARE(add(w12, w13, Operand(w14, LSR, 3)), "add w12, w13, w14, lsr #3");
+ COMPARE(add(x15, x16, Operand(x17, LSR, 4)), "add x15, x16, x17, lsr #4");
+ COMPARE(add(w18, w19, Operand(w20, ASR, 5)), "add w18, w19, w20, asr #5");
+ COMPARE(add(x21, x22, Operand(x23, ASR, 6)), "add x21, x22, x23, asr #6");
+ COMPARE(cmn(w24, Operand(w25)), "cmn w24, w25");
+ COMPARE(cmn(x26, Operand(cp, LSL, 63)), "cmn x26, cp, lsl #63");
+
+ COMPARE(add(x0, csp, Operand(x1)), "add x0, csp, x1");
+ COMPARE(add(w2, wcsp, Operand(w3)), "add w2, wcsp, w3");
+ COMPARE(add(x4, csp, Operand(x5, LSL, 1)), "add x4, csp, x5, lsl #1");
+ COMPARE(add(x4, xzr, Operand(x5, LSL, 1)), "add x4, xzr, x5, lsl #1");
+ COMPARE(add(w6, wcsp, Operand(w7, LSL, 3)), "add w6, wcsp, w7, lsl #3");
+ COMPARE(adds(xzr, csp, Operand(x8, LSL, 4)), "cmn csp, x8, lsl #4");
+ COMPARE(adds(xzr, xzr, Operand(x8, LSL, 5)), "cmn xzr, x8, lsl #5");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_shifted) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(w2)), "sub w0, w1, w2");
+ COMPARE(sub(x3, x4, Operand(x5)), "sub x3, x4, x5");
+ COMPARE(sub(w6, w7, Operand(w8, LSL, 1)), "sub w6, w7, w8, lsl #1");
+ COMPARE(sub(x9, x10, Operand(x11, LSL, 2)), "sub x9, x10, x11, lsl #2");
+ COMPARE(sub(w12, w13, Operand(w14, LSR, 3)), "sub w12, w13, w14, lsr #3");
+ COMPARE(sub(x15, x16, Operand(x17, LSR, 4)), "sub x15, x16, x17, lsr #4");
+ COMPARE(sub(w18, w19, Operand(w20, ASR, 5)), "sub w18, w19, w20, asr #5");
+ COMPARE(sub(x21, x22, Operand(x23, ASR, 6)), "sub x21, x22, x23, asr #6");
+ COMPARE(cmp(w24, Operand(w25)), "cmp w24, w25");
+ COMPARE(cmp(x26, Operand(cp, LSL, 63)), "cmp x26, cp, lsl #63");
+ COMPARE(neg(w28, Operand(w29)), "neg w28, w29");
+ COMPARE(neg(lr, Operand(x0, LSR, 62)), "neg lr, x0, lsr #62");
+ COMPARE(negs(w1, Operand(w2)), "negs w1, w2");
+ COMPARE(negs(x3, Operand(x4, ASR, 61)), "negs x3, x4, asr #61");
+
+ COMPARE(sub(x0, csp, Operand(x1)), "sub x0, csp, x1");
+ COMPARE(sub(w2, wcsp, Operand(w3)), "sub w2, wcsp, w3");
+ COMPARE(sub(x4, csp, Operand(x5, LSL, 1)), "sub x4, csp, x5, lsl #1");
+ COMPARE(sub(x4, xzr, Operand(x5, LSL, 1)), "neg x4, x5, lsl #1");
+ COMPARE(sub(w6, wcsp, Operand(w7, LSL, 3)), "sub w6, wcsp, w7, lsl #3");
+ COMPARE(subs(xzr, csp, Operand(x8, LSL, 4)), "cmp csp, x8, lsl #4");
+ COMPARE(subs(xzr, xzr, Operand(x8, LSL, 5)), "cmp xzr, x8, lsl #5");
+
+ CLEANUP();
+}
+
+
+TEST_(add_extended) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(w2, UXTB)), "add w0, w1, w2, uxtb");
+ COMPARE(adds(x3, x4, Operand(w5, UXTB, 1)), "adds x3, x4, w5, uxtb #1");
+ COMPARE(add(w6, w7, Operand(w8, UXTH, 2)), "add w6, w7, w8, uxth #2");
+ COMPARE(adds(x9, x10, Operand(x11, UXTW, 3)), "adds x9, x10, w11, uxtw #3");
+ COMPARE(add(x12, x13, Operand(x14, UXTX, 4)), "add x12, x13, x14, uxtx #4");
+ COMPARE(adds(w15, w16, Operand(w17, SXTB, 4)), "adds w15, w16, w17, sxtb #4");
+ COMPARE(add(x18, x19, Operand(x20, SXTB, 3)), "add x18, x19, w20, sxtb #3");
+ COMPARE(adds(w21, w22, Operand(w23, SXTH, 2)), "adds w21, w22, w23, sxth #2");
+ COMPARE(add(x24, x25, Operand(x26, SXTW, 1)), "add x24, x25, w26, sxtw #1");
+ COMPARE(adds(cp, jssp, Operand(fp, SXTX)), "adds cp, jssp, fp, sxtx");
+ COMPARE(cmn(w0, Operand(w1, UXTB, 2)), "cmn w0, w1, uxtb #2");
+ COMPARE(cmn(x2, Operand(x3, SXTH, 4)), "cmn x2, w3, sxth #4");
+
+ COMPARE(add(w0, wcsp, Operand(w1, UXTB)), "add w0, wcsp, w1, uxtb");
+ COMPARE(add(x2, csp, Operand(x3, UXTH, 1)), "add x2, csp, w3, uxth #1");
+ COMPARE(add(wcsp, wcsp, Operand(w4, UXTW, 2)), "add wcsp, wcsp, w4, lsl #2");
+ COMPARE(cmn(csp, Operand(xzr, UXTX, 3)), "cmn csp, xzr, lsl #3");
+ COMPARE(cmn(csp, Operand(xzr, LSL, 4)), "cmn csp, xzr, lsl #4");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_extended) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(w2, UXTB)), "sub w0, w1, w2, uxtb");
+ COMPARE(subs(x3, x4, Operand(w5, UXTB, 1)), "subs x3, x4, w5, uxtb #1");
+ COMPARE(sub(w6, w7, Operand(w8, UXTH, 2)), "sub w6, w7, w8, uxth #2");
+ COMPARE(subs(x9, x10, Operand(x11, UXTW, 3)), "subs x9, x10, w11, uxtw #3");
+ COMPARE(sub(x12, x13, Operand(x14, UXTX, 4)), "sub x12, x13, x14, uxtx #4");
+ COMPARE(subs(w15, w16, Operand(w17, SXTB, 4)), "subs w15, w16, w17, sxtb #4");
+ COMPARE(sub(x18, x19, Operand(x20, SXTB, 3)), "sub x18, x19, w20, sxtb #3");
+ COMPARE(subs(w21, w22, Operand(w23, SXTH, 2)), "subs w21, w22, w23, sxth #2");
+ COMPARE(sub(x24, x25, Operand(x26, SXTW, 1)), "sub x24, x25, w26, sxtw #1");
+ COMPARE(subs(cp, jssp, Operand(fp, SXTX)), "subs cp, jssp, fp, sxtx");
+ COMPARE(cmp(w0, Operand(w1, SXTB, 1)), "cmp w0, w1, sxtb #1");
+ COMPARE(cmp(x2, Operand(x3, UXTH, 3)), "cmp x2, w3, uxth #3");
+
+ COMPARE(sub(w0, wcsp, Operand(w1, UXTB)), "sub w0, wcsp, w1, uxtb");
+ COMPARE(sub(x2, csp, Operand(x3, UXTH, 1)), "sub x2, csp, w3, uxth #1");
+ COMPARE(sub(wcsp, wcsp, Operand(w4, UXTW, 2)), "sub wcsp, wcsp, w4, lsl #2");
+ COMPARE(cmp(csp, Operand(xzr, UXTX, 3)), "cmp csp, xzr, lsl #3");
+ COMPARE(cmp(csp, Operand(xzr, LSL, 4)), "cmp csp, xzr, lsl #4");
+
+ CLEANUP();
+}
+
+
+TEST_(adc_subc_ngc) {
+ SET_UP();
+
+ COMPARE(adc(w0, w1, Operand(w2)), "adc w0, w1, w2");
+ COMPARE(adc(x3, x4, Operand(x5)), "adc x3, x4, x5");
+ COMPARE(adcs(w6, w7, Operand(w8)), "adcs w6, w7, w8");
+ COMPARE(adcs(x9, x10, Operand(x11)), "adcs x9, x10, x11");
+ COMPARE(sbc(w12, w13, Operand(w14)), "sbc w12, w13, w14");
+ COMPARE(sbc(x15, x16, Operand(x17)), "sbc x15, x16, x17");
+ COMPARE(sbcs(w18, w19, Operand(w20)), "sbcs w18, w19, w20");
+ COMPARE(sbcs(x21, x22, Operand(x23)), "sbcs x21, x22, x23");
+ COMPARE(ngc(w24, Operand(w25)), "ngc w24, w25");
+ COMPARE(ngc(x26, Operand(cp)), "ngc x26, cp");
+ COMPARE(ngcs(w28, Operand(w29)), "ngcs w28, w29");
+ COMPARE(ngcs(lr, Operand(x0)), "ngcs lr, x0");
+
+ CLEANUP();
+}
+
+
+TEST_(mul_and_div) {
+ SET_UP();
+
+ COMPARE(mul(w0, w1, w2), "mul w0, w1, w2");
+ COMPARE(mul(x3, x4, x5), "mul x3, x4, x5");
+ COMPARE(mul(w30, w0, w1), "mul w30, w0, w1");
+ COMPARE(mul(lr, x0, x1), "mul lr, x0, x1");
+ COMPARE(mneg(w0, w1, w2), "mneg w0, w1, w2");
+ COMPARE(mneg(x3, x4, x5), "mneg x3, x4, x5");
+ COMPARE(mneg(w30, w0, w1), "mneg w30, w0, w1");
+ COMPARE(mneg(lr, x0, x1), "mneg lr, x0, x1");
+ COMPARE(smull(x0, w0, w1), "smull x0, w0, w1");
+ COMPARE(smull(lr, w30, w0), "smull lr, w30, w0");
+ COMPARE(smulh(x0, x1, x2), "smulh x0, x1, x2");
+
+ COMPARE(madd(w0, w1, w2, w3), "madd w0, w1, w2, w3");
+ COMPARE(madd(x4, x5, x6, x7), "madd x4, x5, x6, x7");
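+ // With the zero register as the addend, madd and msub disassemble as mul and mneg.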
+ COMPARE(madd(w8, w9, w10, wzr), "mul w8, w9, w10");
+ COMPARE(madd(x11, x12, x13, xzr), "mul x11, x12, x13");
+ COMPARE(msub(w14, w15, w16, w17), "msub w14, w15, w16, w17");
+ COMPARE(msub(x18, x19, x20, x21), "msub x18, x19, x20, x21");
+ COMPARE(msub(w22, w23, w24, wzr), "mneg w22, w23, w24");
+ COMPARE(msub(x25, x26, x0, xzr), "mneg x25, x26, x0");
+
+ COMPARE(sdiv(w0, w1, w2), "sdiv w0, w1, w2");
+ COMPARE(sdiv(x3, x4, x5), "sdiv x3, x4, x5");
+ COMPARE(udiv(w6, w7, w8), "udiv w6, w7, w8");
+ COMPARE(udiv(x9, x10, x11), "udiv x9, x10, x11");
+
+ CLEANUP();
+}
+
+
+TEST(maddl_msubl) {
+ SET_UP();
+
+ COMPARE(smaddl(x0, w1, w2, x3), "smaddl x0, w1, w2, x3");
+ COMPARE(smaddl(x25, w21, w22, x16), "smaddl x25, w21, w22, x16");
+ COMPARE(umaddl(x0, w1, w2, x3), "umaddl x0, w1, w2, x3");
+ COMPARE(umaddl(x25, w21, w22, x16), "umaddl x25, w21, w22, x16");
+
+ COMPARE(smsubl(x0, w1, w2, x3), "smsubl x0, w1, w2, x3");
+ COMPARE(smsubl(x25, w21, w22, x16), "smsubl x25, w21, w22, x16");
+ COMPARE(umsubl(x0, w1, w2, x3), "umsubl x0, w1, w2, x3");
+ COMPARE(umsubl(x25, w21, w22, x16), "umsubl x25, w21, w22, x16");
+
+ CLEANUP();
+}
+
+
+TEST_(dp_1_source) {
+ SET_UP();
+
+ COMPARE(rbit(w0, w1), "rbit w0, w1");
+ COMPARE(rbit(x2, x3), "rbit x2, x3");
+ COMPARE(rev16(w4, w5), "rev16 w4, w5");
+ COMPARE(rev16(x6, x7), "rev16 x6, x7");
+ COMPARE(rev32(x8, x9), "rev32 x8, x9");
+ COMPARE(rev(w10, w11), "rev w10, w11");
+ COMPARE(rev(x12, x13), "rev x12, x13");
+ COMPARE(clz(w14, w15), "clz w14, w15");
+ COMPARE(clz(x16, x17), "clz x16, x17");
+ COMPARE(cls(w18, w19), "cls w18, w19");
+ COMPARE(cls(x20, x21), "cls x20, x21");
+
+ CLEANUP();
+}
+
+
+TEST_(bitfield) {
+ SET_UP();
+
+ COMPARE(sxtb(w0, w1), "sxtb w0, w1");
+ COMPARE(sxtb(x2, x3), "sxtb x2, w3");
+ COMPARE(sxth(w4, w5), "sxth w4, w5");
+ COMPARE(sxth(x6, x7), "sxth x6, w7");
+ COMPARE(sxtw(x8, x9), "sxtw x8, w9");
+ COMPARE(sxtb(x0, w1), "sxtb x0, w1");
+ COMPARE(sxth(x2, w3), "sxth x2, w3");
+ COMPARE(sxtw(x4, w5), "sxtw x4, w5");
+
+ COMPARE(uxtb(w10, w11), "uxtb w10, w11");
+ COMPARE(uxtb(x12, x13), "uxtb x12, w13");
+ COMPARE(uxth(w14, w15), "uxth w14, w15");
+ COMPARE(uxth(x16, x17), "uxth x16, w17");
+ COMPARE(uxtw(x18, x19), "ubfx x18, x19, #0, #32");
+
+ COMPARE(asr(w20, w21, 10), "asr w20, w21, #10");
+ COMPARE(asr(x22, x23, 20), "asr x22, x23, #20");
+ COMPARE(lsr(w24, w25, 10), "lsr w24, w25, #10");
+ COMPARE(lsr(x26, cp, 20), "lsr x26, cp, #20");
+ COMPARE(lsl(w28, w29, 10), "lsl w28, w29, #10");
+ COMPARE(lsl(lr, x0, 20), "lsl lr, x0, #20");
+
+ COMPARE(sbfiz(w1, w2, 1, 20), "sbfiz w1, w2, #1, #20");
+ COMPARE(sbfiz(x3, x4, 2, 19), "sbfiz x3, x4, #2, #19");
+ COMPARE(sbfx(w5, w6, 3, 18), "sbfx w5, w6, #3, #18");
+ COMPARE(sbfx(x7, x8, 4, 17), "sbfx x7, x8, #4, #17");
+ COMPARE(bfi(w9, w10, 5, 16), "bfi w9, w10, #5, #16");
+ COMPARE(bfi(x11, x12, 6, 15), "bfi x11, x12, #6, #15");
+ COMPARE(bfxil(w13, w14, 7, 14), "bfxil w13, w14, #7, #14");
+ COMPARE(bfxil(x15, x16, 8, 13), "bfxil x15, x16, #8, #13");
+ COMPARE(ubfiz(w17, w18, 9, 12), "ubfiz w17, w18, #9, #12");
+ COMPARE(ubfiz(x19, x20, 10, 11), "ubfiz x19, x20, #10, #11");
+ COMPARE(ubfx(w21, w22, 11, 10), "ubfx w21, w22, #11, #10");
+ COMPARE(ubfx(x23, x24, 12, 9), "ubfx x23, x24, #12, #9");
+
+ CLEANUP();
+}
+
+
+TEST_(extract) {
+ SET_UP();
+
+ COMPARE(extr(w0, w1, w2, 0), "extr w0, w1, w2, #0");
+ COMPARE(extr(x3, x4, x5, 1), "extr x3, x4, x5, #1");
+ COMPARE(extr(w6, w7, w8, 31), "extr w6, w7, w8, #31");
+ COMPARE(extr(x9, x10, x11, 63), "extr x9, x10, x11, #63");
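+ // When both source registers are the same, extr disassembles as ror.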
+ COMPARE(extr(w12, w13, w13, 10), "ror w12, w13, #10");
+ COMPARE(extr(x14, x15, x15, 42), "ror x14, x15, #42");
+
+ CLEANUP();
+}
+
+
+TEST_(logical_immediate) {
+ SET_UP();
+ #define RESULT_SIZE (256)
+
+ char result[RESULT_SIZE];
+
+ // Test immediate encoding - 64-bit destination.
+ // 64-bit patterns.
+ uint64_t value = 0x7fffffff;
+ for (int i = 0; i < 64; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 32-bit patterns.
+ value = 0x00003fff00003fffL;
+ for (int i = 0; i < 32; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 16-bit patterns.
+ value = 0x001f001f001f001fL;
+ for (int i = 0; i < 16; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 8-bit patterns.
+ value = 0x0e0e0e0e0e0e0e0eL;
+ for (int i = 0; i < 8; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 4-bit patterns.
+ value = 0x6666666666666666L;
+ for (int i = 0; i < 4; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 2-bit patterns.
+ COMPARE(and_(x0, x0, Operand(0x5555555555555555L)),
+ "and x0, x0, #0x5555555555555555");
+ COMPARE(and_(x0, x0, Operand(0xaaaaaaaaaaaaaaaaL)),
+ "and x0, x0, #0xaaaaaaaaaaaaaaaa");
+
+ // Test immediate encoding - 32-bit destination.
+ COMPARE(and_(w0, w0, Operand(0xff8007ff)),
+ "and w0, w0, #0xff8007ff"); // 32-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0xf87ff87f)),
+ "and w0, w0, #0xf87ff87f"); // 16-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0x87878787)),
+ "and w0, w0, #0x87878787"); // 8-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0x66666666)),
+ "and w0, w0, #0x66666666"); // 4-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0x55555555)),
+ "and w0, w0, #0x55555555"); // 2-bit pattern.
+
+ // Test other instructions.
+ COMPARE(tst(w1, Operand(0x11111111)),
+ "tst w1, #0x11111111");
+ COMPARE(tst(x2, Operand(0x8888888888888888L)),
+ "tst x2, #0x8888888888888888");
+ COMPARE(orr(w7, w8, Operand(0xaaaaaaaa)),
+ "orr w7, w8, #0xaaaaaaaa");
+ COMPARE(orr(x9, x10, Operand(0x5555555555555555L)),
+ "orr x9, x10, #0x5555555555555555");
+ COMPARE(eor(w15, w16, Operand(0x00000001)),
+ "eor w15, w16, #0x1");
+ COMPARE(eor(x17, x18, Operand(0x0000000000000003L)),
+ "eor x17, x18, #0x3");
+ COMPARE(ands(w23, w24, Operand(0x0000000f)), "ands w23, w24, #0xf");
+ COMPARE(ands(x25, x26, Operand(0x800000000000000fL)),
+ "ands x25, x26, #0x800000000000000f");
+
+ // Test inverse.
+ COMPARE(bic(w3, w4, Operand(0x20202020)),
+ "and w3, w4, #0xdfdfdfdf");
+ COMPARE(bic(x5, x6, Operand(0x4040404040404040L)),
+ "and x5, x6, #0xbfbfbfbfbfbfbfbf");
+ COMPARE(orn(w11, w12, Operand(0x40004000)),
+ "orr w11, w12, #0xbfffbfff");
+ COMPARE(orn(x13, x14, Operand(0x8181818181818181L)),
+ "orr x13, x14, #0x7e7e7e7e7e7e7e7e");
+ COMPARE(eon(w19, w20, Operand(0x80000001)),
+ "eor w19, w20, #0x7ffffffe");
+ COMPARE(eon(x21, x22, Operand(0xc000000000000003L)),
+ "eor x21, x22, #0x3ffffffffffffffc");
+ COMPARE(bics(w27, w28, Operand(0xfffffff7)), "ands w27, w28, #0x8");
+ COMPARE(bics(fp, x0, Operand(0xfffffffeffffffffL)),
+ "ands fp, x0, #0x100000000");
+
+ // Test stack pointer.
+ COMPARE(and_(wcsp, wzr, Operand(7)), "and wcsp, wzr, #0x7");
+ COMPARE(ands(xzr, xzr, Operand(7)), "tst xzr, #0x7");
+ COMPARE(orr(csp, xzr, Operand(15)), "orr csp, xzr, #0xf");
+ COMPARE(eor(wcsp, w0, Operand(31)), "eor wcsp, w0, #0x1f");
+
+ // Test move aliases.
+ COMPARE(orr(w0, wzr, Operand(0x00000780)), "orr w0, wzr, #0x780");
+ COMPARE(orr(w1, wzr, Operand(0x00007800)), "orr w1, wzr, #0x7800");
+ COMPARE(orr(w2, wzr, Operand(0x00078000)), "mov w2, #0x78000");
+ COMPARE(orr(w3, wzr, Operand(0x00780000)), "orr w3, wzr, #0x780000");
+ COMPARE(orr(w4, wzr, Operand(0x07800000)), "orr w4, wzr, #0x7800000");
+ COMPARE(orr(x5, xzr, Operand(0xffffffffffffc001UL)),
+ "orr x5, xzr, #0xffffffffffffc001");
+ COMPARE(orr(x6, xzr, Operand(0xfffffffffffc001fUL)),
+ "mov x6, #0xfffffffffffc001f");
+ COMPARE(orr(x7, xzr, Operand(0xffffffffffc001ffUL)),
+ "mov x7, #0xffffffffffc001ff");
+ COMPARE(orr(x8, xzr, Operand(0xfffffffffc001fffUL)),
+ "mov x8, #0xfffffffffc001fff");
+ COMPARE(orr(x9, xzr, Operand(0xffffffffc001ffffUL)),
+ "orr x9, xzr, #0xffffffffc001ffff");
+
+ CLEANUP();
+}
+
+
+TEST_(logical_shifted) {
+ SET_UP();
+
+ COMPARE(and_(w0, w1, Operand(w2)), "and w0, w1, w2");
+ COMPARE(and_(x3, x4, Operand(x5, LSL, 1)), "and x3, x4, x5, lsl #1");
+ COMPARE(and_(w6, w7, Operand(w8, LSR, 2)), "and w6, w7, w8, lsr #2");
+ COMPARE(and_(x9, x10, Operand(x11, ASR, 3)), "and x9, x10, x11, asr #3");
+ COMPARE(and_(w12, w13, Operand(w14, ROR, 4)), "and w12, w13, w14, ror #4");
+
+ COMPARE(bic(w15, w16, Operand(w17)), "bic w15, w16, w17");
+ COMPARE(bic(x18, x19, Operand(x20, LSL, 5)), "bic x18, x19, x20, lsl #5");
+ COMPARE(bic(w21, w22, Operand(w23, LSR, 6)), "bic w21, w22, w23, lsr #6");
+ COMPARE(bic(x24, x25, Operand(x26, ASR, 7)), "bic x24, x25, x26, asr #7");
+ COMPARE(bic(w27, w28, Operand(w29, ROR, 8)), "bic w27, w28, w29, ror #8");
+
+ COMPARE(orr(w0, w1, Operand(w2)), "orr w0, w1, w2");
+ COMPARE(orr(x3, x4, Operand(x5, LSL, 9)), "orr x3, x4, x5, lsl #9");
+ COMPARE(orr(w6, w7, Operand(w8, LSR, 10)), "orr w6, w7, w8, lsr #10");
+ COMPARE(orr(x9, x10, Operand(x11, ASR, 11)), "orr x9, x10, x11, asr #11");
+ COMPARE(orr(w12, w13, Operand(w14, ROR, 12)), "orr w12, w13, w14, ror #12");
+
+ COMPARE(orn(w15, w16, Operand(w17)), "orn w15, w16, w17");
+ COMPARE(orn(x18, x19, Operand(x20, LSL, 13)), "orn x18, x19, x20, lsl #13");
+ COMPARE(orn(w21, w22, Operand(w23, LSR, 14)), "orn w21, w22, w23, lsr #14");
+ COMPARE(orn(x24, x25, Operand(x26, ASR, 15)), "orn x24, x25, x26, asr #15");
+ COMPARE(orn(w27, w28, Operand(w29, ROR, 16)), "orn w27, w28, w29, ror #16");
+
+ COMPARE(eor(w0, w1, Operand(w2)), "eor w0, w1, w2");
+ COMPARE(eor(x3, x4, Operand(x5, LSL, 17)), "eor x3, x4, x5, lsl #17");
+ COMPARE(eor(w6, w7, Operand(w8, LSR, 18)), "eor w6, w7, w8, lsr #18");
+ COMPARE(eor(x9, x10, Operand(x11, ASR, 19)), "eor x9, x10, x11, asr #19");
+ COMPARE(eor(w12, w13, Operand(w14, ROR, 20)), "eor w12, w13, w14, ror #20");
+
+ COMPARE(eon(w15, w16, Operand(w17)), "eon w15, w16, w17");
+ COMPARE(eon(x18, x19, Operand(x20, LSL, 21)), "eon x18, x19, x20, lsl #21");
+ COMPARE(eon(w21, w22, Operand(w23, LSR, 22)), "eon w21, w22, w23, lsr #22");
+ COMPARE(eon(x24, x25, Operand(x26, ASR, 23)), "eon x24, x25, x26, asr #23");
+ COMPARE(eon(w27, w28, Operand(w29, ROR, 24)), "eon w27, w28, w29, ror #24");
+
+ COMPARE(ands(w0, w1, Operand(w2)), "ands w0, w1, w2");
+ COMPARE(ands(x3, x4, Operand(x5, LSL, 1)), "ands x3, x4, x5, lsl #1");
+ COMPARE(ands(w6, w7, Operand(w8, LSR, 2)), "ands w6, w7, w8, lsr #2");
+ COMPARE(ands(x9, x10, Operand(x11, ASR, 3)), "ands x9, x10, x11, asr #3");
+ COMPARE(ands(w12, w13, Operand(w14, ROR, 4)), "ands w12, w13, w14, ror #4");
+
+ COMPARE(bics(w15, w16, Operand(w17)), "bics w15, w16, w17");
+ COMPARE(bics(x18, x19, Operand(x20, LSL, 5)), "bics x18, x19, x20, lsl #5");
+ COMPARE(bics(w21, w22, Operand(w23, LSR, 6)), "bics w21, w22, w23, lsr #6");
+ COMPARE(bics(x24, x25, Operand(x26, ASR, 7)), "bics x24, x25, x26, asr #7");
+ COMPARE(bics(w27, w28, Operand(w29, ROR, 8)), "bics w27, w28, w29, ror #8");
+
+ COMPARE(tst(w0, Operand(w1)), "tst w0, w1");
+ COMPARE(tst(w2, Operand(w3, ROR, 10)), "tst w2, w3, ror #10");
+ COMPARE(tst(x0, Operand(x1)), "tst x0, x1");
+ COMPARE(tst(x2, Operand(x3, ROR, 42)), "tst x2, x3, ror #42");
+
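+ // The zero register as first source selects the mvn and mov aliases; mov has no shifted form.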
+ COMPARE(orn(w0, wzr, Operand(w1)), "mvn w0, w1");
+ COMPARE(orn(w2, wzr, Operand(w3, ASR, 5)), "mvn w2, w3, asr #5");
+ COMPARE(orn(x0, xzr, Operand(x1)), "mvn x0, x1");
+ COMPARE(orn(x2, xzr, Operand(x3, ASR, 42)), "mvn x2, x3, asr #42");
+
+ COMPARE(orr(w0, wzr, Operand(w1)), "mov w0, w1");
+ COMPARE(orr(x0, xzr, Operand(x1)), "mov x0, x1");
+ COMPARE(orr(w16, wzr, Operand(w17, LSL, 1)), "orr w16, wzr, w17, lsl #1");
+ COMPARE(orr(x16, xzr, Operand(x17, ASR, 2)), "orr x16, xzr, x17, asr #2");
+
+ CLEANUP();
+}
+
+
+TEST_(dp_2_source) {
+ SET_UP();
+
+ COMPARE(lslv(w0, w1, w2), "lsl w0, w1, w2");
+ COMPARE(lslv(x3, x4, x5), "lsl x3, x4, x5");
+ COMPARE(lsrv(w6, w7, w8), "lsr w6, w7, w8");
+ COMPARE(lsrv(x9, x10, x11), "lsr x9, x10, x11");
+ COMPARE(asrv(w12, w13, w14), "asr w12, w13, w14");
+ COMPARE(asrv(x15, x16, x17), "asr x15, x16, x17");
+ COMPARE(rorv(w18, w19, w20), "ror w18, w19, w20");
+ COMPARE(rorv(x21, x22, x23), "ror x21, x22, x23");
+
+ CLEANUP();
+}
+
+
+TEST_(adr) {
+ SET_UP();
+
+ COMPARE(adr(x0, 0), "adr x0, #+0x0");
+ COMPARE(adr(x1, 1), "adr x1, #+0x1");
+ COMPARE(adr(x2, -1), "adr x2, #-0x1");
+ COMPARE(adr(x3, 4), "adr x3, #+0x4");
+ COMPARE(adr(x4, -4), "adr x4, #-0x4");
+ COMPARE(adr(x5, 0x000fffff), "adr x5, #+0xfffff");
+ COMPARE(adr(x6, -0x00100000), "adr x6, #-0x100000");
+ COMPARE(adr(xzr, 0), "adr xzr, #+0x0");
+
+ CLEANUP();
+}
+
+
+TEST_(branch) {
+ SET_UP();
+
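+ // Branch immediates are encoded as instruction offsets, so scale byte offsets down by the instruction size.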
+ #define INST_OFF(x) ((x) >> kInstructionSizeLog2)
+ COMPARE(b(INST_OFF(0x4)), "b #+0x4");
+ COMPARE(b(INST_OFF(-0x4)), "b #-0x4");
+ COMPARE(b(INST_OFF(0x7fffffc)), "b #+0x7fffffc");
+ COMPARE(b(INST_OFF(-0x8000000)), "b #-0x8000000");
+ COMPARE(b(INST_OFF(0xffffc), eq), "b.eq #+0xffffc");
+ COMPARE(b(INST_OFF(-0x100000), mi), "b.mi #-0x100000");
+ COMPARE(bl(INST_OFF(0x4)), "bl #+0x4");
+ COMPARE(bl(INST_OFF(-0x4)), "bl #-0x4");
+ COMPARE(bl(INST_OFF(0xffffc)), "bl #+0xffffc");
+ COMPARE(bl(INST_OFF(-0x100000)), "bl #-0x100000");
+ COMPARE(cbz(w0, INST_OFF(0xffffc)), "cbz w0, #+0xffffc");
+ COMPARE(cbz(x1, INST_OFF(-0x100000)), "cbz x1, #-0x100000");
+ COMPARE(cbnz(w2, INST_OFF(0xffffc)), "cbnz w2, #+0xffffc");
+ COMPARE(cbnz(x3, INST_OFF(-0x100000)), "cbnz x3, #-0x100000");
+ COMPARE(tbz(w4, 0, INST_OFF(0x7ffc)), "tbz w4, #0, #+0x7ffc");
+ COMPARE(tbz(x5, 63, INST_OFF(-0x8000)), "tbz x5, #63, #-0x8000");
+ COMPARE(tbz(w6, 31, INST_OFF(0)), "tbz w6, #31, #+0x0");
+ COMPARE(tbz(x7, 31, INST_OFF(0x4)), "tbz w7, #31, #+0x4");
+ COMPARE(tbz(x8, 32, INST_OFF(0x8)), "tbz x8, #32, #+0x8");
+ COMPARE(tbnz(w8, 0, INST_OFF(0x7ffc)), "tbnz w8, #0, #+0x7ffc");
+ COMPARE(tbnz(x9, 63, INST_OFF(-0x8000)), "tbnz x9, #63, #-0x8000");
+ COMPARE(tbnz(w10, 31, INST_OFF(0)), "tbnz w10, #31, #+0x0");
+ COMPARE(tbnz(x11, 31, INST_OFF(0x4)), "tbnz w11, #31, #+0x4");
+ COMPARE(tbnz(x12, 32, INST_OFF(0x8)), "tbnz x12, #32, #+0x8");
+ COMPARE(br(x0), "br x0");
+ COMPARE(blr(x1), "blr x1");
+ COMPARE(ret(x2), "ret x2");
+ COMPARE(ret(lr), "ret");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store) {
+ SET_UP();
+
+ COMPARE(ldr(w0, MemOperand(x1)), "ldr w0, [x1]");
+ COMPARE(ldr(w2, MemOperand(x3, 4)), "ldr w2, [x3, #4]");
+ COMPARE(ldr(w4, MemOperand(x5, 16380)), "ldr w4, [x5, #16380]");
+ COMPARE(ldr(x6, MemOperand(x7)), "ldr x6, [x7]");
+ COMPARE(ldr(x8, MemOperand(x9, 8)), "ldr x8, [x9, #8]");
+ COMPARE(ldr(x10, MemOperand(x11, 32760)), "ldr x10, [x11, #32760]");
+ COMPARE(str(w12, MemOperand(x13)), "str w12, [x13]");
+ COMPARE(str(w14, MemOperand(x15, 4)), "str w14, [x15, #4]");
+ COMPARE(str(w16, MemOperand(x17, 16380)), "str w16, [x17, #16380]");
+ COMPARE(str(x18, MemOperand(x19)), "str x18, [x19]");
+ COMPARE(str(x20, MemOperand(x21, 8)), "str x20, [x21, #8]");
+ COMPARE(str(x22, MemOperand(x23, 32760)), "str x22, [x23, #32760]");
+
+ COMPARE(ldr(w0, MemOperand(x1, 4, PreIndex)), "ldr w0, [x1, #4]!");
+ COMPARE(ldr(w2, MemOperand(x3, 255, PreIndex)), "ldr w2, [x3, #255]!");
+ COMPARE(ldr(w4, MemOperand(x5, -256, PreIndex)), "ldr w4, [x5, #-256]!");
+ COMPARE(ldr(x6, MemOperand(x7, 8, PreIndex)), "ldr x6, [x7, #8]!");
+ COMPARE(ldr(x8, MemOperand(x9, 255, PreIndex)), "ldr x8, [x9, #255]!");
+ COMPARE(ldr(x10, MemOperand(x11, -256, PreIndex)), "ldr x10, [x11, #-256]!");
+ COMPARE(str(w12, MemOperand(x13, 4, PreIndex)), "str w12, [x13, #4]!");
+ COMPARE(str(w14, MemOperand(x15, 255, PreIndex)), "str w14, [x15, #255]!");
+ COMPARE(str(w16, MemOperand(x17, -256, PreIndex)), "str w16, [x17, #-256]!");
+ COMPARE(str(x18, MemOperand(x19, 8, PreIndex)), "str x18, [x19, #8]!");
+ COMPARE(str(x20, MemOperand(x21, 255, PreIndex)), "str x20, [x21, #255]!");
+ COMPARE(str(x22, MemOperand(x23, -256, PreIndex)), "str x22, [x23, #-256]!");
+
+ COMPARE(ldr(w0, MemOperand(x1, 4, PostIndex)), "ldr w0, [x1], #4");
+ COMPARE(ldr(w2, MemOperand(x3, 255, PostIndex)), "ldr w2, [x3], #255");
+ COMPARE(ldr(w4, MemOperand(x5, -256, PostIndex)), "ldr w4, [x5], #-256");
+ COMPARE(ldr(x6, MemOperand(x7, 8, PostIndex)), "ldr x6, [x7], #8");
+ COMPARE(ldr(x8, MemOperand(x9, 255, PostIndex)), "ldr x8, [x9], #255");
+ COMPARE(ldr(x10, MemOperand(x11, -256, PostIndex)), "ldr x10, [x11], #-256");
+ COMPARE(str(w12, MemOperand(x13, 4, PostIndex)), "str w12, [x13], #4");
+ COMPARE(str(w14, MemOperand(x15, 255, PostIndex)), "str w14, [x15], #255");
+ COMPARE(str(w16, MemOperand(x17, -256, PostIndex)), "str w16, [x17], #-256");
+ COMPARE(str(x18, MemOperand(x19, 8, PostIndex)), "str x18, [x19], #8");
+ COMPARE(str(x20, MemOperand(x21, 255, PostIndex)), "str x20, [x21], #255");
+ COMPARE(str(x22, MemOperand(x23, -256, PostIndex)), "str x22, [x23], #-256");
+
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldr(w24, MemOperand(jssp)), "ldr w24, [jssp]");
+ COMPARE(ldr(x25, MemOperand(jssp, 8)), "ldr x25, [jssp, #8]");
+ COMPARE(str(w26, MemOperand(jssp, 4, PreIndex)), "str w26, [jssp, #4]!");
+ COMPARE(str(cp, MemOperand(jssp, -8, PostIndex)), "str cp, [jssp], #-8");
+
+ COMPARE(ldrsw(x0, MemOperand(x1)), "ldrsw x0, [x1]");
+ COMPARE(ldrsw(x2, MemOperand(x3, 8)), "ldrsw x2, [x3, #8]");
+ COMPARE(ldrsw(x4, MemOperand(x5, 42, PreIndex)), "ldrsw x4, [x5, #42]!");
+ COMPARE(ldrsw(x6, MemOperand(x7, -11, PostIndex)), "ldrsw x6, [x7], #-11");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_regoffset) {
+ SET_UP();
+
+ COMPARE(ldr(w0, MemOperand(x1, w2, UXTW)), "ldr w0, [x1, w2, uxtw]");
+ COMPARE(ldr(w3, MemOperand(x4, w5, UXTW, 2)), "ldr w3, [x4, w5, uxtw #2]");
+ COMPARE(ldr(w6, MemOperand(x7, x8)), "ldr w6, [x7, x8]");
+ COMPARE(ldr(w9, MemOperand(x10, x11, LSL, 2)), "ldr w9, [x10, x11, lsl #2]");
+ COMPARE(ldr(w12, MemOperand(x13, w14, SXTW)), "ldr w12, [x13, w14, sxtw]");
+ COMPARE(ldr(w15, MemOperand(x16, w17, SXTW, 2)),
+ "ldr w15, [x16, w17, sxtw #2]");
+ COMPARE(ldr(w18, MemOperand(x19, x20, SXTX)), "ldr w18, [x19, x20, sxtx]");
+ COMPARE(ldr(w21, MemOperand(x22, x23, SXTX, 2)),
+ "ldr w21, [x22, x23, sxtx #2]");
+ COMPARE(ldr(x0, MemOperand(x1, w2, UXTW)), "ldr x0, [x1, w2, uxtw]");
+ COMPARE(ldr(x3, MemOperand(x4, w5, UXTW, 3)), "ldr x3, [x4, w5, uxtw #3]");
+ COMPARE(ldr(x6, MemOperand(x7, x8)), "ldr x6, [x7, x8]");
+ COMPARE(ldr(x9, MemOperand(x10, x11, LSL, 3)), "ldr x9, [x10, x11, lsl #3]");
+ COMPARE(ldr(x12, MemOperand(x13, w14, SXTW)), "ldr x12, [x13, w14, sxtw]");
+ COMPARE(ldr(x15, MemOperand(x16, w17, SXTW, 3)),
+ "ldr x15, [x16, w17, sxtw #3]");
+ COMPARE(ldr(x18, MemOperand(x19, x20, SXTX)), "ldr x18, [x19, x20, sxtx]");
+ COMPARE(ldr(x21, MemOperand(x22, x23, SXTX, 3)),
+ "ldr x21, [x22, x23, sxtx #3]");
+
+ COMPARE(str(w0, MemOperand(x1, w2, UXTW)), "str w0, [x1, w2, uxtw]");
+ COMPARE(str(w3, MemOperand(x4, w5, UXTW, 2)), "str w3, [x4, w5, uxtw #2]");
+ COMPARE(str(w6, MemOperand(x7, x8)), "str w6, [x7, x8]");
+ COMPARE(str(w9, MemOperand(x10, x11, LSL, 2)), "str w9, [x10, x11, lsl #2]");
+ COMPARE(str(w12, MemOperand(x13, w14, SXTW)), "str w12, [x13, w14, sxtw]");
+ COMPARE(str(w15, MemOperand(x16, w17, SXTW, 2)),
+ "str w15, [x16, w17, sxtw #2]");
+ COMPARE(str(w18, MemOperand(x19, x20, SXTX)), "str w18, [x19, x20, sxtx]");
+ COMPARE(str(w21, MemOperand(x22, x23, SXTX, 2)),
+ "str w21, [x22, x23, sxtx #2]");
+ COMPARE(str(x0, MemOperand(x1, w2, UXTW)), "str x0, [x1, w2, uxtw]");
+ COMPARE(str(x3, MemOperand(x4, w5, UXTW, 3)), "str x3, [x4, w5, uxtw #3]");
+ COMPARE(str(x6, MemOperand(x7, x8)), "str x6, [x7, x8]");
+ COMPARE(str(x9, MemOperand(x10, x11, LSL, 3)), "str x9, [x10, x11, lsl #3]");
+ COMPARE(str(x12, MemOperand(x13, w14, SXTW)), "str x12, [x13, w14, sxtw]");
+ COMPARE(str(x15, MemOperand(x16, w17, SXTW, 3)),
+ "str x15, [x16, w17, sxtw #3]");
+ COMPARE(str(x18, MemOperand(x19, x20, SXTX)), "str x18, [x19, x20, sxtx]");
+ COMPARE(str(x21, MemOperand(x22, x23, SXTX, 3)),
+ "str x21, [x22, x23, sxtx #3]");
+
+ COMPARE(ldrb(w0, MemOperand(x1, w2, UXTW)), "ldrb w0, [x1, w2, uxtw]");
+ COMPARE(ldrb(w6, MemOperand(x7, x8)), "ldrb w6, [x7, x8]");
+ COMPARE(ldrb(w12, MemOperand(x13, w14, SXTW)), "ldrb w12, [x13, w14, sxtw]");
+ COMPARE(ldrb(w18, MemOperand(x19, x20, SXTX)), "ldrb w18, [x19, x20, sxtx]");
+ COMPARE(strb(w0, MemOperand(x1, w2, UXTW)), "strb w0, [x1, w2, uxtw]");
+ COMPARE(strb(w6, MemOperand(x7, x8)), "strb w6, [x7, x8]");
+ COMPARE(strb(w12, MemOperand(x13, w14, SXTW)), "strb w12, [x13, w14, sxtw]");
+ COMPARE(strb(w18, MemOperand(x19, x20, SXTX)), "strb w18, [x19, x20, sxtx]");
+
+ COMPARE(ldrh(w0, MemOperand(x1, w2, UXTW)), "ldrh w0, [x1, w2, uxtw]");
+ COMPARE(ldrh(w3, MemOperand(x4, w5, UXTW, 1)), "ldrh w3, [x4, w5, uxtw #1]");
+ COMPARE(ldrh(w6, MemOperand(x7, x8)), "ldrh w6, [x7, x8]");
+ COMPARE(ldrh(w9, MemOperand(x10, x11, LSL, 1)),
+ "ldrh w9, [x10, x11, lsl #1]");
+ COMPARE(ldrh(w12, MemOperand(x13, w14, SXTW)), "ldrh w12, [x13, w14, sxtw]");
+ COMPARE(ldrh(w15, MemOperand(x16, w17, SXTW, 1)),
+ "ldrh w15, [x16, w17, sxtw #1]");
+ COMPARE(ldrh(w18, MemOperand(x19, x20, SXTX)), "ldrh w18, [x19, x20, sxtx]");
+ COMPARE(ldrh(w21, MemOperand(x22, x23, SXTX, 1)),
+ "ldrh w21, [x22, x23, sxtx #1]");
+ COMPARE(strh(w0, MemOperand(x1, w2, UXTW)), "strh w0, [x1, w2, uxtw]");
+ COMPARE(strh(w3, MemOperand(x4, w5, UXTW, 1)), "strh w3, [x4, w5, uxtw #1]");
+ COMPARE(strh(w6, MemOperand(x7, x8)), "strh w6, [x7, x8]");
+ COMPARE(strh(w9, MemOperand(x10, x11, LSL, 1)),
+ "strh w9, [x10, x11, lsl #1]");
+ COMPARE(strh(w12, MemOperand(x13, w14, SXTW)), "strh w12, [x13, w14, sxtw]");
+ COMPARE(strh(w15, MemOperand(x16, w17, SXTW, 1)),
+ "strh w15, [x16, w17, sxtw #1]");
+ COMPARE(strh(w18, MemOperand(x19, x20, SXTX)), "strh w18, [x19, x20, sxtx]");
+ COMPARE(strh(w21, MemOperand(x22, x23, SXTX, 1)),
+ "strh w21, [x22, x23, sxtx #1]");
+
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldr(x0, MemOperand(jssp, wzr, SXTW)), "ldr x0, [jssp, wzr, sxtw]");
+ COMPARE(str(x1, MemOperand(jssp, xzr)), "str x1, [jssp, xzr]");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_byte) {
+ SET_UP();
+
+ COMPARE(ldrb(w0, MemOperand(x1)), "ldrb w0, [x1]");
+ COMPARE(ldrb(x2, MemOperand(x3)), "ldrb w2, [x3]");
+ COMPARE(ldrb(w4, MemOperand(x5, 4095)), "ldrb w4, [x5, #4095]");
+ COMPARE(ldrb(w6, MemOperand(x7, 255, PreIndex)), "ldrb w6, [x7, #255]!");
+ COMPARE(ldrb(w8, MemOperand(x9, -256, PreIndex)), "ldrb w8, [x9, #-256]!");
+ COMPARE(ldrb(w10, MemOperand(x11, 255, PostIndex)), "ldrb w10, [x11], #255");
+ COMPARE(ldrb(w12, MemOperand(x13, -256, PostIndex)),
+ "ldrb w12, [x13], #-256");
+ COMPARE(strb(w14, MemOperand(x15)), "strb w14, [x15]");
+ COMPARE(strb(x16, MemOperand(x17)), "strb w16, [x17]");
+ COMPARE(strb(w18, MemOperand(x19, 4095)), "strb w18, [x19, #4095]");
+ COMPARE(strb(w20, MemOperand(x21, 255, PreIndex)), "strb w20, [x21, #255]!");
+ COMPARE(strb(w22, MemOperand(x23, -256, PreIndex)),
+ "strb w22, [x23, #-256]!");
+ COMPARE(strb(w24, MemOperand(x25, 255, PostIndex)), "strb w24, [x25], #255");
+ COMPARE(strb(w26, MemOperand(cp, -256, PostIndex)),
+ "strb w26, [cp], #-256");
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldrb(w28, MemOperand(jssp, 3, PostIndex)), "ldrb w28, [jssp], #3");
+ COMPARE(strb(fp, MemOperand(jssp, -42, PreIndex)), "strb w29, [jssp, #-42]!");
+ COMPARE(ldrsb(w0, MemOperand(x1)), "ldrsb w0, [x1]");
+ COMPARE(ldrsb(x2, MemOperand(x3, 8)), "ldrsb x2, [x3, #8]");
+ COMPARE(ldrsb(w4, MemOperand(x5, 42, PreIndex)), "ldrsb w4, [x5, #42]!");
+ COMPARE(ldrsb(x6, MemOperand(x7, -11, PostIndex)), "ldrsb x6, [x7], #-11");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_half) {
+ SET_UP();
+
+ COMPARE(ldrh(w0, MemOperand(x1)), "ldrh w0, [x1]");
+ COMPARE(ldrh(x2, MemOperand(x3)), "ldrh w2, [x3]");
+ COMPARE(ldrh(w4, MemOperand(x5, 8190)), "ldrh w4, [x5, #8190]");
+ COMPARE(ldrh(w6, MemOperand(x7, 255, PreIndex)), "ldrh w6, [x7, #255]!");
+ COMPARE(ldrh(w8, MemOperand(x9, -256, PreIndex)), "ldrh w8, [x9, #-256]!");
+ COMPARE(ldrh(w10, MemOperand(x11, 255, PostIndex)), "ldrh w10, [x11], #255");
+ COMPARE(ldrh(w12, MemOperand(x13, -256, PostIndex)),
+ "ldrh w12, [x13], #-256");
+ COMPARE(strh(w14, MemOperand(x15)), "strh w14, [x15]");
+ COMPARE(strh(x16, MemOperand(x17)), "strh w16, [x17]");
+ COMPARE(strh(w18, MemOperand(x19, 8190)), "strh w18, [x19, #8190]");
+ COMPARE(strh(w20, MemOperand(x21, 255, PreIndex)), "strh w20, [x21, #255]!");
+ COMPARE(strh(w22, MemOperand(x23, -256, PreIndex)),
+ "strh w22, [x23, #-256]!");
+ COMPARE(strh(w24, MemOperand(x25, 255, PostIndex)), "strh w24, [x25], #255");
+ COMPARE(strh(w26, MemOperand(cp, -256, PostIndex)),
+ "strh w26, [cp], #-256");
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldrh(w28, MemOperand(jssp, 3, PostIndex)), "ldrh w28, [jssp], #3");
+ COMPARE(strh(fp, MemOperand(jssp, -42, PreIndex)), "strh w29, [jssp, #-42]!");
+ COMPARE(ldrh(w30, MemOperand(x0, 255)), "ldurh w30, [x0, #255]");
+ COMPARE(ldrh(x1, MemOperand(x2, -256)), "ldurh w1, [x2, #-256]");
+ COMPARE(strh(w3, MemOperand(x4, 255)), "sturh w3, [x4, #255]");
+ COMPARE(strh(x5, MemOperand(x6, -256)), "sturh w5, [x6, #-256]");
+ COMPARE(ldrsh(w0, MemOperand(x1)), "ldrsh w0, [x1]");
+ COMPARE(ldrsh(w2, MemOperand(x3, 8)), "ldrsh w2, [x3, #8]");
+ COMPARE(ldrsh(w4, MemOperand(x5, 42, PreIndex)), "ldrsh w4, [x5, #42]!");
+ COMPARE(ldrsh(x6, MemOperand(x7, -11, PostIndex)), "ldrsh x6, [x7], #-11");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_fp) {
+ SET_UP();
+
+ COMPARE(ldr(s0, MemOperand(x1)), "ldr s0, [x1]");
+ COMPARE(ldr(s2, MemOperand(x3, 4)), "ldr s2, [x3, #4]");
+ COMPARE(ldr(s4, MemOperand(x5, 16380)), "ldr s4, [x5, #16380]");
+ COMPARE(ldr(d6, MemOperand(x7)), "ldr d6, [x7]");
+ COMPARE(ldr(d8, MemOperand(x9, 8)), "ldr d8, [x9, #8]");
+ COMPARE(ldr(d10, MemOperand(x11, 32760)), "ldr d10, [x11, #32760]");
+ COMPARE(str(s12, MemOperand(x13)), "str s12, [x13]");
+ COMPARE(str(s14, MemOperand(x15, 4)), "str s14, [x15, #4]");
+ COMPARE(str(s16, MemOperand(x17, 16380)), "str s16, [x17, #16380]");
+ COMPARE(str(d18, MemOperand(x19)), "str d18, [x19]");
+ COMPARE(str(d20, MemOperand(x21, 8)), "str d20, [x21, #8]");
+ COMPARE(str(d22, MemOperand(x23, 32760)), "str d22, [x23, #32760]");
+
+ COMPARE(ldr(s0, MemOperand(x1, 4, PreIndex)), "ldr s0, [x1, #4]!");
+ COMPARE(ldr(s2, MemOperand(x3, 255, PreIndex)), "ldr s2, [x3, #255]!");
+ COMPARE(ldr(s4, MemOperand(x5, -256, PreIndex)), "ldr s4, [x5, #-256]!");
+ COMPARE(ldr(d6, MemOperand(x7, 8, PreIndex)), "ldr d6, [x7, #8]!");
+ COMPARE(ldr(d8, MemOperand(x9, 255, PreIndex)), "ldr d8, [x9, #255]!");
+ COMPARE(ldr(d10, MemOperand(x11, -256, PreIndex)), "ldr d10, [x11, #-256]!");
+ COMPARE(str(s12, MemOperand(x13, 4, PreIndex)), "str s12, [x13, #4]!");
+ COMPARE(str(s14, MemOperand(x15, 255, PreIndex)), "str s14, [x15, #255]!");
+ COMPARE(str(s16, MemOperand(x17, -256, PreIndex)), "str s16, [x17, #-256]!");
+ COMPARE(str(d18, MemOperand(x19, 8, PreIndex)), "str d18, [x19, #8]!");
+ COMPARE(str(d20, MemOperand(x21, 255, PreIndex)), "str d20, [x21, #255]!");
+ COMPARE(str(d22, MemOperand(x23, -256, PreIndex)), "str d22, [x23, #-256]!");
+
+ COMPARE(ldr(s0, MemOperand(x1, 4, PostIndex)), "ldr s0, [x1], #4");
+ COMPARE(ldr(s2, MemOperand(x3, 255, PostIndex)), "ldr s2, [x3], #255");
+ COMPARE(ldr(s4, MemOperand(x5, -256, PostIndex)), "ldr s4, [x5], #-256");
+ COMPARE(ldr(d6, MemOperand(x7, 8, PostIndex)), "ldr d6, [x7], #8");
+ COMPARE(ldr(d8, MemOperand(x9, 255, PostIndex)), "ldr d8, [x9], #255");
+ COMPARE(ldr(d10, MemOperand(x11, -256, PostIndex)), "ldr d10, [x11], #-256");
+ COMPARE(str(s12, MemOperand(x13, 4, PostIndex)), "str s12, [x13], #4");
+ COMPARE(str(s14, MemOperand(x15, 255, PostIndex)), "str s14, [x15], #255");
+ COMPARE(str(s16, MemOperand(x17, -256, PostIndex)), "str s16, [x17], #-256");
+ COMPARE(str(d18, MemOperand(x19, 8, PostIndex)), "str d18, [x19], #8");
+ COMPARE(str(d20, MemOperand(x21, 255, PostIndex)), "str d20, [x21], #255");
+ COMPARE(str(d22, MemOperand(x23, -256, PostIndex)), "str d22, [x23], #-256");
+
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldr(s24, MemOperand(jssp)), "ldr s24, [jssp]");
+ COMPARE(ldr(d25, MemOperand(jssp, 8)), "ldr d25, [jssp, #8]");
+ COMPARE(str(s26, MemOperand(jssp, 4, PreIndex)), "str s26, [jssp, #4]!");
+ COMPARE(str(d27, MemOperand(jssp, -8, PostIndex)), "str d27, [jssp], #-8");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_unscaled) {
+ SET_UP();
+
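+ // Offsets that cannot be encoded as scaled immediates use the unscaled ldur/stur forms.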
+ COMPARE(ldr(w0, MemOperand(x1, 1)), "ldur w0, [x1, #1]");
+ COMPARE(ldr(w2, MemOperand(x3, -1)), "ldur w2, [x3, #-1]");
+ COMPARE(ldr(w4, MemOperand(x5, 255)), "ldur w4, [x5, #255]");
+ COMPARE(ldr(w6, MemOperand(x7, -256)), "ldur w6, [x7, #-256]");
+ COMPARE(ldr(x8, MemOperand(x9, 1)), "ldur x8, [x9, #1]");
+ COMPARE(ldr(x10, MemOperand(x11, -1)), "ldur x10, [x11, #-1]");
+ COMPARE(ldr(x12, MemOperand(x13, 255)), "ldur x12, [x13, #255]");
+ COMPARE(ldr(x14, MemOperand(x15, -256)), "ldur x14, [x15, #-256]");
+ COMPARE(str(w16, MemOperand(x17, 1)), "stur w16, [x17, #1]");
+ COMPARE(str(w18, MemOperand(x19, -1)), "stur w18, [x19, #-1]");
+ COMPARE(str(w20, MemOperand(x21, 255)), "stur w20, [x21, #255]");
+ COMPARE(str(w22, MemOperand(x23, -256)), "stur w22, [x23, #-256]");
+ COMPARE(str(x24, MemOperand(x25, 1)), "stur x24, [x25, #1]");
+ COMPARE(str(x26, MemOperand(cp, -1)), "stur x26, [cp, #-1]");
+ COMPARE(str(jssp, MemOperand(fp, 255)), "stur jssp, [fp, #255]");
+ COMPARE(str(lr, MemOperand(x0, -256)), "stur lr, [x0, #-256]");
+ COMPARE(ldr(w0, MemOperand(csp, 1)), "ldur w0, [csp, #1]");
+ COMPARE(str(x1, MemOperand(csp, -1)), "stur x1, [csp, #-1]");
+ COMPARE(ldrb(w2, MemOperand(x3, -2)), "ldurb w2, [x3, #-2]");
+ COMPARE(ldrsb(w4, MemOperand(x5, -3)), "ldursb w4, [x5, #-3]");
+ COMPARE(ldrsb(x6, MemOperand(x7, -4)), "ldursb x6, [x7, #-4]");
+ COMPARE(ldrh(w8, MemOperand(x9, -5)), "ldurh w8, [x9, #-5]");
+ COMPARE(ldrsh(w10, MemOperand(x11, -6)), "ldursh w10, [x11, #-6]");
+ COMPARE(ldrsh(x12, MemOperand(x13, -7)), "ldursh x12, [x13, #-7]");
+ COMPARE(ldrsw(x14, MemOperand(x15, -8)), "ldursw x14, [x15, #-8]");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_pair) {
+ SET_UP();
+
+ COMPARE(ldp(w0, w1, MemOperand(x2)), "ldp w0, w1, [x2]");
+ COMPARE(ldp(x3, x4, MemOperand(x5)), "ldp x3, x4, [x5]");
+ COMPARE(ldp(w6, w7, MemOperand(x8, 4)), "ldp w6, w7, [x8, #4]");
+ COMPARE(ldp(x9, x10, MemOperand(x11, 8)), "ldp x9, x10, [x11, #8]");
+ COMPARE(ldp(w12, w13, MemOperand(x14, 252)), "ldp w12, w13, [x14, #252]");
+ COMPARE(ldp(x15, x16, MemOperand(x17, 504)), "ldp x15, x16, [x17, #504]");
+ COMPARE(ldp(w18, w19, MemOperand(x20, -256)), "ldp w18, w19, [x20, #-256]");
+ COMPARE(ldp(x21, x22, MemOperand(x23, -512)), "ldp x21, x22, [x23, #-512]");
+ COMPARE(ldp(w24, w25, MemOperand(x26, 252, PreIndex)),
+ "ldp w24, w25, [x26, #252]!");
+ COMPARE(ldp(cp, jssp, MemOperand(fp, 504, PreIndex)),
+ "ldp cp, jssp, [fp, #504]!");
+ COMPARE(ldp(w30, w0, MemOperand(x1, -256, PreIndex)),
+ "ldp w30, w0, [x1, #-256]!");
+ COMPARE(ldp(x2, x3, MemOperand(x4, -512, PreIndex)),
+ "ldp x2, x3, [x4, #-512]!");
+ COMPARE(ldp(w5, w6, MemOperand(x7, 252, PostIndex)),
+ "ldp w5, w6, [x7], #252");
+ COMPARE(ldp(x8, x9, MemOperand(x10, 504, PostIndex)),
+ "ldp x8, x9, [x10], #504");
+ COMPARE(ldp(w11, w12, MemOperand(x13, -256, PostIndex)),
+ "ldp w11, w12, [x13], #-256");
+ COMPARE(ldp(x14, x15, MemOperand(x16, -512, PostIndex)),
+ "ldp x14, x15, [x16], #-512");
+
+ COMPARE(ldp(s17, s18, MemOperand(x19)), "ldp s17, s18, [x19]");
+ COMPARE(ldp(s20, s21, MemOperand(x22, 252)), "ldp s20, s21, [x22, #252]");
+ COMPARE(ldp(s23, s24, MemOperand(x25, -256)), "ldp s23, s24, [x25, #-256]");
+ COMPARE(ldp(s26, s27, MemOperand(jssp, 252, PreIndex)),
+ "ldp s26, s27, [jssp, #252]!");
+ COMPARE(ldp(s29, s30, MemOperand(fp, -256, PreIndex)),
+ "ldp s29, s30, [fp, #-256]!");
+ COMPARE(ldp(s31, s0, MemOperand(x1, 252, PostIndex)),
+ "ldp s31, s0, [x1], #252");
+ COMPARE(ldp(s2, s3, MemOperand(x4, -256, PostIndex)),
+ "ldp s2, s3, [x4], #-256");
+ COMPARE(ldp(d17, d18, MemOperand(x19)), "ldp d17, d18, [x19]");
+ COMPARE(ldp(d20, d21, MemOperand(x22, 504)), "ldp d20, d21, [x22, #504]");
+ COMPARE(ldp(d23, d24, MemOperand(x25, -512)), "ldp d23, d24, [x25, #-512]");
+ COMPARE(ldp(d26, d27, MemOperand(jssp, 504, PreIndex)),
+ "ldp d26, d27, [jssp, #504]!");
+ COMPARE(ldp(d29, d30, MemOperand(fp, -512, PreIndex)),
+ "ldp d29, d30, [fp, #-512]!");
+ COMPARE(ldp(d31, d0, MemOperand(x1, 504, PostIndex)),
+ "ldp d31, d0, [x1], #504");
+ COMPARE(ldp(d2, d3, MemOperand(x4, -512, PostIndex)),
+ "ldp d2, d3, [x4], #-512");
+
+ COMPARE(stp(w0, w1, MemOperand(x2)), "stp w0, w1, [x2]");
+ COMPARE(stp(x3, x4, MemOperand(x5)), "stp x3, x4, [x5]");
+ COMPARE(stp(w6, w7, MemOperand(x8, 4)), "stp w6, w7, [x8, #4]");
+ COMPARE(stp(x9, x10, MemOperand(x11, 8)), "stp x9, x10, [x11, #8]");
+ COMPARE(stp(w12, w13, MemOperand(x14, 252)), "stp w12, w13, [x14, #252]");
+ COMPARE(stp(x15, x16, MemOperand(x17, 504)), "stp x15, x16, [x17, #504]");
+ COMPARE(stp(w18, w19, MemOperand(x20, -256)), "stp w18, w19, [x20, #-256]");
+ COMPARE(stp(x21, x22, MemOperand(x23, -512)), "stp x21, x22, [x23, #-512]");
+ COMPARE(stp(w24, w25, MemOperand(x26, 252, PreIndex)),
+ "stp w24, w25, [x26, #252]!");
+ COMPARE(stp(cp, jssp, MemOperand(fp, 504, PreIndex)),
+ "stp cp, jssp, [fp, #504]!");
+ COMPARE(stp(w30, w0, MemOperand(x1, -256, PreIndex)),
+ "stp w30, w0, [x1, #-256]!");
+ COMPARE(stp(x2, x3, MemOperand(x4, -512, PreIndex)),
+ "stp x2, x3, [x4, #-512]!");
+ COMPARE(stp(w5, w6, MemOperand(x7, 252, PostIndex)),
+ "stp w5, w6, [x7], #252");
+ COMPARE(stp(x8, x9, MemOperand(x10, 504, PostIndex)),
+ "stp x8, x9, [x10], #504");
+ COMPARE(stp(w11, w12, MemOperand(x13, -256, PostIndex)),
+ "stp w11, w12, [x13], #-256");
+ COMPARE(stp(x14, x15, MemOperand(x16, -512, PostIndex)),
+ "stp x14, x15, [x16], #-512");
+
+ COMPARE(stp(s17, s18, MemOperand(x19)), "stp s17, s18, [x19]");
+ COMPARE(stp(s20, s21, MemOperand(x22, 252)), "stp s20, s21, [x22, #252]");
+ COMPARE(stp(s23, s24, MemOperand(x25, -256)), "stp s23, s24, [x25, #-256]");
+ COMPARE(stp(s26, s27, MemOperand(jssp, 252, PreIndex)),
+ "stp s26, s27, [jssp, #252]!");
+ COMPARE(stp(s29, s30, MemOperand(fp, -256, PreIndex)),
+ "stp s29, s30, [fp, #-256]!");
+ COMPARE(stp(s31, s0, MemOperand(x1, 252, PostIndex)),
+ "stp s31, s0, [x1], #252");
+ COMPARE(stp(s2, s3, MemOperand(x4, -256, PostIndex)),
+ "stp s2, s3, [x4], #-256");
+ COMPARE(stp(d17, d18, MemOperand(x19)), "stp d17, d18, [x19]");
+ COMPARE(stp(d20, d21, MemOperand(x22, 504)), "stp d20, d21, [x22, #504]");
+ COMPARE(stp(d23, d24, MemOperand(x25, -512)), "stp d23, d24, [x25, #-512]");
+ COMPARE(stp(d26, d27, MemOperand(jssp, 504, PreIndex)),
+ "stp d26, d27, [jssp, #504]!");
+ COMPARE(stp(d29, d30, MemOperand(fp, -512, PreIndex)),
+ "stp d29, d30, [fp, #-512]!");
+ COMPARE(stp(d31, d0, MemOperand(x1, 504, PostIndex)),
+ "stp d31, d0, [x1], #504");
+ COMPARE(stp(d2, d3, MemOperand(x4, -512, PostIndex)),
+ "stp d2, d3, [x4], #-512");
+
+ // TODO(all): Update / Restore this test.
+ COMPARE(ldp(w16, w17, MemOperand(jssp, 4, PostIndex)),
+ "ldp w16, w17, [jssp], #4");
+ COMPARE(stp(x18, x19, MemOperand(jssp, -8, PreIndex)),
+ "stp x18, x19, [jssp, #-8]!");
+ COMPARE(ldp(s30, s31, MemOperand(jssp, 12, PostIndex)),
+ "ldp s30, s31, [jssp], #12");
+ COMPARE(stp(d30, d31, MemOperand(jssp, -16)),
+ "stp d30, d31, [jssp, #-16]");
+
+ COMPARE(ldpsw(x0, x1, MemOperand(x2)), "ldpsw x0, x1, [x2]");
+ COMPARE(ldpsw(x3, x4, MemOperand(x5, 16)), "ldpsw x3, x4, [x5, #16]");
+ COMPARE(ldpsw(x6, x7, MemOperand(x8, -32, PreIndex)),
+ "ldpsw x6, x7, [x8, #-32]!");
+ COMPARE(ldpsw(x9, x10, MemOperand(x11, 128, PostIndex)),
+ "ldpsw x9, x10, [x11], #128");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_pair_nontemp) {
+ SET_UP();
+
+ COMPARE(ldnp(w0, w1, MemOperand(x2)), "ldnp w0, w1, [x2]");
+ COMPARE(stnp(w3, w4, MemOperand(x5, 252)), "stnp w3, w4, [x5, #252]");
+ COMPARE(ldnp(w6, w7, MemOperand(x8, -256)), "ldnp w6, w7, [x8, #-256]");
+ COMPARE(stnp(x9, x10, MemOperand(x11)), "stnp x9, x10, [x11]");
+ COMPARE(ldnp(x12, x13, MemOperand(x14, 504)), "ldnp x12, x13, [x14, #504]");
+ COMPARE(stnp(x15, x16, MemOperand(x17, -512)), "stnp x15, x16, [x17, #-512]");
+ COMPARE(ldnp(s18, s19, MemOperand(x20)), "ldnp s18, s19, [x20]");
+ COMPARE(stnp(s21, s22, MemOperand(x23, 252)), "stnp s21, s22, [x23, #252]");
+ COMPARE(ldnp(s24, s25, MemOperand(x26, -256)), "ldnp s24, s25, [x26, #-256]");
+ COMPARE(stnp(d27, d28, MemOperand(fp)), "stnp d27, d28, [fp]");
+ COMPARE(ldnp(d30, d31, MemOperand(x0, 504)), "ldnp d30, d31, [x0, #504]");
+ COMPARE(stnp(d1, d2, MemOperand(x3, -512)), "stnp d1, d2, [x3, #-512]");
+
+ CLEANUP();
+}
+
+#if 0 // TODO(all): enable.
+TEST_(load_literal) {
+ SET_UP();
+
+ COMPARE_PREFIX(ldr(x10, 0x1234567890abcdefUL), "ldr x10, pc+8");
+ COMPARE_PREFIX(ldr(w20, 0xfedcba09), "ldr w20, pc+8");
+ COMPARE_PREFIX(ldr(d11, 1.234), "ldr d11, pc+8");
+ COMPARE_PREFIX(ldr(s22, 2.5), "ldr s22, pc+8");
+
+ CLEANUP();
+}
+#endif
+
+TEST_(cond_select) {
+ SET_UP();
+
+ COMPARE(csel(w0, w1, w2, eq), "csel w0, w1, w2, eq");
+ COMPARE(csel(x3, x4, x5, ne), "csel x3, x4, x5, ne");
+ COMPARE(csinc(w6, w7, w8, hs), "csinc w6, w7, w8, hs");
+ COMPARE(csinc(x9, x10, x11, lo), "csinc x9, x10, x11, lo");
+ COMPARE(csinv(w12, w13, w14, mi), "csinv w12, w13, w14, mi");
+ COMPARE(csinv(x15, x16, x17, pl), "csinv x15, x16, x17, pl");
+ COMPARE(csneg(w18, w19, w20, vs), "csneg w18, w19, w20, vs");
+ COMPARE(csneg(x21, x22, x23, vc), "csneg x21, x22, x23, vc");
+ COMPARE(cset(w24, hi), "cset w24, hi");
+ COMPARE(cset(x25, ls), "cset x25, ls");
+ COMPARE(csetm(w26, ge), "csetm w26, ge");
+ COMPARE(csetm(cp, lt), "csetm cp, lt");
+ COMPARE(cinc(w28, w29, gt), "cinc w28, w29, gt");
+ COMPARE(cinc(lr, x0, le), "cinc lr, x0, le");
+ COMPARE(cinv(w1, w2, eq), "cinv w1, w2, eq");
+ COMPARE(cinv(x3, x4, ne), "cinv x3, x4, ne");
+ COMPARE(cneg(w5, w6, hs), "cneg w5, w6, hs");
+ COMPARE(cneg(x7, x8, lo), "cneg x7, x8, lo");
+
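+ // The al and nv conditions are accepted and disassembled literally.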
+ COMPARE(csel(x0, x1, x2, al), "csel x0, x1, x2, al");
+ COMPARE(csel(x1, x2, x3, nv), "csel x1, x2, x3, nv");
+ COMPARE(csinc(x2, x3, x4, al), "csinc x2, x3, x4, al");
+ COMPARE(csinc(x3, x4, x5, nv), "csinc x3, x4, x5, nv");
+ COMPARE(csinv(x4, x5, x6, al), "csinv x4, x5, x6, al");
+ COMPARE(csinv(x5, x6, x7, nv), "csinv x5, x6, x7, nv");
+ COMPARE(csneg(x6, x7, x8, al), "csneg x6, x7, x8, al");
+ COMPARE(csneg(x7, x8, x9, nv), "csneg x7, x8, x9, nv");
+
+ CLEANUP();
+}
+
+
+TEST(cond_select_macro) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Csel(w0, w1, -1, eq), "csinv w0, w1, wzr, eq");
+ COMPARE(Csel(w2, w3, 0, ne), "csel w2, w3, wzr, ne");
+ COMPARE(Csel(w4, w5, 1, hs), "csinc w4, w5, wzr, hs");
+ COMPARE(Csel(x6, x7, -1, lo), "csinv x6, x7, xzr, lo");
+ COMPARE(Csel(x8, x9, 0, mi), "csel x8, x9, xzr, mi");
+ COMPARE(Csel(x10, x11, 1, pl), "csinc x10, x11, xzr, pl");
+
+ CLEANUP();
+}
+
+
+TEST_(cond_cmp) {
+ SET_UP();
+
+ COMPARE(ccmn(w0, w1, NZCVFlag, eq), "ccmn w0, w1, #NZCV, eq");
+ COMPARE(ccmn(x2, x3, NZCFlag, ne), "ccmn x2, x3, #NZCv, ne");
+ COMPARE(ccmp(w4, w5, NZVFlag, hs), "ccmp w4, w5, #NZcV, hs");
+ COMPARE(ccmp(x6, x7, NZFlag, lo), "ccmp x6, x7, #NZcv, lo");
+ COMPARE(ccmn(w8, 31, NFlag, mi), "ccmn w8, #31, #Nzcv, mi");
+ COMPARE(ccmn(x9, 30, NCFlag, pl), "ccmn x9, #30, #NzCv, pl");
+ COMPARE(ccmp(w10, 29, NVFlag, vs), "ccmp w10, #29, #NzcV, vs");
+ COMPARE(ccmp(x11, 28, NFlag, vc), "ccmp x11, #28, #Nzcv, vc");
+ COMPARE(ccmn(w12, w13, NoFlag, al), "ccmn w12, w13, #nzcv, al");
+ COMPARE(ccmp(x14, 27, ZVFlag, nv), "ccmp x14, #27, #nZcV, nv");
+
+ CLEANUP();
+}
+
+
+TEST_(cond_cmp_macro) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Ccmp(w0, -1, VFlag, hi), "ccmn w0, #1, #nzcV, hi");
+ COMPARE(Ccmp(x1, -31, CFlag, ge), "ccmn x1, #31, #nzCv, ge");
+ COMPARE(Ccmn(w2, -1, CVFlag, gt), "ccmp w2, #1, #nzCV, gt");
+ COMPARE(Ccmn(x3, -31, ZCVFlag, ls), "ccmp x3, #31, #nZCV, ls");
+
+ CLEANUP();
+}
+
+
+TEST_(fmov_imm) {
+ SET_UP();
+
+ COMPARE(fmov(s0, 1.0), "fmov s0, #0x70 (1.0000)");
+ COMPARE(fmov(s31, -13.0), "fmov s31, #0xaa (-13.0000)");
+ COMPARE(fmov(d1, 1.0), "fmov d1, #0x70 (1.0000)");
+ COMPARE(fmov(d29, -13.0), "fmov d29, #0xaa (-13.0000)");
+
+ CLEANUP();
+}
+
+
+TEST_(fmov_reg) {
+ SET_UP();
+
+ COMPARE(fmov(w3, s13), "fmov w3, s13");
+ COMPARE(fmov(x6, d26), "fmov x6, d26");
+ COMPARE(fmov(s11, w30), "fmov s11, w30");
+ COMPARE(fmov(d31, x2), "fmov d31, x2");
+ COMPARE(fmov(s12, s13), "fmov s12, s13");
+ COMPARE(fmov(d22, d23), "fmov d22, d23");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_dp1) {
+ SET_UP();
+
+ COMPARE(fabs(s0, s1), "fabs s0, s1");
+ COMPARE(fabs(s31, s30), "fabs s31, s30");
+ COMPARE(fabs(d2, d3), "fabs d2, d3");
+ COMPARE(fabs(d31, d30), "fabs d31, d30");
+ COMPARE(fneg(s4, s5), "fneg s4, s5");
+ COMPARE(fneg(s31, s30), "fneg s31, s30");
+ COMPARE(fneg(d6, d7), "fneg d6, d7");
+ COMPARE(fneg(d31, d30), "fneg d31, d30");
+ COMPARE(fsqrt(s8, s9), "fsqrt s8, s9");
+ COMPARE(fsqrt(s31, s30), "fsqrt s31, s30");
+ COMPARE(fsqrt(d10, d11), "fsqrt d10, d11");
+ COMPARE(fsqrt(d31, d30), "fsqrt d31, d30");
+ COMPARE(frinta(s10, s11), "frinta s10, s11");
+ COMPARE(frinta(s31, s30), "frinta s31, s30");
+ COMPARE(frinta(d12, d13), "frinta d12, d13");
+ COMPARE(frinta(d31, d30), "frinta d31, d30");
+ COMPARE(frintn(s10, s11), "frintn s10, s11");
+ COMPARE(frintn(s31, s30), "frintn s31, s30");
+ COMPARE(frintn(d12, d13), "frintn d12, d13");
+ COMPARE(frintn(d31, d30), "frintn d31, d30");
+ COMPARE(frintz(s10, s11), "frintz s10, s11");
+ COMPARE(frintz(s31, s30), "frintz s31, s30");
+ COMPARE(frintz(d12, d13), "frintz d12, d13");
+ COMPARE(frintz(d31, d30), "frintz d31, d30");
+ COMPARE(fcvt(d14, s15), "fcvt d14, s15");
+ COMPARE(fcvt(d31, s31), "fcvt d31, s31");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_dp2) {
+ SET_UP();
+
+ COMPARE(fadd(s0, s1, s2), "fadd s0, s1, s2");
+ COMPARE(fadd(d3, d4, d5), "fadd d3, d4, d5");
+ COMPARE(fsub(s31, s30, s29), "fsub s31, s30, s29");
+ COMPARE(fsub(d31, d30, d29), "fsub d31, d30, d29");
+ COMPARE(fmul(s7, s8, s9), "fmul s7, s8, s9");
+ COMPARE(fmul(d10, d11, d12), "fmul d10, d11, d12");
+ COMPARE(fdiv(s13, s14, s15), "fdiv s13, s14, s15");
+ COMPARE(fdiv(d16, d17, d18), "fdiv d16, d17, d18");
+ COMPARE(fmax(s19, s20, s21), "fmax s19, s20, s21");
+ COMPARE(fmax(d22, d23, d24), "fmax d22, d23, d24");
+ COMPARE(fmin(s25, s26, s27), "fmin s25, s26, s27");
+ COMPARE(fmin(d28, d29, d30), "fmin d28, d29, d30");
+ COMPARE(fmaxnm(s31, s0, s1), "fmaxnm s31, s0, s1");
+ COMPARE(fmaxnm(d2, d3, d4), "fmaxnm d2, d3, d4");
+ COMPARE(fminnm(s5, s6, s7), "fminnm s5, s6, s7");
+ COMPARE(fminnm(d8, d9, d10), "fminnm d8, d9, d10");
+
+ CLEANUP();
+}
+
+
+TEST(fp_dp3) {
+ SET_UP();
+
+ COMPARE(fmadd(s7, s8, s9, s10), "fmadd s7, s8, s9, s10");
+ COMPARE(fmadd(d10, d11, d12, d10), "fmadd d10, d11, d12, d10");
+ COMPARE(fmsub(s7, s8, s9, s10), "fmsub s7, s8, s9, s10");
+ COMPARE(fmsub(d10, d11, d12, d10), "fmsub d10, d11, d12, d10");
+
+ COMPARE(fnmadd(s7, s8, s9, s10), "fnmadd s7, s8, s9, s10");
+ COMPARE(fnmadd(d10, d11, d12, d10), "fnmadd d10, d11, d12, d10");
+ COMPARE(fnmsub(s7, s8, s9, s10), "fnmsub s7, s8, s9, s10");
+ COMPARE(fnmsub(d10, d11, d12, d10), "fnmsub d10, d11, d12, d10");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_compare) {
+ SET_UP();
+
+ COMPARE(fcmp(s0, s1), "fcmp s0, s1");
+ COMPARE(fcmp(s31, s30), "fcmp s31, s30");
+ COMPARE(fcmp(d0, d1), "fcmp d0, d1");
+ COMPARE(fcmp(d31, d30), "fcmp d31, d30");
+ COMPARE(fcmp(s12, 0), "fcmp s12, #0.0");
+ COMPARE(fcmp(d12, 0), "fcmp d12, #0.0");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_cond_compare) {
+ SET_UP();
+
+ COMPARE(fccmp(s0, s1, NoFlag, eq), "fccmp s0, s1, #nzcv, eq");
+ COMPARE(fccmp(s2, s3, ZVFlag, ne), "fccmp s2, s3, #nZcV, ne");
+ COMPARE(fccmp(s30, s16, NCFlag, pl), "fccmp s30, s16, #NzCv, pl");
+ COMPARE(fccmp(s31, s31, NZCVFlag, le), "fccmp s31, s31, #NZCV, le");
+ COMPARE(fccmp(d4, d5, VFlag, gt), "fccmp d4, d5, #nzcV, gt");
+ COMPARE(fccmp(d6, d7, NFlag, vs), "fccmp d6, d7, #Nzcv, vs");
+ COMPARE(fccmp(d30, d0, NZFlag, vc), "fccmp d30, d0, #NZcv, vc");
+ COMPARE(fccmp(d31, d31, ZFlag, hs), "fccmp d31, d31, #nZcv, hs");
+ COMPARE(fccmp(s14, s15, CVFlag, al), "fccmp s14, s15, #nzCV, al");
+ COMPARE(fccmp(d16, d17, CFlag, nv), "fccmp d16, d17, #nzCv, nv");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_select) {
+ SET_UP();
+
+ COMPARE(fcsel(s0, s1, s2, eq), "fcsel s0, s1, s2, eq");
+ COMPARE(fcsel(s31, s31, s30, ne), "fcsel s31, s31, s30, ne");
+ COMPARE(fcsel(d0, d1, d2, mi), "fcsel d0, d1, d2, mi");
+ COMPARE(fcsel(d31, d30, d31, pl), "fcsel d31, d30, d31, pl");
+ COMPARE(fcsel(s14, s15, s16, al), "fcsel s14, s15, s16, al");
+ COMPARE(fcsel(d17, d18, d19, nv), "fcsel d17, d18, d19, nv");
+
+ CLEANUP();
+}
+
+
+TEST_(fcvt_scvtf_ucvtf) {
+ SET_UP();
+
+ COMPARE(fcvtas(w0, s1), "fcvtas w0, s1");
+ COMPARE(fcvtas(x2, s3), "fcvtas x2, s3");
+ COMPARE(fcvtas(w4, d5), "fcvtas w4, d5");
+ COMPARE(fcvtas(x6, d7), "fcvtas x6, d7");
+ COMPARE(fcvtau(w8, s9), "fcvtau w8, s9");
+ COMPARE(fcvtau(x10, s11), "fcvtau x10, s11");
+ COMPARE(fcvtau(w12, d13), "fcvtau w12, d13");
+ COMPARE(fcvtau(x14, d15), "fcvtau x14, d15");
+ COMPARE(fcvtns(w0, s1), "fcvtns w0, s1");
+ COMPARE(fcvtns(x2, s3), "fcvtns x2, s3");
+ COMPARE(fcvtns(w4, d5), "fcvtns w4, d5");
+ COMPARE(fcvtns(x6, d7), "fcvtns x6, d7");
+ COMPARE(fcvtnu(w8, s9), "fcvtnu w8, s9");
+ COMPARE(fcvtnu(x10, s11), "fcvtnu x10, s11");
+ COMPARE(fcvtnu(w12, d13), "fcvtnu w12, d13");
+ COMPARE(fcvtnu(x14, d15), "fcvtnu x14, d15");
+ COMPARE(fcvtzu(x16, d17), "fcvtzu x16, d17");
+ COMPARE(fcvtzu(w18, d19), "fcvtzu w18, d19");
+ COMPARE(fcvtzs(x20, d21), "fcvtzs x20, d21");
+ COMPARE(fcvtzs(w22, d23), "fcvtzs w22, d23");
+ COMPARE(fcvtzu(x16, s17), "fcvtzu x16, s17");
+ COMPARE(fcvtzu(w18, s19), "fcvtzu w18, s19");
+ COMPARE(fcvtzs(x20, s21), "fcvtzs x20, s21");
+ COMPARE(fcvtzs(w22, s23), "fcvtzs w22, s23");
+ COMPARE(scvtf(d24, w25), "scvtf d24, w25");
+ COMPARE(scvtf(s24, w25), "scvtf s24, w25");
+ COMPARE(scvtf(d26, x0), "scvtf d26, x0");
+ COMPARE(scvtf(s26, x0), "scvtf s26, x0");
+ COMPARE(ucvtf(d28, w29), "ucvtf d28, w29");
+ COMPARE(ucvtf(s28, w29), "ucvtf s28, w29");
+ COMPARE(ucvtf(d0, x1), "ucvtf d0, x1");
+ COMPARE(ucvtf(s0, x1), "ucvtf s0, x1");
+ COMPARE(ucvtf(d0, x1, 0), "ucvtf d0, x1");
+ COMPARE(ucvtf(s0, x1, 0), "ucvtf s0, x1");
+ COMPARE(scvtf(d1, x2, 1), "scvtf d1, x2, #1");
+ COMPARE(scvtf(s1, x2, 1), "scvtf s1, x2, #1");
+ COMPARE(scvtf(d3, x4, 15), "scvtf d3, x4, #15");
+ COMPARE(scvtf(s3, x4, 15), "scvtf s3, x4, #15");
+ COMPARE(scvtf(d5, x6, 32), "scvtf d5, x6, #32");
+ COMPARE(scvtf(s5, x6, 32), "scvtf s5, x6, #32");
+ COMPARE(ucvtf(d7, x8, 2), "ucvtf d7, x8, #2");
+ COMPARE(ucvtf(s7, x8, 2), "ucvtf s7, x8, #2");
+ COMPARE(ucvtf(d9, x10, 16), "ucvtf d9, x10, #16");
+ COMPARE(ucvtf(s9, x10, 16), "ucvtf s9, x10, #16");
+ COMPARE(ucvtf(d11, x12, 33), "ucvtf d11, x12, #33");
+ COMPARE(ucvtf(s11, x12, 33), "ucvtf s11, x12, #33");
+ COMPARE(fcvtms(w0, s1), "fcvtms w0, s1");
+ COMPARE(fcvtms(x2, s3), "fcvtms x2, s3");
+ COMPARE(fcvtms(w4, d5), "fcvtms w4, d5");
+ COMPARE(fcvtms(x6, d7), "fcvtms x6, d7");
+ COMPARE(fcvtmu(w8, s9), "fcvtmu w8, s9");
+ COMPARE(fcvtmu(x10, s11), "fcvtmu x10, s11");
+ COMPARE(fcvtmu(w12, d13), "fcvtmu w12, d13");
+ COMPARE(fcvtmu(x14, d15), "fcvtmu x14, d15");
+
+ CLEANUP();
+}
+
+
+TEST_(system_mrs) {
+ SET_UP();
+
+ COMPARE(mrs(x0, NZCV), "mrs x0, nzcv");
+ COMPARE(mrs(lr, NZCV), "mrs lr, nzcv");
+ COMPARE(mrs(x15, FPCR), "mrs x15, fpcr");
+
+ CLEANUP();
+}
+
+
+TEST_(system_msr) {
+ SET_UP();
+
+ COMPARE(msr(NZCV, x0), "msr nzcv, x0");
+ COMPARE(msr(NZCV, x30), "msr nzcv, lr");
+ COMPARE(msr(FPCR, x15), "msr fpcr, x15");
+
+ CLEANUP();
+}
+
+
+TEST_(system_nop) {
+ SET_UP();
+
+ COMPARE(nop(), "nop");
+
+ CLEANUP();
+}
+
+
+TEST_(debug) {
+ SET_UP();
+
+ ASSERT(kImmExceptionIsDebug == 0xdeb0);
+
+ // All debug codes should produce the same instruction, and the debug code
+ // can be any uint32_t.
+ COMPARE(debug("message", 0, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 1, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0xffff, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0x10000, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0x7fffffff, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0x80000000u, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0xffffffffu, NO_PARAM), "hlt #0xdeb0");
+
+ CLEANUP();
+}
+
+
+TEST_(hlt) {
+ SET_UP();
+
+ COMPARE(hlt(0), "hlt #0x0");
+ COMPARE(hlt(1), "hlt #0x1");
+ COMPARE(hlt(65535), "hlt #0xffff");
+
+ CLEANUP();
+}
+
+
+TEST_(brk) {
+ SET_UP();
+
+ COMPARE(brk(0), "brk #0x0");
+ COMPARE(brk(1), "brk #0x1");
+ COMPARE(brk(65535), "brk #0xffff");
+
+ CLEANUP();
+}
+
+
+TEST_(add_sub_negative) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Add(x10, x0, -42), "sub x10, x0, #0x2a (42)");
+ COMPARE(Add(x11, x1, -687), "sub x11, x1, #0x2af (687)");
+ COMPARE(Add(x12, x2, -0x88), "sub x12, x2, #0x88 (136)");
+
+ COMPARE(Sub(x13, x0, -600), "add x13, x0, #0x258 (600)");
+ COMPARE(Sub(x14, x1, -313), "add x14, x1, #0x139 (313)");
+ COMPARE(Sub(x15, x2, -0x555), "add x15, x2, #0x555 (1365)");
+
+ COMPARE(Add(w19, w3, -0x344), "sub w19, w3, #0x344 (836)");
+ COMPARE(Add(w20, w4, -2000), "sub w20, w4, #0x7d0 (2000)");
+
+ COMPARE(Sub(w21, w3, -0xbc), "add w21, w3, #0xbc (188)");
+ COMPARE(Sub(w22, w4, -2000), "add w22, w4, #0x7d0 (2000)");
+
+ COMPARE(Cmp(w0, -1), "cmn w0, #0x1 (1)");
+ COMPARE(Cmp(x1, -1), "cmn x1, #0x1 (1)");
+ COMPARE(Cmp(w2, -4095), "cmn w2, #0xfff (4095)");
+ COMPARE(Cmp(x3, -4095), "cmn x3, #0xfff (4095)");
+
+ COMPARE(Cmn(w0, -1), "cmp w0, #0x1 (1)");
+ COMPARE(Cmn(x1, -1), "cmp x1, #0x1 (1)");
+ COMPARE(Cmn(w2, -4095), "cmp w2, #0xfff (4095)");
+ COMPARE(Cmn(x3, -4095), "cmp x3, #0xfff (4095)");
+
+ CLEANUP();
+}
+
+
+TEST_(logical_immediate_move) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(And(w0, w1, 0), "movz w0, #0x0");
+ COMPARE(And(x0, x1, 0), "movz x0, #0x0");
+ COMPARE(Orr(w2, w3, 0), "mov w2, w3");
+ COMPARE(Orr(x2, x3, 0), "mov x2, x3");
+ COMPARE(Eor(w4, w5, 0), "mov w4, w5");
+ COMPARE(Eor(x4, x5, 0), "mov x4, x5");
+ COMPARE(Bic(w6, w7, 0), "mov w6, w7");
+ COMPARE(Bic(x6, x7, 0), "mov x6, x7");
+ COMPARE(Orn(w8, w9, 0), "movn w8, #0x0");
+ COMPARE(Orn(x8, x9, 0), "movn x8, #0x0");
+ COMPARE(Eon(w10, w11, 0), "mvn w10, w11");
+ COMPARE(Eon(x10, x11, 0), "mvn x10, x11");
+
+ COMPARE(And(w12, w13, 0xffffffff), "mov w12, w13");
+ COMPARE(And(x12, x13, 0xffffffff), "and x12, x13, #0xffffffff");
+ COMPARE(And(x12, x13, 0xffffffffffffffff), "mov x12, x13");
+ COMPARE(Orr(w14, w15, 0xffffffff), "movn w14, #0x0");
+ COMPARE(Orr(x14, x15, 0xffffffff), "orr x14, x15, #0xffffffff");
+ COMPARE(Orr(x14, x15, 0xffffffffffffffff), "movn x14, #0x0");
+ COMPARE(Eor(w16, w17, 0xffffffff), "mvn w16, w17");
+ COMPARE(Eor(x16, x17, 0xffffffff), "eor x16, x17, #0xffffffff");
+ COMPARE(Eor(x16, x17, 0xffffffffffffffff), "mvn x16, x17");
+ COMPARE(Bic(w18, w19, 0xffffffff), "movz w18, #0x0");
+ COMPARE(Bic(x18, x19, 0xffffffff), "and x18, x19, #0xffffffff00000000");
+ COMPARE(Bic(x18, x19, 0xffffffffffffffff), "movz x18, #0x0");
+ COMPARE(Orn(w20, w21, 0xffffffff), "mov w20, w21");
+ COMPARE(Orn(x20, x21, 0xffffffff), "orr x20, x21, #0xffffffff00000000");
+ COMPARE(Orn(x20, x21, 0xffffffffffffffff), "mov x20, x21");
+ COMPARE(Eon(w22, w23, 0xffffffff), "mov w22, w23");
+ COMPARE(Eon(x22, x23, 0xffffffff), "eor x22, x23, #0xffffffff00000000");
+ COMPARE(Eon(x22, x23, 0xffffffffffffffff), "mov x22, x23");
+
+ CLEANUP();
+}
+
+
+TEST_(barriers) {
+ SET_UP_CLASS(MacroAssembler);
+
+ // DMB
+ COMPARE(Dmb(FullSystem, BarrierAll), "dmb sy");
+ COMPARE(Dmb(FullSystem, BarrierReads), "dmb ld");
+ COMPARE(Dmb(FullSystem, BarrierWrites), "dmb st");
+
+ COMPARE(Dmb(InnerShareable, BarrierAll), "dmb ish");
+ COMPARE(Dmb(InnerShareable, BarrierReads), "dmb ishld");
+ COMPARE(Dmb(InnerShareable, BarrierWrites), "dmb ishst");
+
+ COMPARE(Dmb(NonShareable, BarrierAll), "dmb nsh");
+ COMPARE(Dmb(NonShareable, BarrierReads), "dmb nshld");
+ COMPARE(Dmb(NonShareable, BarrierWrites), "dmb nshst");
+
+ COMPARE(Dmb(OuterShareable, BarrierAll), "dmb osh");
+ COMPARE(Dmb(OuterShareable, BarrierReads), "dmb oshld");
+ COMPARE(Dmb(OuterShareable, BarrierWrites), "dmb oshst");
+
+ COMPARE(Dmb(FullSystem, BarrierOther), "dmb sy (0b1100)");
+ COMPARE(Dmb(InnerShareable, BarrierOther), "dmb sy (0b1000)");
+ COMPARE(Dmb(NonShareable, BarrierOther), "dmb sy (0b0100)");
+ COMPARE(Dmb(OuterShareable, BarrierOther), "dmb sy (0b0000)");
+
+ // DSB
+ COMPARE(Dsb(FullSystem, BarrierAll), "dsb sy");
+ COMPARE(Dsb(FullSystem, BarrierReads), "dsb ld");
+ COMPARE(Dsb(FullSystem, BarrierWrites), "dsb st");
+
+ COMPARE(Dsb(InnerShareable, BarrierAll), "dsb ish");
+ COMPARE(Dsb(InnerShareable, BarrierReads), "dsb ishld");
+ COMPARE(Dsb(InnerShareable, BarrierWrites), "dsb ishst");
+
+ COMPARE(Dsb(NonShareable, BarrierAll), "dsb nsh");
+ COMPARE(Dsb(NonShareable, BarrierReads), "dsb nshld");
+ COMPARE(Dsb(NonShareable, BarrierWrites), "dsb nshst");
+
+ COMPARE(Dsb(OuterShareable, BarrierAll), "dsb osh");
+ COMPARE(Dsb(OuterShareable, BarrierReads), "dsb oshld");
+ COMPARE(Dsb(OuterShareable, BarrierWrites), "dsb oshst");
+
+ COMPARE(Dsb(FullSystem, BarrierOther), "dsb sy (0b1100)");
+ COMPARE(Dsb(InnerShareable, BarrierOther), "dsb sy (0b1000)");
+ COMPARE(Dsb(NonShareable, BarrierOther), "dsb sy (0b0100)");
+ COMPARE(Dsb(OuterShareable, BarrierOther), "dsb sy (0b0000)");
+
+ // ISB
+ COMPARE(Isb(), "isb");
+
+ CLEANUP();
+}
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 301545c6c4..f32a69c4a3 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -28,6 +28,7 @@
#include <stdlib.h>
#include "v8.h"
+#include "stub-cache.h"
#include "debug.h"
#include "disasm.h"
@@ -254,7 +255,7 @@ TEST(DisasmIa320) {
__ bind(&L2);
__ call(Operand(ebx, ecx, times_4, 10000));
__ nop();
- Handle<Code> ic(isolate->builtins()->builtin(Builtins::kLoadIC_Initialize));
+ Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_CONTEXTUAL));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ call(FUNCTION_ADDR(DummyStaticFunction), RelocInfo::RUNTIME_ENTRY);
@@ -348,7 +349,37 @@ TEST(DisasmIa320) {
__ fdivp(3);
__ fcompp();
__ fwait();
+ __ frndint();
+ __ fninit();
__ nop();
+
+ // SSE instruction
+ {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(&assm, SSE2);
+ // Move operation
+ __ movaps(xmm0, xmm1);
+ __ shufps(xmm0, xmm0, 0x0);
+
+ // logic operation
+ __ andps(xmm0, xmm1);
+ __ andps(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ orps(xmm0, xmm1);
+ __ orps(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ xorps(xmm0, xmm1);
+ __ xorps(xmm0, Operand(ebx, ecx, times_4, 10000));
+
+ // Arithmetic operation
+ __ addps(xmm1, xmm0);
+ __ addps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ subps(xmm1, xmm0);
+ __ subps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ mulps(xmm1, xmm0);
+ __ mulps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ divps(xmm1, xmm0);
+ __ divps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ }
+ }
{
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(&assm, SSE2);
@@ -356,7 +387,6 @@ TEST(DisasmIa320) {
__ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
- __ movaps(xmm0, xmm1);
// 128 bit move instructions.
__ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
__ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
@@ -370,7 +400,6 @@ TEST(DisasmIa320) {
__ ucomisd(xmm0, xmm1);
__ cmpltsd(xmm0, xmm1);
- __ andps(xmm0, xmm1);
__ andpd(xmm0, xmm1);
__ psllq(xmm0, 17);
__ psllq(xmm0, xmm1);
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 8fd036956f..9fce25fae4 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -90,11 +90,7 @@ TEST(DisasmX64) {
__ or_(rdx, Immediate(3));
__ xor_(rdx, Immediate(3));
__ nop();
- {
- CHECK(CpuFeatures::IsSupported(CPUID));
- CpuFeatures::Scope fscope(CPUID);
- __ cpuid();
- }
+ __ cpuid();
__ movsxbq(rdx, Operand(rcx, 0));
__ movsxwq(rdx, Operand(rcx, 0));
__ movzxbl(rdx, Operand(rcx, 0));
@@ -334,15 +330,34 @@ TEST(DisasmX64) {
__ fdivp(3);
__ fcompp();
__ fwait();
+ __ frndint();
+ __ fninit();
__ nop();
// SSE instruction
{
+ // Move operation
__ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
__ cvttss2si(rdx, xmm1);
__ movaps(xmm0, xmm1);
+ // logic operation
__ andps(xmm0, xmm1);
+ __ andps(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ orps(xmm0, xmm1);
+ __ orps(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ xorps(xmm0, xmm1);
+ __ xorps(xmm0, Operand(rbx, rcx, times_4, 10000));
+
+ // Arithmetic operation
+ __ addps(xmm1, xmm0);
+ __ addps(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ subps(xmm1, xmm0);
+ __ subps(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ mulps(xmm1, xmm0);
+ __ mulps(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ divps(xmm1, xmm0);
+ __ divps(xmm1, Operand(rbx, rcx, times_4, 10000));
}
// SSE 2 instructions
{
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 1a000afba2..d7c5083050 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -89,13 +89,13 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
// Verify inferred function name.
SmartArrayPointer<char> inferred_name =
shared_func_info->inferred_name()->ToCString();
- CHECK_EQ(ref_inferred_name, *inferred_name);
+ CHECK_EQ(ref_inferred_name, inferred_name.get());
#endif // ENABLE_DEBUGGER_SUPPORT
}
-static v8::Handle<v8::Script> Compile(const char* src) {
- return v8::Script::Compile(v8::String::New(src));
+static v8::Handle<v8::Script> Compile(v8::Isolate* isolate, const char* src) {
+ return v8::Script::Compile(v8::String::NewFromUtf8(isolate, src));
}
@@ -104,6 +104,7 @@ TEST(GlobalProperty) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"fun1 = function() { return 1; }\n"
"fun2 = function() { return 2; }\n");
CheckFunctionName(script, "return 1", "fun1");
@@ -116,6 +117,7 @@ TEST(GlobalVar) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"var fun1 = function() { return 1; }\n"
"var fun2 = function() { return 2; }\n");
CheckFunctionName(script, "return 1", "fun1");
@@ -128,6 +130,7 @@ TEST(LocalVar) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function outer() {\n"
" var fun1 = function() { return 1; }\n"
" var fun2 = function() { return 2; }\n"
@@ -142,6 +145,7 @@ TEST(InConstructor) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function MyClass() {\n"
" this.method1 = function() { return 1; }\n"
" this.method2 = function() { return 2; }\n"
@@ -156,6 +160,7 @@ TEST(Factory) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function createMyObj() {\n"
" var obj = {};\n"
" obj.method1 = function() { return 1; }\n"
@@ -172,6 +177,7 @@ TEST(Static) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function MyClass() {}\n"
"MyClass.static1 = function() { return 1; }\n"
"MyClass.static2 = function() { return 2; }\n"
@@ -190,6 +196,7 @@ TEST(Prototype) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function MyClass() {}\n"
"MyClass.prototype.method1 = function() { return 1; }\n"
"MyClass.prototype.method2 = function() { return 2; }\n"
@@ -208,6 +215,7 @@ TEST(ObjectLiteral) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function MyClass() {}\n"
"MyClass.prototype = {\n"
" method1: function() { return 1; },\n"
@@ -222,6 +230,7 @@ TEST(AsParameter) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function f1(a) { return a(); }\n"
"function f2(a, b) { return a() + b(); }\n"
"var result1 = f1(function() { return 1; })\n"
@@ -238,6 +247,7 @@ TEST(MultipleFuncsConditional) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"fun1 = 0 ?\n"
" function() { return 1; } :\n"
" function() { return 2; }");
@@ -251,6 +261,7 @@ TEST(MultipleFuncsInLiteral) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function MyClass() {}\n"
"MyClass.prototype = {\n"
" method1: 0 ? function() { return 1; } :\n"
@@ -265,6 +276,7 @@ TEST(AnonymousInAnonymousClosure1) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"(function() {\n"
" (function() {\n"
" var a = 1;\n"
@@ -284,6 +296,7 @@ TEST(AnonymousInAnonymousClosure2) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"(function() {\n"
" (function() {\n"
" var a = 1;\n"
@@ -300,6 +313,7 @@ TEST(NamedInAnonymousClosure) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"var foo = function() {\n"
" (function named() {\n"
" var a = 1;\n"
@@ -317,6 +331,7 @@ TEST(Issue380) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function a() {\n"
"var result = function(p,a,c,k,e,d)"
"{return p}(\"if blah blah\",62,1976,\'a|b\'.split(\'|\'),0,{})\n"
@@ -330,6 +345,7 @@ TEST(MultipleAssignments) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"var fun1 = fun2 = function () { return 1; }\n"
"var bar1 = bar2 = bar3 = function () { return 2; }\n"
"foo1 = foo2 = function () { return 3; }\n"
@@ -346,6 +362,7 @@ TEST(AsConstructorParameter) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function Foo() {}\n"
"var foo = new Foo(function() { return 1; })\n"
"var bar = new Foo(function() { return 2; }, function() { return 3; })");
@@ -360,6 +377,7 @@ TEST(FactoryHashmap) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function createMyObj() {\n"
" var obj = {};\n"
" obj[\"method1\"] = function() { return 1; }\n"
@@ -376,6 +394,7 @@ TEST(FactoryHashmapVariable) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function createMyObj() {\n"
" var obj = {};\n"
" var methodName = \"method1\";\n"
@@ -395,6 +414,7 @@ TEST(FactoryHashmapConditional) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"function createMyObj() {\n"
" var obj = {};\n"
" obj[0 ? \"method1\" : \"method2\"] = function() { return 1; }\n"
@@ -410,6 +430,7 @@ TEST(GlobalAssignmentAndCall) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"var Foo = function() {\n"
" return 1;\n"
"}();\n"
@@ -428,6 +449,7 @@ TEST(AssignmentAndCall) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"(function Enclosing() {\n"
" var Foo;\n"
" Foo = function() {\n"
@@ -451,6 +473,7 @@ TEST(MethodAssignmentInAnonymousFunctionCall) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"(function () {\n"
" var EventSource = function () { };\n"
" EventSource.prototype.addListener = function () {\n"
@@ -467,6 +490,7 @@ TEST(ReturnAnonymousFunction) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
+ CcTest::isolate(),
"(function() {\n"
" function wrapCode() {\n"
" return function () {\n"
diff --git a/deps/v8/test/cctest/test-fuzz-a64.cc b/deps/v8/test/cctest/test-fuzz-a64.cc
new file mode 100644
index 0000000000..3f091d7586
--- /dev/null
+++ b/deps/v8/test/cctest/test-fuzz-a64.cc
@@ -0,0 +1,70 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include "cctest.h"
+
+#include "a64/decoder-a64.h"
+#include "a64/disasm-a64.h"
+
+using namespace v8::internal;
+
+TEST(FUZZ_decoder) {
+ // Feed noise into the decoder to check that it doesn't crash.
+ // 43 million = ~1% of the instruction space.
+ static const int instruction_count = 43 * 1024 * 1024;
+
+ uint16_t seed[3] = {1, 2, 3};
+ seed48(seed);
+
+ Decoder decoder;
+ Instruction buffer[kInstructionSize];
+
+ for (int i = 0; i < instruction_count; i++) {
+ uint32_t instr = mrand48();
+ buffer->SetInstructionBits(instr);
+ decoder.Decode(buffer);
+ }
+}
+
+
+TEST(FUZZ_disasm) {
+ // Feed noise into the disassembler to check that it doesn't crash.
+ // 9 million = ~0.2% of the instruction space.
+ static const int instruction_count = 9 * 1024 * 1024;
+
+ uint16_t seed[3] = {42, 43, 44};
+ seed48(seed);
+
+ Decoder decoder;
+ Disassembler disasm;
+ Instruction buffer[kInstructionSize];
+
+ decoder.AppendVisitor(&disasm);
+ for (int i = 0; i < instruction_count; i++) {
+ uint32_t instr = mrand48();
+ buffer->SetInstructionBits(instr);
+ decoder.Decode(buffer);
+ }
+}
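
Because both fuzz tests seed the PRNG explicitly, any crash they uncover is reproducible, and a single suspicious instruction word can be replayed through the same decoder/disassembler pair. A minimal sketch under the same includes and using-directives as the file above (the function name is a placeholder; the formatted text stays inside the Disassembler visitor, whose accessor is not shown in this patch):

static void ReplayOneInstruction(uint32_t suspicious_bits) {
  Decoder decoder;
  Disassembler disasm;
  decoder.AppendVisitor(&disasm);  // The visitor formats whatever is decoded.

  Instruction buffer[kInstructionSize];
  buffer->SetInstructionBits(suspicious_bits);
  decoder.Decode(buffer);
}
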
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index d0b80d1c8f..48b6655bbc 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -316,6 +316,8 @@ TEST(ImplicitReferences) {
reinterpret_cast<HeapObject**>(g2s1.location()));
ASSERT(implicit_refs->at(1)->length == 1);
ASSERT(implicit_refs->at(1)->children[0] == g2c1.location());
+ global_handles->RemoveObjectGroups();
+ global_handles->RemoveImplicitRefGroups();
}
@@ -334,8 +336,8 @@ TEST(EternalHandles) {
for (int i = 0; i < kArrayLength; i++) {
indices[i] = -1;
HandleScope scope(isolate);
- v8::Local<v8::Object> object = v8::Object::New();
- object->Set(i, v8::Integer::New(i, v8_isolate));
+ v8::Local<v8::Object> object = v8::Object::New(v8_isolate);
+ object->Set(i, v8::Integer::New(v8_isolate, i));
// Create with internal api
eternal_handles->Create(
isolate, *v8::Utils::OpenHandle(*object), &indices[i]);
@@ -370,7 +372,7 @@ TEST(EternalHandles) {
// Create an eternal via the constructor
{
HandleScope scope(isolate);
- v8::Local<v8::Object> object = v8::Object::New();
+ v8::Local<v8::Object> object = v8::Object::New(v8_isolate);
v8::Eternal<v8::Object> eternal(v8_isolate, object);
CHECK(!eternal.IsEmpty());
CHECK(object == eternal.Get(v8_isolate));
diff --git a/deps/v8/test/cctest/test-global-object.cc b/deps/v8/test/cctest/test-global-object.cc
index 5fe77c2adf..bbec9df775 100644
--- a/deps/v8/test/cctest/test-global-object.cc
+++ b/deps/v8/test/cctest/test-global-object.cc
@@ -39,7 +39,7 @@ TEST(StrictUndeclaredGlobalVariable) {
LocalContext context;
v8::TryCatch try_catch;
v8::Local<v8::Script> script = v8_compile("\"use strict\"; x = 42;");
- v8::Handle<v8::Object> proto = v8::Object::New();
+ v8::Handle<v8::Object> proto = v8::Object::New(CcTest::isolate());
v8::Handle<v8::Object> global =
context->Global()->GetPrototype().As<v8::Object>();
proto->Set(var_name, v8_num(100));
diff --git a/deps/v8/test/cctest/test-hashing.cc b/deps/v8/test/cctest/test-hashing.cc
index 3ec844e9c7..a29d3d16cb 100644
--- a/deps/v8/test/cctest/test-hashing.cc
+++ b/deps/v8/test/cctest/test-hashing.cc
@@ -96,6 +96,24 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
StringHelper::GenerateHashGetHash(masm, r0);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_A64
+ // The A64 assembler usually uses jssp (x28) as a stack pointer, but only csp
+ // is initialized by the calling (C++) code.
+ Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+ __ Push(root, xzr);
+ __ InitializeRootRegister();
+ __ Mov(x0, 0);
+ __ Mov(x10, Operand(string.at(0)));
+ StringHelper::GenerateHashInit(masm, x0, x10);
+ for (int i = 1; i < string.length(); i++) {
+ __ Mov(x10, Operand(string.at(i)));
+ StringHelper::GenerateHashAddCharacter(masm, x0, x10);
+ }
+ StringHelper::GenerateHashGetHash(masm, x0, x10);
+ __ Pop(xzr, root);
+ __ Ret();
+ __ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
__ push(kRootRegister);
__ InitializeRootRegister();
@@ -111,6 +129,8 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#else
+#error Unsupported architecture.
#endif
}
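
The A64 branch above switches to csp because only csp is set up by the C++ caller, and it pushes root paired with xzr, presumably to keep csp 16-byte aligned as the architecture requires for the active stack pointer; the second generate() overload below repeats the same idiom. A minimal sketch of that frame discipline in isolation (not part of the patch; register aliases as used above, with the emitted body elided):

void EmitOnCStackPointer(MacroAssembler* masm) {
  Register old_stack_pointer = masm->StackPointer();
  masm->SetStackPointer(csp);     // Only csp is initialized by the C++ caller.
  masm->Push(root, xzr);          // Paired push keeps csp 16-byte aligned.
  masm->InitializeRootRegister();
  // ... emit the code under test here ...
  masm->Pop(xzr, root);           // Restore in reverse order.
  masm->Ret();
  masm->SetStackPointer(old_stack_pointer);
}
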
@@ -138,6 +158,18 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ GetNumberHash(r0, ip);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_A64
+ // The A64 assembler usually uses jssp (x28) as a stack pointer, but only csp
+ // is initialized by the calling (C++) code.
+ Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+ __ Push(root, xzr);
+ __ InitializeRootRegister();
+ __ Mov(x0, key);
+ __ GetNumberHash(x0, x10);
+ __ Pop(xzr, root);
+ __ Ret();
+ __ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
__ push(kRootRegister);
__ InitializeRootRegister();
@@ -146,6 +178,8 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#else
+#error Unsupported architecture.
#endif
}
@@ -172,8 +206,8 @@ void check(i::Vector<const uint8_t> string) {
Handle<String> v8_string = factory->NewStringFromOneByte(string);
v8_string->set_hash_field(String::kEmptyHashField);
#ifdef USE_SIMULATOR
- uint32_t codegen_hash =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+ uint32_t codegen_hash = static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
#else
uint32_t codegen_hash = hash();
#endif
@@ -207,8 +241,8 @@ void check(uint32_t key) {
HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
#ifdef USE_SIMULATOR
- uint32_t codegen_hash =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+ uint32_t codegen_hash = static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
#else
uint32_t codegen_hash = hash();
#endif
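
Both simulator paths above narrow the generated-code result in two steps because a reinterpret_cast from a pointer straight to uint32_t is ill-formed on 64-bit targets, where the destination type is narrower than a pointer. A minimal sketch of the idiom (the helper name is invented for illustration):

#include <stdint.h>

static inline uint32_t NarrowGeneratedCodeResult(void* raw_result) {
  // Widen to an integer type that can hold a pointer, then truncate
  // explicitly; this is well defined on 32-bit and 64-bit builds alike.
  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(raw_result));
}
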
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index db2243a3f0..c04ed9de13 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -234,9 +234,9 @@ TEST(HeapSnapshotObjectSizes) {
CHECK_NE(NULL, x2);
// Test sizes.
- CHECK_NE(0, x->GetSelfSize());
- CHECK_NE(0, x1->GetSelfSize());
- CHECK_NE(0, x2->GetSelfSize());
+ CHECK_NE(0, static_cast<int>(x->GetShallowSize()));
+ CHECK_NE(0, static_cast<int>(x1->GetShallowSize()));
+ CHECK_NE(0, static_cast<int>(x2->GetShallowSize()));
}
@@ -255,7 +255,8 @@ TEST(BoundFunctionInSnapshot) {
const v8::HeapGraphNode* f =
GetProperty(global, v8::HeapGraphEdge::kProperty, "boundFunction");
CHECK(f);
- CHECK_EQ(v8::String::New("native_bind"), f->GetName());
+ CHECK_EQ(v8::String::NewFromUtf8(env->GetIsolate(), "native_bind"),
+ f->GetName());
const v8::HeapGraphNode* bindings =
GetProperty(f, v8::HeapGraphEdge::kInternal, "bindings");
CHECK_NE(NULL, bindings);
@@ -343,6 +344,15 @@ TEST(HeapSnapshotCodeObjects) {
GetProperty(lazy, v8::HeapGraphEdge::kInternal, "shared");
CHECK_NE(NULL, lazy_code);
+ // Check that there's no strong next_code_link. There might be a weak one,
+ // but there might not be, so we can't check for it.
+ const v8::HeapGraphNode* code =
+ GetProperty(compiled_code, v8::HeapGraphEdge::kInternal, "code");
+ CHECK_NE(NULL, code);
+ const v8::HeapGraphNode* next_code_link =
+ GetProperty(code, v8::HeapGraphEdge::kInternal, "code");
+ CHECK_EQ(NULL, next_code_link);
+
// Verify that non-compiled code doesn't contain references to "x"
// literal, while compiled code does. The scope info is stored in FixedArray
// objects attached to the SharedFunctionInfo.
@@ -422,7 +432,8 @@ TEST(HeapSnapshotSlicedString) {
TEST(HeapSnapshotConsString) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetInternalFieldCount(1);
LocalContext env(NULL, global_template);
v8::Handle<v8::Object> global_proxy = env->Global();
@@ -465,13 +476,14 @@ TEST(HeapSnapshotConsString) {
TEST(HeapSnapshotInternalReferences) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->SetInternalFieldCount(2);
LocalContext env(NULL, global_template);
v8::Handle<v8::Object> global_proxy = env->Global();
v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
CHECK_EQ(2, global->InternalFieldCount());
- v8::Local<v8::Object> obj = v8::Object::New();
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
global->SetInternalField(0, v8_num(17));
global->SetInternalField(1, obj);
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
@@ -734,8 +746,9 @@ TEST(HeapSnapshotJSONSerialization) {
stream.WriteTo(json);
// Verify that snapshot string is valid JSON.
- AsciiResource json_res(json);
- v8::Local<v8::String> json_string = v8::String::NewExternal(&json_res);
+ AsciiResource* json_res = new AsciiResource(json);
+ v8::Local<v8::String> json_string =
+ v8::String::NewExternal(env->GetIsolate(), json_res);
env->Global()->Set(v8_str("json_snapshot"), json_string);
v8::Local<v8::Value> snapshot_parse_result = CompileRun(
"var parsed = JSON.parse(json_snapshot); true;");
@@ -1003,7 +1016,7 @@ TEST(HeapSnapshotObjectsStats) {
CHECK_EQ(2, stats_update.first_interval_index());
}
- v8::Local<v8::Array> array = v8::Array::New();
+ v8::Local<v8::Array> array = v8::Array::New(env->GetIsolate());
CHECK_EQ(0, array->Length());
// Force array's buffer allocation.
array->Set(2, v8_num(7));
@@ -1039,6 +1052,49 @@ TEST(HeapSnapshotObjectsStats) {
}
+TEST(HeapObjectIds) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ const int kLength = 10;
+ v8::Handle<v8::Object> objects[kLength];
+ v8::SnapshotObjectId ids[kLength];
+
+ heap_profiler->StartTrackingHeapObjects(false);
+
+ for (int i = 0; i < kLength; i++) {
+ objects[i] = v8::Object::New(isolate);
+ }
+ GetHeapStatsUpdate(heap_profiler);
+
+ for (int i = 0; i < kLength; i++) {
+ v8::SnapshotObjectId id = heap_profiler->GetObjectId(objects[i]);
+ CHECK_NE(v8::HeapProfiler::kUnknownObjectId, static_cast<int>(id));
+ ids[i] = id;
+ }
+
+ heap_profiler->StopTrackingHeapObjects();
+ CcTest::heap()->CollectAllAvailableGarbage();
+
+ for (int i = 0; i < kLength; i++) {
+ v8::SnapshotObjectId id = heap_profiler->GetObjectId(objects[i]);
+ CHECK_EQ(static_cast<int>(ids[i]), static_cast<int>(id));
+ v8::Handle<v8::Value> obj = heap_profiler->FindObjectById(ids[i]);
+ CHECK_EQ(objects[i], obj);
+ }
+
+ heap_profiler->ClearObjectIds();
+ for (int i = 0; i < kLength; i++) {
+ v8::SnapshotObjectId id = heap_profiler->GetObjectId(objects[i]);
+ CHECK_EQ(v8::HeapProfiler::kUnknownObjectId, static_cast<int>(id));
+ v8::Handle<v8::Value> obj = heap_profiler->FindObjectById(ids[i]);
+ CHECK(obj.IsEmpty());
+ }
+}
+
+
static void CheckChildrenIds(const v8::HeapSnapshot* snapshot,
const v8::HeapGraphNode* node,
int level, int max_level) {
@@ -1083,8 +1139,8 @@ TEST(HeapSnapshotGetSnapshotObjectId) {
GetProperty(global, v8::HeapGraphEdge::kProperty, "globalObject");
CHECK(global_object);
- v8::Local<v8::Value> globalObjectHandle =
- env->Global()->Get(v8::String::New("globalObject"));
+ v8::Local<v8::Value> globalObjectHandle = env->Global()->Get(
+ v8::String::NewFromUtf8(env->GetIsolate(), "globalObject"));
CHECK(!globalObjectHandle.IsEmpty());
CHECK(globalObjectHandle->IsObject());
@@ -1309,7 +1365,7 @@ class GraphWithImplicitRefs {
instance_ = this;
isolate_ = (*env)->GetIsolate();
for (int i = 0; i < kObjectsCount; i++) {
- objects_[i].Reset(isolate_, v8::Object::New());
+ objects_[i].Reset(isolate_, v8::Object::New(isolate_));
}
(*env)->Global()->Set(v8_str("root_object"),
v8::Local<v8::Value>::New(isolate_, objects_[0]));
@@ -1481,6 +1537,30 @@ TEST(GlobalObjectName) {
}
+TEST(GlobalObjectFields) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ CompileRun("obj = {};");
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* builtins =
+ GetProperty(global, v8::HeapGraphEdge::kInternal, "builtins");
+ CHECK_NE(NULL, builtins);
+ const v8::HeapGraphNode* native_context =
+ GetProperty(global, v8::HeapGraphEdge::kInternal, "native_context");
+ CHECK_NE(NULL, native_context);
+ const v8::HeapGraphNode* global_context =
+ GetProperty(global, v8::HeapGraphEdge::kInternal, "global_context");
+ CHECK_NE(NULL, global_context);
+ const v8::HeapGraphNode* global_receiver =
+ GetProperty(global, v8::HeapGraphEdge::kInternal, "global_receiver");
+ CHECK_NE(NULL, global_receiver);
+}
+
+
TEST(NoHandleLeaks) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1517,7 +1597,7 @@ TEST(NodesIteration) {
}
-TEST(GetHeapValue) {
+TEST(GetHeapValueForNode) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -1527,25 +1607,26 @@ TEST(GetHeapValue) {
heap_profiler->TakeHeapSnapshot(v8_str("value"));
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK(global->GetHeapValue()->IsObject());
+ CHECK(heap_profiler->FindObjectById(global->GetId())->IsObject());
v8::Local<v8::Object> js_global =
env->Global()->GetPrototype().As<v8::Object>();
- CHECK(js_global == global->GetHeapValue());
+ CHECK(js_global == heap_profiler->FindObjectById(global->GetId()));
const v8::HeapGraphNode* obj = GetProperty(
global, v8::HeapGraphEdge::kProperty, "a");
- CHECK(obj->GetHeapValue()->IsObject());
+ CHECK(heap_profiler->FindObjectById(obj->GetId())->IsObject());
v8::Local<v8::Object> js_obj = js_global->Get(v8_str("a")).As<v8::Object>();
- CHECK(js_obj == obj->GetHeapValue());
+ CHECK(js_obj == heap_profiler->FindObjectById(obj->GetId()));
const v8::HeapGraphNode* s_prop =
GetProperty(obj, v8::HeapGraphEdge::kProperty, "s_prop");
v8::Local<v8::String> js_s_prop =
js_obj->Get(v8_str("s_prop")).As<v8::String>();
- CHECK(js_s_prop == s_prop->GetHeapValue());
+ CHECK(js_s_prop == heap_profiler->FindObjectById(s_prop->GetId()));
const v8::HeapGraphNode* n_prop =
GetProperty(obj, v8::HeapGraphEdge::kProperty, "n_prop");
v8::Local<v8::Number> js_n_prop =
js_obj->Get(v8_str("n_prop")).As<v8::Number>();
- CHECK(js_n_prop->NumberValue() == n_prop->GetHeapValue()->NumberValue());
+ CHECK(js_n_prop->NumberValue() ==
+ heap_profiler->FindObjectById(n_prop->GetId())->NumberValue());
}
@@ -1570,18 +1651,18 @@ TEST(GetHeapValueForDeletedObject) {
// Perform the check inside a nested local scope to avoid creating a
// reference to the object we are deleting.
v8::HandleScope scope(env->GetIsolate());
- CHECK(prop->GetHeapValue()->IsObject());
+ CHECK(heap_profiler->FindObjectById(prop->GetId())->IsObject());
}
CompileRun("delete a.p;");
- CHECK(prop->GetHeapValue()->IsUndefined());
+ CHECK(heap_profiler->FindObjectById(prop->GetId()).IsEmpty());
}
static int StringCmp(const char* ref, i::String* act) {
i::SmartArrayPointer<char> s_act = act->ToCString();
- int result = strcmp(ref, *s_act);
+ int result = strcmp(ref, s_act.get());
if (result != 0)
- fprintf(stderr, "Expected: \"%s\", Actual: \"%s\"\n", ref, *s_act);
+ fprintf(stderr, "Expected: \"%s\", Actual: \"%s\"\n", ref, s_act.get());
return result;
}
@@ -1719,7 +1800,8 @@ TEST(HiddenPropertiesFastCase) {
GetProperty(c, v8::HeapGraphEdge::kInternal, "hidden_properties");
CHECK_EQ(NULL, hidden_props);
- v8::Handle<v8::Value> cHandle = env->Global()->Get(v8::String::New("c"));
+ v8::Handle<v8::Value> cHandle =
+ env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "c"));
CHECK(!cHandle.IsEmpty() && cHandle->IsObject());
cHandle->ToObject()->SetHiddenValue(v8_str("key"), v8_str("val"));
@@ -1760,10 +1842,10 @@ bool HasWeakGlobalHandle() {
}
-static void PersistentHandleCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void*) {
- handle->Dispose();
+static void PersistentHandleCallback(
+ const v8::WeakCallbackData<v8::Object, v8::Persistent<v8::Object> >& data) {
+ data.GetParameter()->Reset();
+ delete data.GetParameter();
}
@@ -1773,8 +1855,9 @@ TEST(WeakGlobalHandle) {
CHECK(!HasWeakGlobalHandle());
- v8::Persistent<v8::Object> handle(env->GetIsolate(), v8::Object::New());
- handle.MakeWeak<v8::Value, void>(NULL, PersistentHandleCallback);
+ v8::Persistent<v8::Object> handle(env->GetIsolate(),
+ v8::Object::New(env->GetIsolate()));
+ handle.SetWeak(&handle, PersistentHandleCallback);
CHECK(HasWeakGlobalHandle());
}
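
The hunk above shows the shape of the weak-handle API migration running through this update: the (Isolate*, Persistent*, void*) callback with MakeWeak/Dispose gives way to a WeakCallbackData callback with SetWeak/Reset. A minimal, self-contained sketch of the new-style pattern, with a heap-allocated Persistent chosen only so the delete in the callback is clearly valid (names are illustrative, not from the patch):

static void OnWeak(
    const v8::WeakCallbackData<v8::Object, v8::Persistent<v8::Object> >& data) {
  data.GetParameter()->Reset();  // Release the handle itself.
  delete data.GetParameter();    // Safe here because it was heap-allocated.
}

static void MakeObjectWeak(v8::Isolate* isolate, v8::Local<v8::Object> object) {
  v8::Persistent<v8::Object>* handle =
      new v8::Persistent<v8::Object>(isolate, object);
  handle->SetWeak(handle, OnWeak);  // The parameter doubles as the handle.
}
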
@@ -1934,8 +2017,8 @@ TEST(ManyLocalsInSharedContext) {
CHECK_EQ(v8::internal::Context::MIN_CONTEXT_SLOTS + num_objects - 1,
context_object->GetChildrenCount());
// Check all the objects have got their names.
- // ... well check just every 8th because otherwise it's too slow in debug.
- for (int i = 0; i < num_objects - 1; i += 8) {
+ // ... we'll check just every 15th because otherwise it's too slow in debug.
+ for (int i = 0; i < num_objects - 1; i += 15) {
i::EmbeddedVector<char, 100> var_name;
i::OS::SNPrintF(var_name, "f_%d", i);
const v8::HeapGraphNode* f_object = GetProperty(
@@ -1947,8 +2030,9 @@ TEST(ManyLocalsInSharedContext) {
TEST(AllocationSitesAreVisible) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
CompileRun(
"fun = function () { var a = [3, 2, 1]; return a; }\n"
"fun();");
@@ -1983,16 +2067,21 @@ TEST(AllocationSitesAreVisible) {
"elements");
CHECK_NE(NULL, elements);
CHECK_EQ(v8::HeapGraphNode::kArray, elements->GetType());
- CHECK_EQ(v8::internal::FixedArray::SizeFor(3), elements->GetSelfSize());
+ CHECK_EQ(v8::internal::FixedArray::SizeFor(3),
+ static_cast<int>(elements->GetShallowSize()));
- CHECK(transition_info->GetHeapValue()->IsArray());
- v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(
- transition_info->GetHeapValue());
+ v8::Handle<v8::Value> array_val =
+ heap_profiler->FindObjectById(transition_info->GetId());
+ CHECK(array_val->IsArray());
+ v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(array_val);
// Verify the array is "a" in the code above.
CHECK_EQ(3, array->Length());
- CHECK_EQ(v8::Integer::New(3), array->Get(v8::Integer::New(0)));
- CHECK_EQ(v8::Integer::New(2), array->Get(v8::Integer::New(1)));
- CHECK_EQ(v8::Integer::New(1), array->Get(v8::Integer::New(2)));
+ CHECK_EQ(v8::Integer::New(isolate, 3),
+ array->Get(v8::Integer::New(isolate, 0)));
+ CHECK_EQ(v8::Integer::New(isolate, 2),
+ array->Get(v8::Integer::New(isolate, 1)));
+ CHECK_EQ(v8::Integer::New(isolate, 1),
+ array->Get(v8::Integer::New(isolate, 2)));
}
@@ -2014,70 +2103,66 @@ TEST(JSFunctionHasCodeLink) {
}
-
-class HeapProfilerExtension : public v8::Extension {
- public:
- static const char* kName;
- HeapProfilerExtension() : v8::Extension(kName, kSource) { }
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
- static void FindUntrackedObjects(
- const v8::FunctionCallbackInfo<v8::Value>& args);
- private:
- static const char* kSource;
-};
-
-const char* HeapProfilerExtension::kName = "v8/heap-profiler";
-
-
-const char* HeapProfilerExtension::kSource =
- "native function findUntrackedObjects();";
-
-
-v8::Handle<v8::FunctionTemplate> HeapProfilerExtension::GetNativeFunction(
- v8::Handle<v8::String> name) {
- if (name->Equals(v8::String::New("findUntrackedObjects"))) {
- return v8::FunctionTemplate::New(
- HeapProfilerExtension::FindUntrackedObjects);
- } else {
- CHECK(false);
- return v8::Handle<v8::FunctionTemplate>();
+static const v8::HeapGraphNode* GetNodeByPath(const v8::HeapSnapshot* snapshot,
+ const char* path[],
+ int depth) {
+ const v8::HeapGraphNode* node = snapshot->GetRoot();
+ for (int current_depth = 0; current_depth < depth; ++current_depth) {
+ int i, count = node->GetChildrenCount();
+ for (i = 0; i < count; ++i) {
+ const v8::HeapGraphEdge* edge = node->GetChild(i);
+ const v8::HeapGraphNode* to_node = edge->GetToNode();
+ v8::String::Utf8Value edge_name(edge->GetName());
+ v8::String::Utf8Value node_name(to_node->GetName());
+ i::EmbeddedVector<char, 100> name;
+ i::OS::SNPrintF(name, "%s::%s", *edge_name, *node_name);
+ if (strstr(name.start(), path[current_depth])) {
+ node = to_node;
+ break;
+ }
+ }
+ if (i == count) return NULL;
}
+ return node;
}
-void HeapProfilerExtension::FindUntrackedObjects(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- i::HeapProfiler* heap_profiler =
- reinterpret_cast<i::HeapProfiler*>(args.GetIsolate()->GetHeapProfiler());
- int untracked_objects = heap_profiler->FindUntrackedObjects();
- args.GetReturnValue().Set(untracked_objects);
- CHECK_EQ(0, untracked_objects);
-}
-
+TEST(CheckCodeNames) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ CompileRun("var a = 1.1;");
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("CheckCodeNames"));
+ CHECK(ValidateSnapshot(snapshot));
-static HeapProfilerExtension kHeapProfilerExtension;
-v8::DeclareExtension kHeapProfilerExtensionDeclaration(
- &kHeapProfilerExtension);
+ const char* stub_path[] = {
+ "::(GC roots)",
+ "::(Strong roots)",
+ "code_stubs::",
+ "::(ArraySingleArgumentConstructorStub code)"
+ };
+ const v8::HeapGraphNode* node = GetNodeByPath(snapshot,
+ stub_path, ARRAY_SIZE(stub_path));
+ CHECK_NE(NULL, node);
+ const char* builtin_path1[] = {
+ "::(GC roots)",
+ "::(Builtins)",
+ "::(KeyedLoadIC_Generic builtin)"
+ };
+ node = GetNodeByPath(snapshot, builtin_path1, ARRAY_SIZE(builtin_path1));
+ CHECK_NE(NULL, node);
-// This is an example of using checking of JS allocations tracking in a test.
-TEST(HeapObjectsTracker) {
- const char* extensions[] = { HeapProfilerExtension::kName };
- v8::ExtensionConfiguration config(1, extensions);
- LocalContext env(&config);
- v8::HandleScope scope(env->GetIsolate());
- HeapObjectsTracker tracker;
- CompileRun("var a = 1.2");
- CompileRun("var a = 1.2; var b = 1.0; var c = 1.0;");
- CompileRun(
- "var a = [];\n"
- "for (var i = 0; i < 5; ++i)\n"
- " a[i] = i;\n"
- "findUntrackedObjects();\n"
- "for (var i = 0; i < 3; ++i)\n"
- " a.shift();\n"
- "findUntrackedObjects();\n");
+ const char* builtin_path2[] = {
+ "::(GC roots)",
+ "::(Builtins)",
+ "::(CompileUnoptimized builtin)"
+ };
+ node = GetNodeByPath(snapshot, builtin_path2, ARRAY_SIZE(builtin_path2));
+ CHECK_NE(NULL, node);
+ v8::String::Utf8Value node_name(node->GetName());
+ CHECK_EQ("(CompileUnoptimized builtin)", *node_name);
}
@@ -2123,12 +2208,6 @@ static const char* record_trace_tree_source =
"for (var i = 0; i < 100; i++) start();\n";
-static i::HeapSnapshot* ToInternal(const v8::HeapSnapshot* snapshot) {
- return const_cast<i::HeapSnapshot*>(
- reinterpret_cast<const i::HeapSnapshot*>(snapshot));
-}
-
-
static AllocationTraceNode* FindNode(
AllocationTracker* tracker, const Vector<const char*>& names) {
AllocationTraceNode* node = tracker->trace_tree()->root();
@@ -2149,19 +2228,48 @@ static AllocationTraceNode* FindNode(
}
+TEST(ArrayGrowLeftTrim) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ heap_profiler->StartTrackingHeapObjects(true);
+
+ CompileRun(
+ "var a = [];\n"
+ "for (var i = 0; i < 5; ++i)\n"
+ " a[i] = i;\n"
+ "for (var i = 0; i < 3; ++i)\n"
+ " a.shift();\n");
+
+ const char* names[] = { "(anonymous function)" };
+ AllocationTracker* tracker =
+ reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
+ CHECK_NE(NULL, tracker);
+ // Resolve all function locations.
+ tracker->PrepareForSerialization();
+ // Print for better diagnostics in case of failure.
+ tracker->trace_tree()->Print(tracker);
+
+ AllocationTraceNode* node =
+ FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ CHECK_NE(NULL, node);
+ CHECK_GE(node->allocation_count(), 2);
+ CHECK_GE(node->allocation_size(), 4 * 5);
+ heap_profiler->StopTrackingHeapObjects();
+}
+
+
TEST(TrackHeapAllocations) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
- heap_profiler->StartRecordingHeapAllocations();
+ heap_profiler->StartTrackingHeapObjects(true);
CompileRun(record_trace_tree_source);
- const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot(
- v8::String::New("Test"));
- i::HeapSnapshotsCollection* collection = ToInternal(snapshot)->collection();
- AllocationTracker* tracker = collection->allocation_tracker();
+ AllocationTracker* tracker =
+ reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
CHECK_NE(NULL, tracker);
// Resolve all function locations.
tracker->PrepareForSerialization();
@@ -2175,5 +2283,197 @@ TEST(TrackHeapAllocations) {
CHECK_NE(NULL, node);
CHECK_GE(node->allocation_count(), 100);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
- heap_profiler->StopRecordingHeapAllocations();
+ heap_profiler->StopTrackingHeapObjects();
+}
+
+
+static const char* inline_heap_allocation_source =
+"function f_0(x) {\n"
+" return f_1(x+1);\n"
+"}\n"
+"%NeverOptimizeFunction(f_0);\n"
+"function f_1(x) {\n"
+" return new f_2(x+1);\n"
+"}\n"
+"function f_2(x) {\n"
+" this.foo = x;\n"
+"}\n"
+"var instances = [];\n"
+"function start() {\n"
+" instances.push(f_0(0));\n"
+"}\n"
+"\n"
+"for (var i = 0; i < 100; i++) start();\n";
+
+
+TEST(TrackBumpPointerAllocations) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ const char* names[] = { "(anonymous function)", "start", "f_0", "f_1" };
+ // First check that normally all allocations are recorded.
+ {
+ heap_profiler->StartTrackingHeapObjects(true);
+
+ CompileRun(inline_heap_allocation_source);
+
+ AllocationTracker* tracker =
+ reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
+ CHECK_NE(NULL, tracker);
+ // Resolve all function locations.
+ tracker->PrepareForSerialization();
+ // Print for better diagnostics in case of failure.
+ tracker->trace_tree()->Print(tracker);
+
+ AllocationTraceNode* node =
+ FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ CHECK_NE(NULL, node);
+ CHECK_GE(node->allocation_count(), 100);
+ CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
+ heap_profiler->StopTrackingHeapObjects();
+ }
+
+ {
+ heap_profiler->StartTrackingHeapObjects(true);
+
+ // Now check that not all allocations are tracked if we manually reenable
+ // inline allocations.
+ CHECK(CcTest::heap()->inline_allocation_disabled());
+ CcTest::heap()->EnableInlineAllocation();
+
+ CompileRun(inline_heap_allocation_source);
+
+ AllocationTracker* tracker =
+ reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
+ CHECK_NE(NULL, tracker);
+ // Resolve all function locations.
+ tracker->PrepareForSerialization();
+ // Print for better diagnostics in case of failure.
+ tracker->trace_tree()->Print(tracker);
+
+ AllocationTraceNode* node =
+ FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ CHECK_NE(NULL, node);
+ CHECK_LT(node->allocation_count(), 100);
+
+ CcTest::heap()->DisableInlineAllocation();
+ heap_profiler->StopTrackingHeapObjects();
+ }
+}
+
+
+TEST(ArrayBufferAndArrayBufferView) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ CompileRun("arr1 = new Uint32Array(100);\n");
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* arr1_obj =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "arr1");
+ CHECK_NE(NULL, arr1_obj);
+ const v8::HeapGraphNode* arr1_buffer =
+ GetProperty(arr1_obj, v8::HeapGraphEdge::kInternal, "buffer");
+ CHECK_NE(NULL, arr1_buffer);
+ const v8::HeapGraphNode* first_view =
+ GetProperty(arr1_buffer, v8::HeapGraphEdge::kWeak, "weak_first_view");
+ CHECK_NE(NULL, first_view);
+ const v8::HeapGraphNode* backing_store =
+ GetProperty(arr1_buffer, v8::HeapGraphEdge::kInternal, "backing_store");
+ CHECK_NE(NULL, backing_store);
+ CHECK_EQ(400, static_cast<int>(backing_store->GetShallowSize()));
+}
+
+
+static int GetRetainersCount(const v8::HeapSnapshot* snapshot,
+ const v8::HeapGraphNode* node) {
+ int count = 0;
+ for (int i = 0, l = snapshot->GetNodesCount(); i < l; ++i) {
+ const v8::HeapGraphNode* parent = snapshot->GetNode(i);
+ for (int j = 0, l2 = parent->GetChildrenCount(); j < l2; ++j) {
+ if (parent->GetChild(j)->GetToNode() == node) {
+ ++count;
+ }
+ }
+ }
+ return count;
+}
+
+
+TEST(ArrayBufferSharedBackingStore) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
+
+ v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 1024);
+ CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
+ CHECK(!ab->IsExternal());
+ v8::ArrayBuffer::Contents ab_contents = ab->Externalize();
+ CHECK(ab->IsExternal());
+
+ CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
+ void* data = ab_contents.Data();
+ ASSERT(data != NULL);
+ v8::Local<v8::ArrayBuffer> ab2 =
+ v8::ArrayBuffer::New(isolate, data, ab_contents.ByteLength());
+ CHECK(ab2->IsExternal());
+ env->Global()->Set(v8_str("ab1"), ab);
+ env->Global()->Set(v8_str("ab2"), ab2);
+
+ v8::Handle<v8::Value> result = CompileRun("ab2.byteLength");
+ CHECK_EQ(1024, result->Int32Value());
+
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* ab1_node =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "ab1");
+ CHECK_NE(NULL, ab1_node);
+ const v8::HeapGraphNode* ab1_data =
+ GetProperty(ab1_node, v8::HeapGraphEdge::kInternal, "backing_store");
+ CHECK_NE(NULL, ab1_data);
+ const v8::HeapGraphNode* ab2_node =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "ab2");
+ CHECK_NE(NULL, ab2_node);
+ const v8::HeapGraphNode* ab2_data =
+ GetProperty(ab2_node, v8::HeapGraphEdge::kInternal, "backing_store");
+ CHECK_NE(NULL, ab2_data);
+ CHECK_EQ(ab1_data, ab2_data);
+ CHECK_EQ(2, GetRetainersCount(snapshot, ab1_data));
+ free(data);
+}
+
+
+TEST(BoxObject) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ v8::Handle<v8::Object> global_proxy = env->Global();
+ v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
+
+ i::Factory* factory = CcTest::i_isolate()->factory();
+ i::Handle<i::String> string =
+ factory->NewStringFromAscii(i::CStrVector("string"));
+ i::Handle<i::Object> box = factory->NewBox(string);
+ global->Set(0, v8::ToApiHandle<v8::Object>(box));
+
+ v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global_node = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* box_node =
+ GetProperty(global_node, v8::HeapGraphEdge::kElement, "0");
+ CHECK_NE(NULL, box_node);
+ v8::String::Utf8Value box_node_name(box_node->GetName());
+ CHECK_EQ("system / Box", *box_node_name);
+ const v8::HeapGraphNode* box_value =
+ GetProperty(box_node, v8::HeapGraphEdge::kInternal, "value");
+ CHECK_NE(NULL, box_value);
}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 74c2b75811..0efbbfd776 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
+#include <utility>
#include "v8.h"
@@ -179,7 +180,7 @@ TEST(HeapObjects) {
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
-#ifndef V8_TARGET_ARCH_X64
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_A64)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = heap->NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
CHECK(value->IsHeapNumber());
@@ -382,22 +383,24 @@ TEST(GlobalHandles) {
CHECK((*h4)->IsHeapNumber());
CHECK_EQ(*h3, *h1);
- global_handles->Destroy(h1.location());
- global_handles->Destroy(h3.location());
+ GlobalHandles::Destroy(h1.location());
+ GlobalHandles::Destroy(h3.location());
CHECK_EQ(*h4, *h2);
- global_handles->Destroy(h2.location());
- global_handles->Destroy(h4.location());
+ GlobalHandles::Destroy(h2.location());
+ GlobalHandles::Destroy(h4.location());
}
static bool WeakPointerCleared = false;
-static void TestWeakGlobalHandleCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void* id) {
- if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
- handle->Dispose();
+static void TestWeakGlobalHandleCallback(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ std::pair<v8::Persistent<v8::Value>*, int>* p =
+ reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
+ data.GetParameter());
+ if (p->second == 1234) WeakPointerCleared = true;
+ p->first->Reset();
}
@@ -424,12 +427,13 @@ TEST(WeakGlobalHandlesScavenge) {
h2 = global_handles->Create(*u);
}
- global_handles->MakeWeak(h2.location(),
- reinterpret_cast<void*>(1234),
- &TestWeakGlobalHandleCallback);
+ std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
+ GlobalHandles::MakeWeak(h2.location(),
+ reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback);
// Scavenge treats weak pointers as normal roots.
- heap->PerformScavenge();
+ heap->CollectGarbage(NEW_SPACE);
CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber());
@@ -438,8 +442,8 @@ TEST(WeakGlobalHandlesScavenge) {
CHECK(!global_handles->IsNearDeath(h2.location()));
CHECK(!global_handles->IsNearDeath(h1.location()));
- global_handles->Destroy(h1.location());
- global_handles->Destroy(h2.location());
+ GlobalHandles::Destroy(h1.location());
+ GlobalHandles::Destroy(h2.location());
}
@@ -470,9 +474,10 @@ TEST(WeakGlobalHandlesMark) {
heap->CollectGarbage(NEW_SPACE);
CHECK(!heap->InNewSpace(*h1) && !heap->InNewSpace(*h2));
- global_handles->MakeWeak(h2.location(),
- reinterpret_cast<void*>(1234),
- &TestWeakGlobalHandleCallback);
+ std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
+ GlobalHandles::MakeWeak(h2.location(),
+ reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
@@ -484,7 +489,7 @@ TEST(WeakGlobalHandlesMark) {
CHECK(WeakPointerCleared);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
- global_handles->Destroy(h1.location());
+ GlobalHandles::Destroy(h1.location());
}
@@ -507,12 +512,13 @@ TEST(DeleteWeakGlobalHandle) {
h = global_handles->Create(*i);
}
- global_handles->MakeWeak(h.location(),
- reinterpret_cast<void*>(1234),
- &TestWeakGlobalHandleCallback);
+ std::pair<Handle<Object>*, int> handle_and_id(&h, 1234);
+ GlobalHandles::MakeWeak(h.location(),
+ reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback);
// Scavenge does not recognize weak references.
- heap->PerformScavenge();
+ heap->CollectGarbage(NEW_SPACE);
CHECK(!WeakPointerCleared);
@@ -760,7 +766,7 @@ TEST(JSArray) {
CHECK(array->HasFastSmiOrObjectElements());
// array[length] = name.
- array->SetElement(0, *name, NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetElement(array, 0, name, NONE, kNonStrictMode);
CHECK_EQ(Smi::FromInt(1), array->length());
CHECK_EQ(array->GetElement(isolate, 0), *name);
@@ -775,7 +781,7 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- array->SetElement(int_length, *name, NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetElement(array, int_length, name, NONE, kNonStrictMode);
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
@@ -805,8 +811,8 @@ TEST(JSObjectCopy) {
JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
- obj->SetElement(0, *first, NONE, kNonStrictMode)->ToObjectChecked();
- obj->SetElement(1, *second, NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetElement(obj, 0, first, NONE, kNonStrictMode);
+ JSReceiver::SetElement(obj, 1, second, NONE, kNonStrictMode);
// Make the clone.
Handle<JSObject> clone = JSObject::Copy(obj);
@@ -822,8 +828,8 @@ TEST(JSObjectCopy) {
JSReceiver::SetProperty(clone, first, two, NONE, kNonStrictMode);
JSReceiver::SetProperty(clone, second, one, NONE, kNonStrictMode);
- clone->SetElement(0, *second, NONE, kNonStrictMode)->ToObjectChecked();
- clone->SetElement(1, *first, NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetElement(clone, 0, second, NONE, kNonStrictMode);
+ JSReceiver::SetElement(clone, 1, first, NONE, kNonStrictMode);
CHECK_EQ(obj->GetElement(isolate, 1), clone->GetElement(isolate, 0));
CHECK_EQ(obj->GetElement(isolate, 0), clone->GetElement(isolate, 1));
@@ -912,7 +918,7 @@ TEST(Iteration) {
factory->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
- int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
+ int large_size = Page::kMaxRegularHeapObjectSize + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
@@ -935,7 +941,7 @@ TEST(EmptyHandleEscapeFrom) {
Handle<JSObject> runaway;
{
- v8::HandleScope nested(CcTest::isolate());
+ v8::EscapableHandleScope nested(CcTest::isolate());
Handle<JSObject> empty;
runaway = empty.EscapeFrom(&nested);
}
@@ -981,7 +987,7 @@ TEST(Regression39128) {
// just enough room to allocate JSObject and thus fill the newspace.
int allocation_amount = Min(FixedArray::kMaxSize,
- Page::kMaxNonCodeHeapObjectSize + kPointerSize);
+ Page::kMaxRegularHeapObjectSize + kPointerSize);
int allocation_len = LenFromSize(allocation_amount);
NewSpace* new_space = heap->new_space();
Address* top_addr = new_space->allocation_top_address();
@@ -1430,7 +1436,7 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[i]));
}
@@ -1442,14 +1448,14 @@ TEST(TestInternalWeakLists) {
// Get rid of f3 and f5 in the same way.
CompileRun("f3=null");
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
}
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
}
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
@@ -1471,7 +1477,7 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CHECK_EQ(kNumTestContexts - i, CountNativeContexts());
}
@@ -1784,15 +1790,15 @@ TEST(LeakNativeContextViaMap) {
"%OptimizeFunctionOnNextCall(f);"
"f();");
CHECK_EQ(42, res->Int32Value());
- ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+ ctx2->Global()->Set(v8_str("o"), v8::Int32::New(isolate, 0));
ctx2->Exit();
v8::Local<v8::Context>::New(isolate, ctx1)->Exit();
- ctx1p.Dispose();
+ ctx1p.Reset();
v8::V8::ContextDisposedNotification();
}
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
- ctx2p.Dispose();
+ ctx2p.Reset();
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1830,15 +1836,15 @@ TEST(LeakNativeContextViaFunction) {
"%OptimizeFunctionOnNextCall(f);"
"f(o);");
CHECK_EQ(42, res->Int32Value());
- ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+ ctx2->Global()->Set(v8_str("o"), v8::Int32::New(isolate, 0));
ctx2->Exit();
ctx1->Exit();
- ctx1p.Dispose();
+ ctx1p.Reset();
v8::V8::ContextDisposedNotification();
}
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
- ctx2p.Dispose();
+ ctx2p.Reset();
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1874,15 +1880,15 @@ TEST(LeakNativeContextViaMapKeyed) {
"%OptimizeFunctionOnNextCall(f);"
"f();");
CHECK_EQ(42, res->Int32Value());
- ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+ ctx2->Global()->Set(v8_str("o"), v8::Int32::New(isolate, 0));
ctx2->Exit();
ctx1->Exit();
- ctx1p.Dispose();
+ ctx1p.Reset();
v8::V8::ContextDisposedNotification();
}
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
- ctx2p.Dispose();
+ ctx2p.Reset();
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1922,15 +1928,15 @@ TEST(LeakNativeContextViaMapProto) {
"%OptimizeFunctionOnNextCall(f);"
"f();");
CHECK_EQ(42, res->Int32Value());
- ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+ ctx2->Global()->Set(v8_str("o"), v8::Int32::New(isolate, 0));
ctx2->Exit();
ctx1->Exit();
- ctx1p.Dispose();
+ ctx1p.Reset();
v8::V8::ContextDisposedNotification();
}
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
- ctx2p.Dispose();
+ ctx2p.Reset();
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -2184,6 +2190,8 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
TEST(OptimizedPretenuringAllocationFolding) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2199,8 +2207,13 @@ TEST(OptimizedPretenuringAllocationFolding) {
" this.e = 1.3;"
" this.f = [{}];"
"}"
+ "var number_elements = 20000;"
+ "var elements = new Array();"
"function f() {"
- " return new DataObject();"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = new DataObject();"
+ " }"
+ " return elements[number_elements-1]"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2220,6 +2233,8 @@ TEST(OptimizedPretenuringAllocationFolding) {
TEST(OptimizedPretenuringAllocationFoldingBlocks) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2227,6 +2242,8 @@ TEST(OptimizedPretenuringAllocationFoldingBlocks) {
CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 30000;"
+ "var elements = new Array(number_elements);"
"function DataObject() {"
" this.a = [{}];"
" this.b = [{}];"
@@ -2236,7 +2253,10 @@ TEST(OptimizedPretenuringAllocationFoldingBlocks) {
" this.f = 1.3;"
"}"
"function f() {"
- " return new DataObject();"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = new DataObject();"
+ " }"
+ " return elements[number_elements - 1];"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2256,16 +2276,20 @@ TEST(OptimizedPretenuringAllocationFoldingBlocks) {
TEST(OptimizedPretenuringObjectArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 20000;"
+ "var elements = new Array(number_elements);"
"function f() {"
- " var numbers = [{}, {}, {}];"
- " return numbers;"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = [{}, {}, {}];"
+ " }"
+ " return elements[number_elements - 1];"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2281,16 +2305,20 @@ TEST(OptimizedPretenuringObjectArrayLiterals) {
TEST(OptimizedPretenuringMixedInObjectProperties) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 20000;"
+ "var elements = new Array(number_elements);"
"function f() {"
- " var numbers = {a: {c: 2.2, d: {}}, b: 1.1};"
- " return numbers;"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = {a: {c: 2.2, d: {}}, b: 1.1};"
+ " }"
+ " return elements[number_elements - 1];"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2312,16 +2340,20 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
TEST(OptimizedPretenuringDoubleArrayProperties) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 30000;"
+ "var elements = new Array(number_elements);"
"function f() {"
- " var numbers = {a: 1.1, b: 2.2};"
- " return numbers;"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = {a: 1.1, b: 2.2};"
+ " }"
+ " return elements[i - 1];"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2337,16 +2369,20 @@ TEST(OptimizedPretenuringDoubleArrayProperties) {
TEST(OptimizedPretenuringdoubleArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 30000;"
+ "var elements = new Array(number_elements);"
"function f() {"
- " var numbers = [1.1, 2.2, 3.3];"
- " return numbers;"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = [1.1, 2.2, 3.3];"
+ " }"
+ " return elements[number_elements - 1];"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2362,16 +2398,20 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 20000;"
+ "var elements = new Array(number_elements);"
"function f() {"
- " var numbers = [[{}, {}, {}],[1.1, 2.2, 3.3]];"
- " return numbers;"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = [[{}, {}, {}], [1.1, 2.2, 3.3]];"
+ " }"
+ " return elements[number_elements - 1];"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2396,16 +2436,20 @@ TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
TEST(OptimizedPretenuringNestedObjectLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 20000;"
+ "var elements = new Array(number_elements);"
"function f() {"
- " var numbers = [[{}, {}, {}],[{}, {}, {}]];"
- " return numbers;"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = [[{}, {}, {}],[{}, {}, {}]];"
+ " }"
+ " return elements[number_elements - 1];"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2430,16 +2474,20 @@ TEST(OptimizedPretenuringNestedObjectLiterals) {
TEST(OptimizedPretenuringNestedDoubleLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 20000;"
+ "var elements = new Array(number_elements);"
"function f() {"
- " var numbers = [[1.1, 1.2, 1.3],[2.1, 2.2, 2.3]];"
- " return numbers;"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = [[1.1, 1.2, 1.3],[2.1, 2.2, 2.3]];"
+ " }"
+ " return elements[number_elements - 1];"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2493,6 +2541,7 @@ TEST(OptimizedAllocationArrayLiterals) {
TEST(OptimizedPretenuringCallNew) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
i::FLAG_pretenuring_call_new = true;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
@@ -2752,14 +2801,14 @@ TEST(Regress2211) {
for (int i = 0; i < 2; i++) {
// Store identity hash first and common hidden property second.
- v8::Handle<v8::Object> obj = v8::Object::New();
+ v8::Handle<v8::Object> obj = v8::Object::New(CcTest::isolate());
Handle<JSObject> internal_obj = v8::Utils::OpenHandle(*obj);
CHECK(internal_obj->HasFastProperties());
// In the first iteration, set hidden value first and identity hash second.
// In the second iteration, reverse the order.
if (i == 0) obj->SetHiddenValue(v8_str("key string"), value);
- JSObject::SetIdentityHash(internal_obj, hash);
+ JSObject::SetIdentityHash(internal_obj, handle(hash, CcTest::i_isolate()));
if (i == 1) obj->SetHiddenValue(v8_str("key string"), value);
// Check values.
@@ -2777,7 +2826,7 @@ TEST(Regress2211) {
}
-TEST(IncrementalMarkingClearsTypeFeedbackCells) {
+TEST(IncrementalMarkingClearsTypeFeedbackInfo) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -2799,32 +2848,35 @@ TEST(IncrementalMarkingClearsTypeFeedbackCells) {
// originating from two different native contexts.
CcTest::global()->Set(v8_str("fun1"), fun1);
CcTest::global()->Set(v8_str("fun2"), fun2);
- CompileRun("function f(a, b) { a(); b(); } f(fun1, fun2);");
+ CompileRun("function f(a, b) { a(); b(); }"
+ "f(fun1, fun2);" // Run twice to skip premonomorphic state.
+ "f(fun1, fun2)");
+
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
CcTest::global()->Get(v8_str("f"))));
- Handle<TypeFeedbackCells> cells(TypeFeedbackInfo::cast(
- f->shared()->code()->type_feedback_info())->type_feedback_cells());
- CHECK_EQ(2, cells->CellCount());
- CHECK(cells->GetCell(0)->value()->IsJSFunction());
- CHECK(cells->GetCell(1)->value()->IsJSFunction());
+ Handle<FixedArray> feedback_vector(TypeFeedbackInfo::cast(
+ f->shared()->code()->type_feedback_info())->feedback_vector());
+
+ CHECK_EQ(2, feedback_vector->length());
+ CHECK(feedback_vector->get(0)->IsJSFunction());
+ CHECK(feedback_vector->get(1)->IsJSFunction());
SimulateIncrementalMarking();
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK_EQ(2, cells->CellCount());
- CHECK(cells->GetCell(0)->value()->IsTheHole());
- CHECK(cells->GetCell(1)->value()->IsTheHole());
+ CHECK_EQ(2, feedback_vector->length());
+ CHECK(feedback_vector->get(0)->IsTheHole());
+ CHECK(feedback_vector->get(1)->IsTheHole());
}
static Code* FindFirstIC(Code* code, Code::Kind kind) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET_CONTEXT);
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
for (RelocIterator it(code, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Code* target = Code::GetCodeFromTargetAddress(info->target_address());
@@ -2836,7 +2888,7 @@ static Code* FindFirstIC(Code* code, Code::Kind kind) {
}
-TEST(IncrementalMarkingPreservesMonomorhpicIC) {
+TEST(IncrementalMarkingPreservesMonomorphicIC) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -2861,7 +2913,7 @@ TEST(IncrementalMarkingPreservesMonomorhpicIC) {
}
-TEST(IncrementalMarkingClearsMonomorhpicIC) {
+TEST(IncrementalMarkingClearsMonomorphicIC) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -2895,7 +2947,7 @@ TEST(IncrementalMarkingClearsMonomorhpicIC) {
}
-TEST(IncrementalMarkingClearsPolymorhpicIC) {
+TEST(IncrementalMarkingClearsPolymorphicIC) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -2963,13 +3015,12 @@ void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
// after the first time the accessor is fired. We use external string
// to check whether the data is being released since the external string
// resource's callback is fired when the external string is GC'ed.
- FLAG_use_ic = false; // ICs retain objects.
- FLAG_concurrent_recompilation = false;
v8::HandleScope scope(CcTest::isolate());
SourceResource* resource = new SourceResource(i::StrDup(source));
{
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::String> source_string = v8::String::NewExternal(resource);
+ v8::Handle<v8::String> source_string =
+ v8::String::NewExternal(CcTest::isolate(), resource);
CcTest::heap()->CollectAllAvailableGarbage();
v8::Script::Compile(source_string)->Run();
CHECK(!resource->IsDisposed());
@@ -2987,6 +3038,8 @@ void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
TEST(ReleaseStackTraceData) {
+ FLAG_use_ic = false; // ICs retain objects.
+ FLAG_concurrent_recompilation = false;
CcTest::InitializeVM();
static const char* source1 = "var error = null; "
/* Normal Error */ "try { "
@@ -3030,69 +3083,6 @@ TEST(ReleaseStackTraceData) {
}
-TEST(Regression144230) {
- i::FLAG_stress_compaction = false;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- HandleScope scope(isolate);
-
- // First make sure that the uninitialized CallIC stub is on a single page
- // that will later be selected as an evacuation candidate.
- {
- HandleScope inner_scope(isolate);
- AlwaysAllocateScope always_allocate;
- SimulateFullSpace(heap->code_space());
- isolate->stub_cache()->ComputeCallInitialize(9, RelocInfo::CODE_TARGET);
- }
-
- // Second compile a CallIC and execute it once so that it gets patched to
- // the pre-monomorphic stub. These code objects are on yet another page.
- {
- HandleScope inner_scope(isolate);
- AlwaysAllocateScope always_allocate;
- SimulateFullSpace(heap->code_space());
- CompileRun("var o = { f:function(a,b,c,d,e,f,g,h,i) {}};"
- "function call() { o.f(1,2,3,4,5,6,7,8,9); };"
- "call();");
- }
-
- // Third we fill up the last page of the code space so that it does not get
- // chosen as an evacuation candidate.
- {
- HandleScope inner_scope(isolate);
- AlwaysAllocateScope always_allocate;
- CompileRun("for (var i = 0; i < 2000; i++) {"
- " eval('function f' + i + '() { return ' + i +'; };' +"
- " 'f' + i + '();');"
- "}");
- }
- heap->CollectAllGarbage(Heap::kNoGCFlags);
-
- // Fourth is the tricky part. Make sure the code containing the CallIC is
- // visited first without clearing the IC. The shared function info is then
- // visited later, causing the CallIC to be cleared.
- Handle<String> name = isolate->factory()->InternalizeUtf8String("call");
- Handle<GlobalObject> global(isolate->context()->global_object());
- Handle<Smi> zero(Smi::FromInt(0), isolate);
- MaybeObject* maybe_call = global->GetProperty(*name);
- JSFunction* call = JSFunction::cast(maybe_call->ToObjectChecked());
- JSReceiver::SetProperty(global, name, zero, NONE, kNonStrictMode);
- isolate->compilation_cache()->Clear();
- call->shared()->set_ic_age(heap->global_ic_age() + 1);
- Handle<Object> call_code(call->code(), isolate);
- Handle<Object> call_function(call, isolate);
-
- // Now we are ready to mess up the heap.
- heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask);
-
- // Either heap verification caught the problem already or we go kaboom once
- // the CallIC is executed the next time.
- JSReceiver::SetProperty(global, name, call_function, NONE, kNonStrictMode);
- CompileRun("call();");
-}
-
-
TEST(Regress159140) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
@@ -3320,7 +3310,7 @@ TEST(Regress169928) {
v8_str("fastliteralcase(mote, 2.5);");
v8::Local<v8::String> array_name = v8_str("mote");
- CcTest::global()->Set(array_name, v8::Int32::New(0));
+ CcTest::global()->Set(array_name, v8::Int32::New(CcTest::isolate(), 0));
// First make sure we flip spaces
CcTest::heap()->CollectGarbage(NEW_SPACE);
@@ -3485,8 +3475,7 @@ TEST(DeferredHandles) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate));
- v8::ImplementationUtilities::HandleScopeData* data =
- isolate->handle_scope_data();
+ HandleScopeData* data = isolate->handle_scope_data();
Handle<Object> init(heap->empty_string(), isolate);
while (data->next < data->limit) {
Handle<Object> obj(heap->empty_string(), isolate);
@@ -3498,7 +3487,7 @@ TEST(DeferredHandles) {
DeferredHandleScope deferred(isolate);
DummyVisitor visitor;
isolate->handle_scope_implementer()->Iterate(&visitor);
- deferred.Detach();
+ delete deferred.Detach();
}
@@ -3516,3 +3505,188 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
ASSERT(marking->IsComplete());
}
+
+
+TEST(DisableInlineAllocation) {
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ CompileRun("function test() {"
+ " var x = [];"
+ " for (var i = 0; i < 10; i++) {"
+ " x[i] = [ {}, [1,2,3], [1,x,3] ];"
+ " }"
+ "}"
+ "function run() {"
+ " %OptimizeFunctionOnNextCall(test);"
+ " test();"
+ " %DeoptimizeFunction(test);"
+ "}");
+
+ // Warm-up with inline allocation enabled.
+ CompileRun("test(); test(); run();");
+
+ // Run test with inline allocation disabled.
+ CcTest::heap()->DisableInlineAllocation();
+ CompileRun("run()");
+
+ // Run test with inline allocation disabled and pretenuring.
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
+ CompileRun("run()");
+
+ // Run test with inline allocation re-enabled.
+ CcTest::heap()->EnableInlineAllocation();
+ CompileRun("run()");
+}
+
+
+static int AllocationSitesCount(Heap* heap) {
+ int count = 0;
+ for (Object* site = heap->allocation_sites_list();
+ !(site->IsUndefined());
+ site = AllocationSite::cast(site)->weak_next()) {
+ count++;
+ }
+ return count;
+}
+
+
+TEST(EnsureAllocationSiteDependentCodesProcessed) {
+ if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+ GlobalHandles* global_handles = isolate->global_handles();
+
+ if (!isolate->use_crankshaft()) return;
+
+ // The allocation site at the head of the list is ours.
+ Handle<AllocationSite> site;
+ {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+
+ int count = AllocationSitesCount(heap);
+ CompileRun("var bar = function() { return (new Array()); };"
+ "var a = bar();"
+ "bar();"
+ "bar();");
+
+ // One allocation site should have been created.
+ int new_count = AllocationSitesCount(heap);
+ CHECK_EQ(new_count, (count + 1));
+ site = Handle<AllocationSite>::cast(
+ global_handles->Create(
+ AllocationSite::cast(heap->allocation_sites_list())));
+
+ CompileRun("%OptimizeFunctionOnNextCall(bar); bar();");
+
+ DependentCode::GroupStartIndexes starts(site->dependent_code());
+ CHECK_GE(starts.number_of_entries(), 1);
+ int index = starts.at(DependentCode::kAllocationSiteTransitionChangedGroup);
+ CHECK(site->dependent_code()->is_code_at(index));
+ Code* function_bar = site->dependent_code()->code_at(index);
+ Handle<JSFunction> bar_handle =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ CcTest::global()->Get(v8_str("bar"))));
+ CHECK_EQ(bar_handle->code(), function_bar);
+ }
+
+ // Now make sure that a gc should get rid of the function, even though we
+ // still have the allocation site alive.
+ for (int i = 0; i < 4; i++) {
+ heap->CollectAllGarbage(false);
+ }
+
+ // The site still exists because of our global handle, but the code is no
+ // longer referred to by dependent_code().
+ DependentCode::GroupStartIndexes starts(site->dependent_code());
+ int index = starts.at(DependentCode::kAllocationSiteTransitionChangedGroup);
+ CHECK(!(site->dependent_code()->is_code_at(index)));
+}
+
+
+TEST(CellsInOptimizedCodeAreWeak) {
+ if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ i::FLAG_weak_embedded_objects_in_optimized_code = true;
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_crankshaft()) return;
+ HandleScope outer_scope(heap->isolate());
+ Handle<Code> code;
+ {
+ LocalContext context;
+ HandleScope scope(heap->isolate());
+
+ CompileRun("bar = (function() {"
+ " function bar() {"
+ " return foo(1);"
+ " };"
+ " var foo = function(x) { with (x) { return 1 + x; } };"
+ " bar(foo);"
+ " bar(foo);"
+ " bar(foo);"
+ " %OptimizeFunctionOnNextCall(bar);"
+ " bar(foo);"
+ " return bar;})();");
+
+ Handle<JSFunction> bar =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ CcTest::global()->Get(v8_str("bar"))));
+ code = scope.CloseAndEscape(Handle<Code>(bar->code()));
+ }
+
+ // Now make sure that a gc should get rid of the function
+ for (int i = 0; i < 4; i++) {
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ }
+
+ ASSERT(code->marked_for_deoptimization());
+}
+
+
+TEST(ObjectsInOptimizedCodeAreWeak) {
+ if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ i::FLAG_weak_embedded_objects_in_optimized_code = true;
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_crankshaft()) return;
+ HandleScope outer_scope(heap->isolate());
+ Handle<Code> code;
+ {
+ LocalContext context;
+ HandleScope scope(heap->isolate());
+
+ CompileRun("function bar() {"
+ " return foo(1);"
+ "};"
+ "function foo(x) { with (x) { return 1 + x; } };"
+ "bar();"
+ "bar();"
+ "bar();"
+ "%OptimizeFunctionOnNextCall(bar);"
+ "bar();");
+
+ Handle<JSFunction> bar =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ CcTest::global()->Get(v8_str("bar"))));
+ code = scope.CloseAndEscape(Handle<Code>(bar->code()));
+ }
+
+ // Now make sure that a gc should get rid of the function
+ for (int i = 0; i < 4; i++) {
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ }
+
+ ASSERT(code->marked_for_deoptimization());
+}
diff --git a/deps/v8/test/cctest/test-javascript-a64.cc b/deps/v8/test/cctest/test-javascript-a64.cc
new file mode 100644
index 0000000000..bd7a2b2851
--- /dev/null
+++ b/deps/v8/test/cctest/test-javascript-a64.cc
@@ -0,0 +1,266 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <limits.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "isolate.h"
+#include "compilation-cache.h"
+#include "execution.h"
+#include "snapshot.h"
+#include "platform.h"
+#include "utils.h"
+#include "cctest.h"
+#include "parser.h"
+#include "unicode-inl.h"
+
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::FunctionTemplate;
+using ::v8::Handle;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Message;
+using ::v8::MessageCallback;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::StackTrace;
+using ::v8::String;
+using ::v8::TryCatch;
+using ::v8::Undefined;
+using ::v8::V8;
+using ::v8::Value;
+
+static void ExpectBoolean(bool expected, Local<Value> result) {
+ CHECK(result->IsBoolean());
+ CHECK_EQ(expected, result->BooleanValue());
+}
+
+
+static void ExpectInt32(int32_t expected, Local<Value> result) {
+ CHECK(result->IsInt32());
+ CHECK_EQ(expected, result->Int32Value());
+}
+
+
+static void ExpectNumber(double expected, Local<Value> result) {
+ CHECK(result->IsNumber());
+ CHECK_EQ(expected, result->NumberValue());
+}
+
+
+static void ExpectUndefined(Local<Value> result) {
+ CHECK(result->IsUndefined());
+}
+
+
+// Tests are sorted by order of implementation.
+
+TEST(simple_value) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun("0x271828;");
+ ExpectInt32(0x271828, result);
+}
+
+
+TEST(global_variable) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun("var my_global_var = 0x123; my_global_var;");
+ ExpectInt32(0x123, result);
+}
+
+
+TEST(simple_function_call) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+ "function foo() { return 0x314; }"
+ "foo();");
+ ExpectInt32(0x314, result);
+}
+
+
+TEST(binary_op) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+ "function foo() {"
+ " var a = 0x1200;"
+ " var b = 0x0035;"
+ " return 2 * (a + b - 1);"
+ "}"
+ "foo();");
+ ExpectInt32(0x2468, result);
+}
+
+static void if_comparison_testcontext_helper(
+ char const * op,
+ char const * lhs,
+ char const * rhs,
+ int expect) {
+ char buffer[256];
+ snprintf(buffer, sizeof(buffer),
+ "var lhs = %s;"
+ "var rhs = %s;"
+ "if ( lhs %s rhs ) { 1; }"
+ "else { 0; }",
+ lhs, rhs, op);
+ Local<Value> result = CompileRun(buffer);
+ ExpectInt32(expect, result);
+}
+
+static void if_comparison_effectcontext_helper(
+ char const * op,
+ char const * lhs,
+ char const * rhs,
+ int expect) {
+ char buffer[256];
+ snprintf(buffer, sizeof(buffer),
+ "var lhs = %s;"
+ "var rhs = %s;"
+ "var test = lhs %s rhs;"
+ "if ( test ) { 1; }"
+ "else { 0; }",
+ lhs, rhs, op);
+ Local<Value> result = CompileRun(buffer);
+ ExpectInt32(expect, result);
+}
+
+static void if_comparison_helper(
+ char const * op,
+ int expect_when_lt,
+ int expect_when_eq,
+ int expect_when_gt) {
+ // TODO(all): Non-SMI tests.
+
+ if_comparison_testcontext_helper(op, "1", "3", expect_when_lt);
+ if_comparison_testcontext_helper(op, "5", "5", expect_when_eq);
+ if_comparison_testcontext_helper(op, "9", "7", expect_when_gt);
+
+ if_comparison_effectcontext_helper(op, "1", "3", expect_when_lt);
+ if_comparison_effectcontext_helper(op, "5", "5", expect_when_eq);
+ if_comparison_effectcontext_helper(op, "9", "7", expect_when_gt);
+}
+
+
+TEST(if_comparison) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ if_comparison_helper("<", 1, 0, 0);
+ if_comparison_helper("<=", 1, 1, 0);
+ if_comparison_helper("==", 0, 1, 0);
+ if_comparison_helper("===", 0, 1, 0);
+ if_comparison_helper(">=", 0, 1, 1);
+ if_comparison_helper(">", 0, 0, 1);
+ if_comparison_helper("!=", 1, 0, 1);
+ if_comparison_helper("!==", 1, 0, 1);
+}
+
+
+TEST(unary_plus) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ // SMI
+ result = CompileRun("var a = 1234; +a");
+ ExpectInt32(1234, result);
+ // Number
+ result = CompileRun("var a = 1234.5; +a");
+ ExpectNumber(1234.5, result);
+ // String (SMI)
+ result = CompileRun("var a = '1234'; +a");
+ ExpectInt32(1234, result);
+ // String (Number)
+ result = CompileRun("var a = '1234.5'; +a");
+ ExpectNumber(1234.5, result);
+ // Check side effects.
+ result = CompileRun("var a = 1234; +(a = 4321); a");
+ ExpectInt32(4321, result);
+}
+
+
+TEST(unary_minus) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ result = CompileRun("var a = 1234; -a");
+ ExpectInt32(-1234, result);
+ result = CompileRun("var a = 1234.5; -a");
+ ExpectNumber(-1234.5, result);
+ result = CompileRun("var a = 1234; -(a = 4321); a");
+ ExpectInt32(4321, result);
+ result = CompileRun("var a = '1234'; -a");
+ ExpectInt32(-1234, result);
+ result = CompileRun("var a = '1234.5'; -a");
+ ExpectNumber(-1234.5, result);
+}
+
+
+TEST(unary_void) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ result = CompileRun("var a = 1234; void (a);");
+ ExpectUndefined(result);
+ result = CompileRun("var a = 0; void (a = 42); a");
+ ExpectInt32(42, result);
+ result = CompileRun("var a = 0; void (a = 42);");
+ ExpectUndefined(result);
+}
+
+
+TEST(unary_not) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ result = CompileRun("var a = 1234; !a");
+ ExpectBoolean(false, result);
+ result = CompileRun("var a = 0; !a");
+ ExpectBoolean(true, result);
+ result = CompileRun("var a = 0; !(a = 1234); a");
+ ExpectInt32(1234, result);
+ result = CompileRun("var a = '1234'; !a");
+ ExpectBoolean(false, result);
+ result = CompileRun("var a = ''; !a");
+ ExpectBoolean(true, result);
+ result = CompileRun("var a = 1234; !!a");
+ ExpectBoolean(true, result);
+ result = CompileRun("var a = 0; !!a");
+ ExpectBoolean(false, result);
+ result = CompileRun("var a = 0; if ( !a ) { 1; } else { 0; }");
+ ExpectInt32(1, result);
+ result = CompileRun("var a = 1; if ( !a ) { 1; } else { 0; }");
+ ExpectInt32(0, result);
+}
diff --git a/deps/v8/test/cctest/test-js-a64-variables.cc b/deps/v8/test/cctest/test-js-a64-variables.cc
new file mode 100644
index 0000000000..df3f4a8295
--- /dev/null
+++ b/deps/v8/test/cctest/test-js-a64-variables.cc
@@ -0,0 +1,143 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Adapted from test/mjsunit/compiler/variables.js
+
+#include <limits.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "isolate.h"
+#include "compilation-cache.h"
+#include "execution.h"
+#include "snapshot.h"
+#include "platform.h"
+#include "utils.h"
+#include "cctest.h"
+#include "parser.h"
+#include "unicode-inl.h"
+
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::FunctionTemplate;
+using ::v8::Handle;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Message;
+using ::v8::MessageCallback;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::StackTrace;
+using ::v8::String;
+using ::v8::TryCatch;
+using ::v8::Undefined;
+using ::v8::V8;
+using ::v8::Value;
+
+static void ExpectInt32(int32_t expected, Local<Value> result) {
+ CHECK(result->IsInt32());
+ CHECK_EQ(expected, result->Int32Value());
+}
+
+
+// Global variables.
+TEST(global_variables) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"var x = 0;"
+"function f0() { return x; }"
+"f0();");
+ ExpectInt32(0, result);
+}
+
+
+// Parameters.
+TEST(parameters) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f1(x) { return x; }"
+"f1(1);");
+ ExpectInt32(1, result);
+}
+
+
+// Stack-allocated locals.
+TEST(stack_allocated_locals) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f2() { var x = 2; return x; }"
+"f2();");
+ ExpectInt32(2, result);
+}
+
+
+// Context-allocated locals. Local function forces x into f3's context.
+TEST(context_allocated_locals) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f3(x) {"
+" function g() { return x; }"
+" return x;"
+"}"
+"f3(3);");
+ ExpectInt32(3, result);
+}
+
+
+// Local function reads x from an outer context.
+TEST(read_from_outer_context) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f4(x) {"
+" function g() { return x; }"
+" return g();"
+"}"
+"f4(4);");
+ ExpectInt32(4, result);
+}
+
+
+// Local function reads x from an outer context.
+TEST(lookup_slots) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f5(x) {"
+" with ({}) return x;"
+"}"
+"f5(5);");
+ ExpectInt32(5, result);
+}
diff --git a/deps/v8/test/cctest/test-libplatform-task-queue.cc b/deps/v8/test/cctest/test-libplatform-task-queue.cc
new file mode 100644
index 0000000000..4765515763
--- /dev/null
+++ b/deps/v8/test/cctest/test-libplatform-task-queue.cc
@@ -0,0 +1,95 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "libplatform/task-queue.h"
+#include "test-libplatform.h"
+
+using namespace v8::internal;
+
+
+TEST(TaskQueueBasic) {
+ TaskCounter task_counter;
+
+ TaskQueue queue;
+
+ TestTask* task = new TestTask(&task_counter);
+ queue.Append(task);
+ CHECK_EQ(1, task_counter.GetCount());
+ CHECK_EQ(task, queue.GetNext());
+ delete task;
+ CHECK_EQ(0, task_counter.GetCount());
+
+ queue.Terminate();
+ CHECK_EQ(NULL, queue.GetNext());
+}
+
+
+class ReadQueueTask : public TestTask {
+ public:
+ ReadQueueTask(TaskCounter* task_counter, TaskQueue* queue)
+ : TestTask(task_counter, true), queue_(queue) {}
+ virtual ~ReadQueueTask() {}
+
+ virtual void Run() V8_OVERRIDE {
+ TestTask::Run();
+ CHECK_EQ(NULL, queue_->GetNext());
+ }
+
+ private:
+ TaskQueue* queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadQueueTask);
+};
+
+
+TEST(TaskQueueTerminateMultipleReaders) {
+ TaskQueue queue;
+ TaskCounter task_counter;
+ ReadQueueTask* read1 = new ReadQueueTask(&task_counter, &queue);
+ ReadQueueTask* read2 = new ReadQueueTask(&task_counter, &queue);
+
+ TestWorkerThread thread1(read1);
+ TestWorkerThread thread2(read2);
+
+ thread1.Start();
+ thread2.Start();
+
+ CHECK_EQ(2, task_counter.GetCount());
+
+ thread1.Signal();
+ thread2.Signal();
+
+ queue.Terminate();
+
+ thread1.Join();
+ thread2.Join();
+
+ CHECK_EQ(0, task_counter.GetCount());
+}
diff --git a/deps/v8/test/mjsunit/proto-poison.js b/deps/v8/test/cctest/test-libplatform-worker-thread.cc
index ca3b5d6d06..090d6e1a18 100644
--- a/deps/v8/test/mjsunit/proto-poison.js
+++ b/deps/v8/test/cctest/test-libplatform-worker-thread.cc
@@ -25,21 +25,40 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Check that the __proto__ accessor is properly poisoned when extracted
-// from Object.prototype using the property descriptor.
-var desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
-assertEquals("function", typeof desc.get);
-assertEquals("function", typeof desc.set);
-assertDoesNotThrow("desc.get.call({})");
-assertThrows("desc.set.call({})", TypeError);
-
-// Check that any redefinition of the __proto__ accessor causes poising
-// to cease and the accessor to be extracted normally.
-Object.defineProperty(Object.prototype, "__proto__", { get:function(){} });
-desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
-assertDoesNotThrow("desc.get.call({})");
-assertThrows("desc.set.call({})", TypeError);
-Object.defineProperty(Object.prototype, "__proto__", { set:function(x){} });
-desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
-assertDoesNotThrow("desc.get.call({})");
-assertDoesNotThrow("desc.set.call({})");
+#include "v8.h"
+
+#include "cctest.h"
+#include "libplatform/task-queue.h"
+#include "libplatform/worker-thread.h"
+#include "test-libplatform.h"
+
+using namespace v8::internal;
+
+
+TEST(WorkerThread) {
+ TaskQueue queue;
+ TaskCounter task_counter;
+
+ TestTask* task1 = new TestTask(&task_counter, true);
+ TestTask* task2 = new TestTask(&task_counter, true);
+ TestTask* task3 = new TestTask(&task_counter, true);
+ TestTask* task4 = new TestTask(&task_counter, true);
+
+ WorkerThread* thread1 = new WorkerThread(&queue);
+ WorkerThread* thread2 = new WorkerThread(&queue);
+
+ CHECK_EQ(4, task_counter.GetCount());
+
+ queue.Append(task1);
+ queue.Append(task2);
+ queue.Append(task3);
+ queue.Append(task4);
+
+ // TaskQueue ASSERTs that it is empty in its destructor.
+ queue.Terminate();
+
+ delete thread1;
+ delete thread2;
+
+ CHECK_EQ(0, task_counter.GetCount());
+}
diff --git a/deps/v8/test/cctest/test-libplatform.h b/deps/v8/test/cctest/test-libplatform.h
new file mode 100644
index 0000000000..e32770eeda
--- /dev/null
+++ b/deps/v8/test/cctest/test-libplatform.h
@@ -0,0 +1,120 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef TEST_LIBPLATFORM_H_
+#define TEST_LIBPLATFORM_H_
+
+#include "v8.h"
+
+#include "cctest.h"
+
+using namespace v8::internal;
+
+class TaskCounter {
+ public:
+ TaskCounter() : counter_(0) {}
+ ~TaskCounter() { CHECK_EQ(0, counter_); }
+
+ int GetCount() const {
+ LockGuard<Mutex> guard(&lock_);
+ return counter_;
+ }
+
+ void Inc() {
+ LockGuard<Mutex> guard(&lock_);
+ ++counter_;
+ }
+
+ void Dec() {
+ LockGuard<Mutex> guard(&lock_);
+ --counter_;
+ }
+
+ private:
+ mutable Mutex lock_;
+ int counter_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskCounter);
+};
+
+
+class TestTask : public v8::Task {
+ public:
+ TestTask(TaskCounter* task_counter, bool expected_to_run)
+ : task_counter_(task_counter),
+ expected_to_run_(expected_to_run),
+ executed_(false) {
+ task_counter_->Inc();
+ }
+
+ explicit TestTask(TaskCounter* task_counter)
+ : task_counter_(task_counter), expected_to_run_(false), executed_(false) {
+ task_counter_->Inc();
+ }
+
+ virtual ~TestTask() {
+ CHECK_EQ(expected_to_run_, executed_);
+ task_counter_->Dec();
+ }
+
+ // v8::Task implementation.
+ virtual void Run() V8_OVERRIDE { executed_ = true; }
+
+ private:
+ TaskCounter* task_counter_;
+ bool expected_to_run_;
+ bool executed_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestTask);
+};
+
+
+class TestWorkerThread : public Thread {
+ public:
+ explicit TestWorkerThread(v8::Task* task)
+ : Thread("libplatform TestWorkerThread"), semaphore_(0), task_(task) {}
+ virtual ~TestWorkerThread() {}
+
+ void Signal() { semaphore_.Signal(); }
+
+ // Thread implementation.
+ virtual void Run() V8_OVERRIDE {
+ semaphore_.Wait();
+ if (task_) {
+ task_->Run();
+ delete task_;
+ }
+ }
+
+ private:
+ Semaphore semaphore_;
+ v8::Task* task_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWorkerThread);
+};
+
+#endif // TEST_LIBPLATFORM_H_
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 1094512276..12b35a5c4d 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -667,7 +667,7 @@ TEST(Regress1433) {
v8::HandleScope handle_scope(isolate);
v8::Handle<Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- v8::Handle<String> source = v8::String::New("1+1");
+ v8::Handle<String> source = v8::String::NewFromUtf8(isolate, "1+1");
v8::Handle<Script> script = v8::Script::Compile(source);
v8::Handle<Value> result = script->Run();
v8::String::Utf8Value utf8(result);
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 4a0717d09d..5b6858e553 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -38,6 +38,7 @@
#include "isolate.h"
#include "log.h"
#include "sampler.h"
+#include "trace-extension.h"
#include "vm-state-inl.h"
using v8::Function;
@@ -52,135 +53,9 @@ using v8::internal::Address;
using v8::internal::Handle;
using v8::internal::Isolate;
using v8::internal::JSFunction;
-using v8::internal::RegisterState;
using v8::internal::TickSample;
-static struct {
- TickSample* sample;
-} trace_env = { NULL };
-
-
-static void InitTraceEnv(TickSample* sample) {
- trace_env.sample = sample;
-}
-
-
-static void DoTrace(Address fp) {
- RegisterState regs;
- regs.fp = fp;
- // sp is only used to define stack high bound
- regs.sp =
- reinterpret_cast<Address>(trace_env.sample) - 10240;
- trace_env.sample->Init(CcTest::i_isolate(), regs);
-}
-
-
-// Hide c_entry_fp to emulate situation when sampling is done while
-// pure JS code is being executed
-static void DoTraceHideCEntryFPAddress(Address fp) {
- v8::internal::Address saved_c_frame_fp =
- *(CcTest::i_isolate()->c_entry_fp_address());
- CHECK(saved_c_frame_fp);
- *(CcTest::i_isolate()->c_entry_fp_address()) = 0;
- DoTrace(fp);
- *(CcTest::i_isolate()->c_entry_fp_address()) = saved_c_frame_fp;
-}
-
-
-// --- T r a c e E x t e n s i o n ---
-
-class TraceExtension : public v8::Extension {
- public:
- TraceExtension() : v8::Extension("v8/trace", kSource) { }
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<String> name);
- static void Trace(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void JSTrace(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void JSEntrySP(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void JSEntrySPLevel2(const v8::FunctionCallbackInfo<v8::Value>& args);
- private:
- static Address GetFP(const v8::FunctionCallbackInfo<v8::Value>& args);
- static const char* kSource;
-};
-
-
-const char* TraceExtension::kSource =
- "native function trace();"
- "native function js_trace();"
- "native function js_entry_sp();"
- "native function js_entry_sp_level2();";
-
-v8::Handle<v8::FunctionTemplate> TraceExtension::GetNativeFunction(
- v8::Handle<String> name) {
- if (name->Equals(String::New("trace"))) {
- return v8::FunctionTemplate::New(TraceExtension::Trace);
- } else if (name->Equals(String::New("js_trace"))) {
- return v8::FunctionTemplate::New(TraceExtension::JSTrace);
- } else if (name->Equals(String::New("js_entry_sp"))) {
- return v8::FunctionTemplate::New(TraceExtension::JSEntrySP);
- } else if (name->Equals(String::New("js_entry_sp_level2"))) {
- return v8::FunctionTemplate::New(TraceExtension::JSEntrySPLevel2);
- } else {
- CHECK(false);
- return v8::Handle<v8::FunctionTemplate>();
- }
-}
-
-
-Address TraceExtension::GetFP(const v8::FunctionCallbackInfo<v8::Value>& args) {
- // Convert frame pointer from encoding as smis in the arguments to a pointer.
- CHECK_EQ(2, args.Length()); // Ignore second argument on 32-bit platform.
-#if defined(V8_HOST_ARCH_32_BIT)
- Address fp = *reinterpret_cast<Address*>(*args[0]);
-#elif defined(V8_HOST_ARCH_64_BIT)
- int64_t low_bits = *reinterpret_cast<uint64_t*>(*args[0]) >> 32;
- int64_t high_bits = *reinterpret_cast<uint64_t*>(*args[1]);
- Address fp = reinterpret_cast<Address>(high_bits | low_bits);
-#else
-#error Host architecture is neither 32-bit nor 64-bit.
-#endif
- printf("Trace: %p\n", fp);
- return fp;
-}
-
-
-void TraceExtension::Trace(const v8::FunctionCallbackInfo<v8::Value>& args) {
- DoTrace(GetFP(args));
-}
-
-
-void TraceExtension::JSTrace(const v8::FunctionCallbackInfo<v8::Value>& args) {
- DoTraceHideCEntryFPAddress(GetFP(args));
-}
-
-
-static Address GetJsEntrySp() {
- CHECK_NE(NULL, CcTest::i_isolate()->thread_local_top());
- return CcTest::i_isolate()->js_entry_sp();
-}
-
-
-void TraceExtension::JSEntrySP(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK_NE(0, GetJsEntrySp());
-}
-
-
-void TraceExtension::JSEntrySPLevel2(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::HandleScope scope(args.GetIsolate());
- const Address js_entry_sp = GetJsEntrySp();
- CHECK_NE(0, js_entry_sp);
- CompileRun("js_entry_sp();");
- CHECK_EQ(js_entry_sp, GetJsEntrySp());
-}
-
-
-static TraceExtension kTraceExtension;
-v8::DeclareExtension kTraceExtensionDeclaration(&kTraceExtension);
-
-
static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) {
i::Code* code = function->code();
return code->contains(addr);
@@ -230,7 +105,7 @@ static void construct_call(const v8::FunctionCallbackInfo<v8::Value>& args) {
void CreateFramePointerGrabberConstructor(v8::Local<v8::Context> context,
const char* constructor_name) {
Local<v8::FunctionTemplate> constructor_template =
- v8::FunctionTemplate::New(construct_call);
+ v8::FunctionTemplate::New(context->GetIsolate(), construct_call);
constructor_template->SetClassName(v8_str("FPGrabber"));
Local<Function> fun = constructor_template->GetFunction();
context->Global()->Set(v8_str(constructor_name), fun);
@@ -269,7 +144,7 @@ TEST(CFromJSStackTrace) {
i::FLAG_use_inlining = false;
TickSample sample;
- InitTraceEnv(&sample);
+ i::TraceExtension::InitTraceEnv(&sample);
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
@@ -294,7 +169,7 @@ TEST(CFromJSStackTrace) {
// TickSample::Trace
CHECK(sample.has_external_callback);
- CHECK_EQ(FUNCTION_ADDR(TraceExtension::Trace), sample.external_callback);
+ CHECK_EQ(FUNCTION_ADDR(i::TraceExtension::Trace), sample.external_callback);
// Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
int base = 0;
@@ -317,7 +192,7 @@ TEST(PureJSStackTrace) {
i::FLAG_use_inlining = false;
TickSample sample;
- InitTraceEnv(&sample);
+ i::TraceExtension::InitTraceEnv(&sample);
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
@@ -347,7 +222,7 @@ TEST(PureJSStackTrace) {
//
CHECK(sample.has_external_callback);
- CHECK_EQ(FUNCTION_ADDR(TraceExtension::JSTrace), sample.external_callback);
+ CHECK_EQ(FUNCTION_ADDR(i::TraceExtension::JSTrace), sample.external_callback);
// Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
int base = 0;
@@ -369,7 +244,7 @@ static void CFuncDoTrace(byte dummy_parameter) {
#else
#error Unexpected platform.
#endif
- DoTrace(fp);
+ i::TraceExtension::DoTrace(fp);
}
@@ -388,7 +263,7 @@ static int CFunc(int depth) {
// get any meaningful info here.
TEST(PureCStackTrace) {
TickSample sample;
- InitTraceEnv(&sample);
+ i::TraceExtension::InitTraceEnv(&sample);
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
v8::Context::Scope context_scope(context);
@@ -401,11 +276,11 @@ TEST(JsEntrySp) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
v8::Context::Scope context_scope(context);
- CHECK_EQ(0, GetJsEntrySp());
+ CHECK_EQ(0, i::TraceExtension::GetJsEntrySp());
CompileRun("a = 1; b = a + 1;");
- CHECK_EQ(0, GetJsEntrySp());
+ CHECK_EQ(0, i::TraceExtension::GetJsEntrySp());
CompileRun("js_entry_sp();");
- CHECK_EQ(0, GetJsEntrySp());
+ CHECK_EQ(0, i::TraceExtension::GetJsEntrySp());
CompileRun("js_entry_sp_level2();");
- CHECK_EQ(0, GetJsEntrySp());
+ CHECK_EQ(0, i::TraceExtension::GetJsEntrySp());
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 2cf2a77445..65310369cb 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -203,7 +203,7 @@ class LoopingNonJsThread : public LoopingThread {
double i = 10;
SignalRunning();
while (IsRunning()) {
- i = sin(i);
+ i = std::sin(i);
i::OS::Sleep(1);
}
}
@@ -305,9 +305,11 @@ TEST(Issue23768) {
env->Enter();
SimpleExternalString source_ext_str("(function ext() {})();");
- v8::Local<v8::String> source = v8::String::NewExternal(&source_ext_str);
+ v8::Local<v8::String> source =
+ v8::String::NewExternal(CcTest::isolate(), &source_ext_str);
// Script needs to have a name in order to trigger InitLineEnds execution.
- v8::Handle<v8::String> origin = v8::String::New("issue-23768-test");
+ v8::Handle<v8::String> origin =
+ v8::String::NewFromUtf8(CcTest::isolate(), "issue-23768-test");
v8::Handle<v8::Script> evil_script = v8::Script::Compile(source, origin);
CHECK(!evil_script.IsEmpty());
CHECK(!evil_script->Run().IsEmpty());
@@ -327,17 +329,20 @@ static void ObjMethod1(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(LogCallbacks) {
+ v8::Isolate* isolate = CcTest::isolate();
ScopedLoggerInitializer initialize_logger;
Logger* logger = initialize_logger.logger();
v8::Local<v8::FunctionTemplate> obj =
- v8::Local<v8::FunctionTemplate>::New(CcTest::isolate(),
- v8::FunctionTemplate::New());
+ v8::Local<v8::FunctionTemplate>::New(isolate,
+ v8::FunctionTemplate::New(isolate));
obj->SetClassName(v8_str("Obj"));
v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
- v8::Local<v8::Signature> signature = v8::Signature::New(obj);
+ v8::Local<v8::Signature> signature =
+ v8::Signature::New(isolate, obj);
proto->Set(v8_str("method1"),
- v8::FunctionTemplate::New(ObjMethod1,
+ v8::FunctionTemplate::New(isolate,
+ ObjMethod1,
v8::Handle<v8::Value>(),
signature),
static_cast<v8::PropertyAttribute>(v8::DontDelete));
@@ -358,6 +363,7 @@ TEST(LogCallbacks) {
ObjMethod1);
CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
+ log.Dispose();
}
@@ -376,12 +382,13 @@ static void Prop2Getter(v8::Local<v8::String> property,
TEST(LogAccessorCallbacks) {
+ v8::Isolate* isolate = CcTest::isolate();
ScopedLoggerInitializer initialize_logger;
Logger* logger = initialize_logger.logger();
v8::Local<v8::FunctionTemplate> obj =
- v8::Local<v8::FunctionTemplate>::New(CcTest::isolate(),
- v8::FunctionTemplate::New());
+ v8::Local<v8::FunctionTemplate>::New(isolate,
+ v8::FunctionTemplate::New(isolate));
obj->SetClassName(v8_str("Obj"));
v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
@@ -414,6 +421,7 @@ TEST(LogAccessorCallbacks) {
Prop2Getter);
CHECK_NE(NULL,
StrNStr(log.start(), prop2_getter_record.start(), log.length()));
+ log.Dispose();
}
@@ -451,12 +459,14 @@ TEST(EquivalenceOfLoggingAndTraversal) {
i::Vector<const char> log(
i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
CHECK(exists);
- v8::Handle<v8::String> log_str = v8::String::New(log.start(), log.length());
+ v8::Handle<v8::String> log_str = v8::String::NewFromUtf8(
+ CcTest::isolate(), log.start(), v8::String::kNormalString, log.length());
initialize_logger.env()->Global()->Set(v8_str("_log"), log_str);
i::Vector<const unsigned char> source = TestSources::GetScriptsSource();
- v8::Handle<v8::String> source_str = v8::String::New(
- reinterpret_cast<const char*>(source.start()), source.length());
+ v8::Handle<v8::String> source_str = v8::String::NewFromUtf8(
+ CcTest::isolate(), reinterpret_cast<const char*>(source.start()),
+ v8::String::kNormalString, source.length());
v8::TryCatch try_catch;
v8::Handle<v8::Script> script = v8::Script::Compile(source_str, v8_str(""));
if (script.IsEmpty()) {
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 77f7abbd44..d40b8a50c0 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -132,5 +132,101 @@ TEST(CopyBytes) {
}
+typedef int (*F5)(void*, void*, void*, void*, void*);
+
+
+TEST(LoadAndStoreWithRepresentation) {
+ v8::internal::V8::Initialize(NULL);
+
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
+ __ sub(sp, sp, Operand(1 * kPointerSize));
+ Label exit;
+
+ // Test 1.
+ __ mov(r0, Operand(1)); // Test number.
+ __ mov(r1, Operand(0));
+ __ str(r1, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r2, Operand(-1));
+ __ Store(r2, MemOperand(sp, 0 * kPointerSize), Representation::UInteger8());
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r2, Operand(255));
+ __ cmp(r3, r2);
+ __ b(ne, &exit);
+ __ mov(r2, Operand(255));
+ __ Load(r3, MemOperand(sp, 0 * kPointerSize), Representation::UInteger8());
+ __ cmp(r3, r2);
+ __ b(ne, &exit);
+
+ // Test 2.
+ __ mov(r0, Operand(2)); // Test number.
+ __ mov(r1, Operand(0));
+ __ str(r1, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r2, Operand(-1));
+ __ Store(r2, MemOperand(sp, 0 * kPointerSize), Representation::Integer8());
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r2, Operand(255));
+ __ cmp(r3, r2);
+ __ b(ne, &exit);
+ __ mov(r2, Operand(-1));
+ __ Load(r3, MemOperand(sp, 0 * kPointerSize), Representation::Integer8());
+ __ cmp(r3, r2);
+ __ b(ne, &exit);
+
+ // Test 3.
+ __ mov(r0, Operand(3)); // Test number.
+ __ mov(r1, Operand(0));
+ __ str(r1, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r2, Operand(-1));
+ __ Store(r2, MemOperand(sp, 0 * kPointerSize), Representation::UInteger16());
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r2, Operand(65535));
+ __ cmp(r3, r2);
+ __ b(ne, &exit);
+ __ mov(r2, Operand(65535));
+ __ Load(r3, MemOperand(sp, 0 * kPointerSize), Representation::UInteger16());
+ __ cmp(r3, r2);
+ __ b(ne, &exit);
+
+ // Test 4.
+ __ mov(r0, Operand(4)); // Test number.
+ __ mov(r1, Operand(0));
+ __ str(r1, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r2, Operand(-1));
+ __ Store(r2, MemOperand(sp, 0 * kPointerSize), Representation::Integer16());
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r2, Operand(65535));
+ __ cmp(r3, r2);
+ __ b(ne, &exit);
+ __ mov(r2, Operand(-1));
+ __ Load(r3, MemOperand(sp, 0 * kPointerSize), Representation::Integer16());
+ __ cmp(r3, r2);
+ __ b(ne, &exit);
+
+ __ mov(r0, Operand(0)); // Success.
+ __ bind(&exit);
+ __ add(sp, sp, Operand(1 * kPointerSize));
+ __ bx(lr);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+
+ // Call the function from C++.
+ F5 f = FUNCTION_CAST<F5>(Code::cast(code)->entry());
+ CHECK_EQ(0, CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+}
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-ia32.cc b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
new file mode 100644
index 0000000000..38c738f1d0
--- /dev/null
+++ b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
@@ -0,0 +1,139 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "factory.h"
+#include "platform.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+#if __GNUC__
+#define STDCALL __attribute__((stdcall))
+#else
+#define STDCALL __stdcall
+#endif
+
+typedef int STDCALL F0Type();
+typedef F0Type* F0;
+
+#define __ masm->
+
+
+TEST(LoadAndStoreWithRepresentation) {
+ v8::internal::V8::Initialize(NULL);
+
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
+ __ push(ebx);
+ __ push(edx);
+ __ sub(esp, Immediate(1 * kPointerSize));
+ Label exit;
+
+ // Test 1.
+ __ mov(eax, Immediate(1)); // Test number.
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
+ __ mov(ebx, Immediate(-1));
+ __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger8());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ mov(edx, Immediate(255));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+ __ Load(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger8());
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+
+ // Test 2.
+ __ mov(eax, Immediate(2)); // Test number.
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
+ __ mov(ebx, Immediate(-1));
+ __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer8());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ mov(edx, Immediate(255));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+ __ Load(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer8());
+ __ mov(edx, Immediate(-1));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+
+ // Test 3.
+ __ mov(eax, Immediate(3)); // Test number.
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
+ __ mov(ebx, Immediate(-1));
+ __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer16());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ mov(edx, Immediate(65535));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+ __ Load(edx, Operand(esp, 0 * kPointerSize), Representation::Integer16());
+ __ mov(ebx, Immediate(-1));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+
+ // Test 4.
+ __ mov(eax, Immediate(4)); // Test number.
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
+ __ mov(ebx, Immediate(-1));
+ __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger16());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ mov(edx, Immediate(65535));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+ __ Load(edx, Operand(esp, 0 * kPointerSize), Representation::UInteger16());
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+
+ __ xor_(eax, eax); // Success.
+ __ bind(&exit);
+ __ add(esp, Immediate(1 * kPointerSize));
+ __ pop(edx);
+ __ pop(ebx);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index b200949679..3154aac596 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -132,5 +132,47 @@ TEST(CopyBytes) {
}
+static void TestNaN(const char *code) {
+  // NaN values differ between the MIPS and x86 architectures, and the
+  // TEST(NaNx) tests check the case where an x86 NaN value is serialized
+  // into the snapshot on the simulator during cross-compilation.
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::NewContext(PRINT_EXTENSION);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Script> script = v8::Script::Compile(v8_str(code));
+ v8::Local<v8::Object> result = v8::Local<v8::Object>::Cast(script->Run());
+ // Have to populate the handle manually, as it's not Cast-able.
+ i::Handle<i::JSObject> o =
+ v8::Utils::OpenHandle<v8::Object, i::JSObject>(result);
+ i::Handle<i::JSArray> array1(reinterpret_cast<i::JSArray*>(*o));
+ i::FixedDoubleArray* a = i::FixedDoubleArray::cast(array1->elements());
+ double value = a->get_scalar(0);
+ CHECK(std::isnan(value) &&
+ i::BitCast<uint64_t>(value) ==
+ i::BitCast<uint64_t>(
+ i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+}
+
+
+TEST(NaN0) {
+ TestNaN(
+ "var result;"
+ "for (var i = 0; i < 2; i++) {"
+ " result = new Array(Number.NaN, Number.POSITIVE_INFINITY);"
+ "}"
+ "result;");
+}
+
+
+TEST(NaN1) {
+ TestNaN(
+ "var result;"
+ "for (var i = 0; i < 2; i++) {"
+ " result = [NaN];"
+ "}"
+ "result;");
+}
+
#undef __
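The helper above cannot simply compare the loaded array element against a NaN constant with ==, because NaN never compares equal to anything, including itself; it therefore compares raw 64-bit patterns, mirroring i::BitCast<uint64_t>. A small illustration in plain C++ (not V8 code; std::numeric_limits<double>::quiet_NaN() stands in for the canonical NaN the test really checks):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

static uint64_t BitPattern(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // portable stand-in for BitCast
  return bits;
}

int main() {
  // Hypothetical stand-in for the canonical NaN value checked by the test.
  double canonical = std::numeric_limits<double>::quiet_NaN();
  double value = canonical;

  assert(std::isnan(value));
  assert(value != canonical);                           // == is useless for NaN
  assert(BitPattern(value) == BitPattern(canonical));   // bit comparison works
  return 0;
}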
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 61914b58c3..3daed5b456 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -35,50 +35,53 @@
#include "serialize.h"
#include "cctest.h"
-using v8::internal::Assembler;
-using v8::internal::CodeDesc;
-using v8::internal::Condition;
-using v8::internal::FUNCTION_CAST;
-using v8::internal::HandleScope;
-using v8::internal::Immediate;
-using v8::internal::Isolate;
-using v8::internal::Label;
-using v8::internal::MacroAssembler;
-using v8::internal::OS;
-using v8::internal::Operand;
-using v8::internal::RelocInfo;
-using v8::internal::Representation;
-using v8::internal::Smi;
-using v8::internal::SmiIndex;
-using v8::internal::byte;
-using v8::internal::carry;
-using v8::internal::greater;
-using v8::internal::greater_equal;
-using v8::internal::kIntSize;
-using v8::internal::kPointerSize;
-using v8::internal::kSmiTagMask;
-using v8::internal::kSmiValueSize;
-using v8::internal::less_equal;
-using v8::internal::negative;
-using v8::internal::not_carry;
-using v8::internal::not_equal;
-using v8::internal::not_zero;
-using v8::internal::positive;
-using v8::internal::r11;
-using v8::internal::r13;
-using v8::internal::r14;
-using v8::internal::r15;
-using v8::internal::r8;
-using v8::internal::r9;
-using v8::internal::rax;
-using v8::internal::rbp;
-using v8::internal::rbx;
-using v8::internal::rcx;
-using v8::internal::rdi;
-using v8::internal::rdx;
-using v8::internal::rsi;
-using v8::internal::rsp;
-using v8::internal::times_pointer_size;
+namespace i = v8::internal;
+using i::Address;
+using i::Assembler;
+using i::CodeDesc;
+using i::Condition;
+using i::FUNCTION_CAST;
+using i::HandleScope;
+using i::Immediate;
+using i::Isolate;
+using i::Label;
+using i::MacroAssembler;
+using i::OS;
+using i::Operand;
+using i::RelocInfo;
+using i::Representation;
+using i::Smi;
+using i::SmiIndex;
+using i::byte;
+using i::carry;
+using i::greater;
+using i::greater_equal;
+using i::kIntSize;
+using i::kPointerSize;
+using i::kSmiTagMask;
+using i::kSmiValueSize;
+using i::less_equal;
+using i::negative;
+using i::not_carry;
+using i::not_equal;
+using i::equal;
+using i::not_zero;
+using i::positive;
+using i::r11;
+using i::r13;
+using i::r14;
+using i::r15;
+using i::r8;
+using i::r9;
+using i::rax;
+using i::rbp;
+using i::rbx;
+using i::rcx;
+using i::rdi;
+using i::rdx;
+using i::rsi;
+using i::rsp;
+using i::times_pointer_size;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
@@ -96,8 +99,8 @@ typedef int (*F0)();
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
- __ push(v8::internal::kSmiConstantRegister);
- __ push(v8::internal::kRootRegister);
+ __ push(i::kSmiConstantRegister);
+ __ push(i::kRootRegister);
__ InitializeSmiConstantRegister();
__ InitializeRootRegister();
}
@@ -106,11 +109,11 @@ static void EntryCode(MacroAssembler* masm) {
static void ExitCode(MacroAssembler* masm) {
// Return -1 if kSmiConstantRegister was clobbered during the test.
__ Move(rdx, Smi::FromInt(1));
- __ cmpq(rdx, v8::internal::kSmiConstantRegister);
+ __ cmpq(rdx, i::kSmiConstantRegister);
__ movq(rdx, Immediate(-1));
__ cmovq(not_equal, rax, rdx);
- __ pop(v8::internal::kRootRegister);
- __ pop(v8::internal::kSmiConstantRegister);
+ __ pop(i::kRootRegister);
+ __ pop(i::kSmiConstantRegister);
}
@@ -151,7 +154,7 @@ static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
// Test that we can move a Smi value literally into a register.
TEST(SmiMove) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -162,7 +165,6 @@ TEST(SmiMove) {
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -239,7 +241,7 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
// Test that we can compare smis for equality (and more).
TEST(SmiCompare) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
@@ -252,7 +254,6 @@ TEST(SmiCompare) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -291,7 +292,7 @@ TEST(SmiCompare) {
TEST(Integer32ToSmi) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -303,7 +304,6 @@ TEST(Integer32ToSmi) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -402,7 +402,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
ASSERT(Smi::IsValid(result));
__ movl(rax, Immediate(id));
__ Move(r8, Smi::FromInt(static_cast<int>(result)));
- __ movq(rcx, x, RelocInfo::NONE64);
+ __ movq(rcx, x);
__ movq(r11, rcx);
__ Integer64PlusConstantToSmi(rdx, rcx, y);
__ cmpq(rdx, r8);
@@ -420,7 +420,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
TEST(Integer64PlusConstantToSmi) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -432,7 +432,6 @@ TEST(Integer64PlusConstantToSmi) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -465,7 +464,7 @@ TEST(Integer64PlusConstantToSmi) {
TEST(SmiCheck) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -477,7 +476,6 @@ TEST(SmiCheck) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
Condition cond;
@@ -713,7 +711,7 @@ void TestSmiNeg(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiNeg) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
@@ -726,7 +724,6 @@ TEST(SmiNeg) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -770,7 +767,7 @@ static void SmiAddTest(MacroAssembler* masm,
__ j(not_equal, exit);
__ incq(rax);
- __ SmiAdd(rcx, rcx, rdx, exit); \
+ __ SmiAdd(rcx, rcx, rdx, exit);
__ cmpq(rcx, r8);
__ j(not_equal, exit);
@@ -789,13 +786,30 @@ static void SmiAddTest(MacroAssembler* masm,
__ movl(rcx, Immediate(first));
__ Integer32ToSmi(rcx, rcx);
+ i::SmiOperationExecutionMode mode;
+ mode.Add(i::PRESERVE_SOURCE_REGISTER);
+ mode.Add(i::BAILOUT_ON_OVERFLOW);
__ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(second), exit);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(second), mode, exit);
__ cmpq(r9, r8);
__ j(not_equal, exit);
__ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), exit);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), mode, exit);
+ __ cmpq(rcx, r8);
+ __ j(not_equal, exit);
+
+ __ movl(rcx, Immediate(first));
+ __ Integer32ToSmi(rcx, rcx);
+
+ mode.RemoveAll();
+ mode.Add(i::PRESERVE_SOURCE_REGISTER);
+ mode.Add(i::BAILOUT_ON_NO_OVERFLOW);
+ Label done;
+ __ incq(rax);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), mode, &done);
+ __ jmp(exit);
+ __ bind(&done);
__ cmpq(rcx, r8);
__ j(not_equal, exit);
}
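The SmiOperationExecutionMode flags threaded through the SmiAddConstant calls above determine how the label argument is used: BAILOUT_ON_OVERFLOW branches to the label when the addition leaves the Smi range, BAILOUT_ON_NO_OVERFLOW branches when it does not, and PRESERVE_SOURCE_REGISTER keeps the source register intact on the bailout path. A rough model in plain C++ (not the V8 macro assembler; a 32-bit int stands in for the Smi value range):

#include <cassert>
#include <cstdint>

enum class Bailout { kOnOverflow, kOnNoOverflow };

// Returns true when the bailout label would be taken.
static bool SmiAddConstantModel(int32_t src, int32_t constant, Bailout mode,
                                int32_t* dst) {
  int64_t wide = static_cast<int64_t>(src) + constant;
  bool overflow = wide < INT32_MIN || wide > INT32_MAX;
  if (!overflow) *dst = static_cast<int32_t>(wide);
  return (mode == Bailout::kOnOverflow) ? overflow : !overflow;
}

int main() {
  int32_t out = 0;
  // No overflow: BAILOUT_ON_OVERFLOW falls through and produces the sum...
  assert(!SmiAddConstantModel(1, 2, Bailout::kOnOverflow, &out) && out == 3);
  // ...while BAILOUT_ON_NO_OVERFLOW branches to the label instead.
  assert(SmiAddConstantModel(1, 2, Bailout::kOnNoOverflow, &out));
  // Overflow: BAILOUT_ON_OVERFLOW branches to the label (e.g. a bailout exit).
  assert(SmiAddConstantModel(INT32_MAX, 1, Bailout::kOnOverflow, &out));
  return 0;
}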
@@ -835,11 +849,14 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
+ i::SmiOperationExecutionMode mode;
+ mode.Add(i::PRESERVE_SOURCE_REGISTER);
+ mode.Add(i::BAILOUT_ON_OVERFLOW);
__ movq(rcx, r11);
{
Label overflow_ok;
__ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(y_min), &overflow_ok);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -850,7 +867,7 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
{
Label overflow_ok;
__ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_min), &overflow_ok);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -886,7 +903,7 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
{
Label overflow_ok;
__ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(y_max), &overflow_ok);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -894,25 +911,27 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
+ mode.RemoveAll();
+ mode.Add(i::BAILOUT_ON_OVERFLOW);
{
Label overflow_ok;
__ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_max), &overflow_ok);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
__ cmpq(rcx, r11);
- __ j(not_equal, exit);
+ __ j(equal, exit);
}
}
TEST(SmiAdd) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
&actual_size,
true));
CHECK(buffer);
@@ -921,7 +940,6 @@ TEST(SmiAdd) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -987,15 +1005,30 @@ static void SmiSubTest(MacroAssembler* masm,
__ cmpq(rcx, r8);
__ j(not_equal, exit);
+ i::SmiOperationExecutionMode mode;
+ mode.Add(i::PRESERVE_SOURCE_REGISTER);
+ mode.Add(i::BAILOUT_ON_OVERFLOW);
__ Move(rcx, Smi::FromInt(first));
-
__ incq(rax); // Test 4.
- __ SmiSubConstant(r9, rcx, Smi::FromInt(second), exit);
- __ cmpq(r9, r8);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), mode, exit);
+ __ cmpq(rcx, r8);
__ j(not_equal, exit);
+ __ Move(rcx, Smi::FromInt(first));
__ incq(rax); // Test 5.
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), exit);
+ __ SmiSubConstant(r9, rcx, Smi::FromInt(second), mode, exit);
+ __ cmpq(r9, r8);
+ __ j(not_equal, exit);
+
+ mode.RemoveAll();
+ mode.Add(i::PRESERVE_SOURCE_REGISTER);
+ mode.Add(i::BAILOUT_ON_NO_OVERFLOW);
+ __ Move(rcx, Smi::FromInt(first));
+ Label done;
+ __ incq(rax); // Test 6.
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), mode, &done);
+ __ jmp(exit);
+ __ bind(&done);
__ cmpq(rcx, r8);
__ j(not_equal, exit);
}
@@ -1035,11 +1068,15 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
+ i::SmiOperationExecutionMode mode;
+ mode.Add(i::PRESERVE_SOURCE_REGISTER);
+ mode.Add(i::BAILOUT_ON_OVERFLOW);
+
__ movq(rcx, r11);
{
Label overflow_ok;
__ incq(rax);
- __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), &overflow_ok);
+ __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -1050,7 +1087,7 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
{
Label overflow_ok;
__ incq(rax);
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), &overflow_ok);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -1086,7 +1123,7 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
{
Label overflow_ok;
__ incq(rax);
- __ SmiSubConstant(r9, rcx, Smi::FromInt(y_max), &overflow_ok);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -1094,25 +1131,28 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
+ mode.RemoveAll();
+ mode.Add(i::BAILOUT_ON_OVERFLOW);
+ __ movq(rcx, r11);
{
Label overflow_ok;
__ incq(rax);
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), &overflow_ok);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
__ cmpq(rcx, r11);
- __ j(not_equal, exit);
+ __ j(equal, exit);
}
}
TEST(SmiSub) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
&actual_size,
true));
CHECK(buffer);
@@ -1121,7 +1161,6 @@ TEST(SmiSub) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -1200,7 +1239,7 @@ void TestSmiMul(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiMul) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -1212,7 +1251,6 @@ TEST(SmiMul) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -1306,7 +1344,7 @@ void TestSmiDiv(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiDiv) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
@@ -1319,7 +1357,6 @@ TEST(SmiDiv) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -1417,7 +1454,7 @@ void TestSmiMod(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiMod) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
@@ -1430,7 +1467,6 @@ TEST(SmiMod) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -1515,11 +1551,11 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiIndex) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 5,
&actual_size,
true));
CHECK(buffer);
@@ -1528,7 +1564,6 @@ TEST(SmiIndex) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -1585,11 +1620,11 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiSelectNonSmi) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
&actual_size,
true));
CHECK(buffer);
@@ -1598,7 +1633,6 @@ TEST(SmiSelectNonSmi) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false); // Avoid inline checks.
EntryCode(masm);
Label exit;
@@ -1665,11 +1699,11 @@ void TestSmiAnd(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiAnd) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
&actual_size,
true));
CHECK(buffer);
@@ -1678,7 +1712,6 @@ TEST(SmiAnd) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -1747,11 +1780,11 @@ void TestSmiOr(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiOr) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
&actual_size,
true));
CHECK(buffer);
@@ -1760,7 +1793,6 @@ TEST(SmiOr) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -1831,11 +1863,11 @@ void TestSmiXor(MacroAssembler* masm, Label* exit, int id, int x, int y) {
TEST(SmiXor) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
&actual_size,
true));
CHECK(buffer);
@@ -1844,7 +1876,6 @@ TEST(SmiXor) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -1899,7 +1930,7 @@ void TestSmiNot(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiNot) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
@@ -1912,7 +1943,6 @@ TEST(SmiNot) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -1996,11 +2026,11 @@ void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(SmiShiftLeft) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 7,
&actual_size,
true));
CHECK(buffer);
@@ -2009,7 +2039,6 @@ TEST(SmiShiftLeft) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -2103,11 +2132,11 @@ void TestSmiShiftLogicalRight(MacroAssembler* masm,
TEST(SmiShiftLogicalRight) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 5,
&actual_size,
true));
CHECK(buffer);
@@ -2116,7 +2145,6 @@ TEST(SmiShiftLogicalRight) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -2173,11 +2201,11 @@ void TestSmiShiftArithmeticRight(MacroAssembler* masm,
TEST(SmiShiftArithmeticRight) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
&actual_size,
true));
CHECK(buffer);
@@ -2186,7 +2214,6 @@ TEST(SmiShiftArithmeticRight) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -2238,7 +2265,7 @@ void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
- v8::internal::V8::Initialize(NULL);
+ i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
@@ -2251,7 +2278,6 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
EntryCode(masm);
Label exit;
@@ -2279,9 +2305,9 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
TEST(OperandOffset) {
- v8::internal::V8::Initialize(NULL);
- int data[256];
- for (int i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
+ i::V8::Initialize(NULL);
+ uint32_t data[256];
+ for (uint32_t i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
// Allocate an executable page of memory.
size_t actual_size;
@@ -2295,7 +2321,6 @@ TEST(OperandOffset) {
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
- masm->set_allow_stub_calls(false);
Label exit;
EntryCode(masm);
@@ -2322,7 +2347,7 @@ TEST(OperandOffset) {
__ lea(r13, Operand(rbp, -3 * kPointerSize));
__ lea(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2));
- __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE64);
+ __ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
__ movl(rax, Immediate(1));
Operand sp0 = Operand(rsp, 0);
@@ -2648,7 +2673,6 @@ TEST(LoadAndStoreWithRepresentation) {
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
- masm->set_allow_stub_calls(false);
EntryCode(masm);
__ subq(rsp, Immediate(1 * kPointerSize));
Label exit;
@@ -2657,12 +2681,12 @@ TEST(LoadAndStoreWithRepresentation) {
__ movq(rax, Immediate(1)); // Test number.
__ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
__ movq(rcx, Immediate(-1));
- __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Byte());
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::UInteger8());
__ movq(rcx, Operand(rsp, 0 * kPointerSize));
__ movl(rdx, Immediate(255));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Byte());
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::UInteger8());
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
@@ -2731,6 +2755,47 @@ TEST(LoadAndStoreWithRepresentation) {
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
+ // Test 7.
+ __ movq(rax, Immediate(7)); // Test number.
+ __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(rcx, Immediate(-1));
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Integer8());
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ movl(rdx, Immediate(255));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Integer8());
+ __ movq(rcx, Immediate(-1));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ // Test 8.
+ __ movq(rax, Immediate(8)); // Test number.
+ __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(rcx, Immediate(-1));
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Integer16());
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ movl(rdx, Immediate(65535));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Integer16());
+ __ movq(rcx, Immediate(-1));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ // Test 9.
+ __ movq(rax, Immediate(9)); // Test number.
+ __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(rcx, Immediate(-1));
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::UInteger16());
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ movl(rdx, Immediate(65535));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::UInteger16());
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+
__ xor_(rax, rax); // Success.
__ bind(&exit);
__ addq(rsp, Immediate(1 * kPointerSize));
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index e62bdeb074..9e09051305 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -35,6 +35,7 @@
#include <errno.h>
#endif
+#include <utility>
#include "v8.h"
@@ -81,7 +82,7 @@ TEST(Promotion) {
// Allocate a fixed array in the new space.
int array_length =
- (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
+ (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
(4 * kPointerSize);
Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
@@ -106,7 +107,7 @@ TEST(NoPromotion) {
// Allocate a big fixed array in the new space.
int array_length =
- (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
+ (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
(2 * kPointerSize);
Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
@@ -245,12 +246,14 @@ TEST(MapCompact) {
static int NumberOfWeakCalls = 0;
-static void WeakPointerCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void* id) {
- ASSERT(id == reinterpret_cast<void*>(1234));
+static void WeakPointerCallback(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ std::pair<v8::Persistent<v8::Value>*, int>* p =
+ reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
+ data.GetParameter());
+ ASSERT_EQ(1234, p->second);
NumberOfWeakCalls++;
- handle->Dispose();
+ p->first->Reset();
}
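With the new v8::WeakCallbackData signature used above, the callback no longer receives a bare id through the void* parameter; the test instead packs the handle pointer and the id into a std::pair, passes the pair's address, and has the callback reset the handle rather than dispose it. A stripped-down sketch of that pattern in plain C++ (FakeHandle and the callback signature here are illustrative, not the V8 API):

#include <cassert>
#include <utility>

struct FakeHandle {
  bool reset = false;
  void Reset() { reset = true; }
};

static int number_of_calls = 0;

static void Callback(void* parameter) {
  auto* p = static_cast<std::pair<FakeHandle*, int>*>(parameter);
  assert(p->second == 1234);   // the id travels alongside the handle
  number_of_calls++;
  p->first->Reset();           // reset the handle instead of disposing it
}

int main() {
  FakeHandle handle;
  std::pair<FakeHandle*, int> handle_and_id(&handle, 1234);
  Callback(&handle_and_id);
  assert(number_of_calls == 1 && handle.reset);
  return 0;
}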
@@ -268,15 +271,18 @@ TEST(ObjectGroups) {
global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g1c1 =
global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
- global_handles->MakeWeak(g1s1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- global_handles->MakeWeak(g1s2.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- global_handles->MakeWeak(g1c1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
+ std::pair<Handle<Object>*, int> g1s1_and_id(&g1s1, 1234);
+ GlobalHandles::MakeWeak(g1s1.location(),
+ reinterpret_cast<void*>(&g1s1_and_id),
+ &WeakPointerCallback);
+ std::pair<Handle<Object>*, int> g1s2_and_id(&g1s2, 1234);
+ GlobalHandles::MakeWeak(g1s2.location(),
+ reinterpret_cast<void*>(&g1s2_and_id),
+ &WeakPointerCallback);
+ std::pair<Handle<Object>*, int> g1c1_and_id(&g1c1, 1234);
+ GlobalHandles::MakeWeak(g1c1.location(),
+ reinterpret_cast<void*>(&g1c1_and_id),
+ &WeakPointerCallback);
Handle<Object> g2s1 =
global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
@@ -284,15 +290,18 @@ TEST(ObjectGroups) {
global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2c1 =
global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
- global_handles->MakeWeak(g2s1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- global_handles->MakeWeak(g2s2.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- global_handles->MakeWeak(g2c1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
+ std::pair<Handle<Object>*, int> g2s1_and_id(&g2s1, 1234);
+ GlobalHandles::MakeWeak(g2s1.location(),
+ reinterpret_cast<void*>(&g2s1_and_id),
+ &WeakPointerCallback);
+ std::pair<Handle<Object>*, int> g2s2_and_id(&g2s2, 1234);
+ GlobalHandles::MakeWeak(g2s2.location(),
+ reinterpret_cast<void*>(&g2s2_and_id),
+ &WeakPointerCallback);
+ std::pair<Handle<Object>*, int> g2c1_and_id(&g2c1, 1234);
+ GlobalHandles::MakeWeak(g2c1.location(),
+ reinterpret_cast<void*>(&g2c1_and_id),
+ &WeakPointerCallback);
Handle<Object> root = global_handles->Create(*g1s1); // make a root.
@@ -319,9 +328,10 @@ TEST(ObjectGroups) {
CHECK_EQ(0, NumberOfWeakCalls);
// Weaken the root.
- global_handles->MakeWeak(root.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
+ std::pair<Handle<Object>*, int> root_and_id(&root, 1234);
+ GlobalHandles::MakeWeak(root.location(),
+ reinterpret_cast<void*>(&root_and_id),
+ &WeakPointerCallback);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
global_handles->ClearWeakness(g1c1.location());
@@ -347,12 +357,12 @@ TEST(ObjectGroups) {
CHECK_EQ(5, NumberOfWeakCalls);
// And now make children weak again and collect them.
- global_handles->MakeWeak(g1c1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
- global_handles->MakeWeak(g2c1.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(g1c1.location(),
+ reinterpret_cast<void*>(&g1c1_and_id),
+ &WeakPointerCallback);
+ GlobalHandles::MakeWeak(g2c1.location(),
+ reinterpret_cast<void*>(&g2c1_and_id),
+ &WeakPointerCallback);
heap->CollectGarbage(OLD_POINTER_SPACE);
CHECK_EQ(7, NumberOfWeakCalls);
@@ -486,6 +496,7 @@ TEST(BootUpMemoryUse) {
intptr_t initial_memory = MemoryInUse();
// Avoid flakiness.
FLAG_crankshaft = false;
+ FLAG_concurrent_osr = false;
FLAG_concurrent_recompilation = false;
// Only Linux has the proc filesystem and only if it is mapped. If it's not
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
new file mode 100644
index 0000000000..9662effa50
--- /dev/null
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -0,0 +1,79 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "cctest.h"
+
+using namespace v8::internal;
+
+
+static void SetUpNewSpaceWithPoisonedMementoAtTop() {
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ NewSpace* new_space = heap->new_space();
+
+ // Make sure we can allocate some objects without causing a GC later.
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+
+  // Allocate a string; the GC may suspect a memento behind the string.
+ Handle<SeqOneByteString> string = isolate->factory()->NewRawOneByteString(12);
+ CHECK(*string);
+
+ // Create an allocation memento behind the string with a garbage allocation
+ // site pointer.
+ AllocationMemento* memento =
+ reinterpret_cast<AllocationMemento*>(new_space->top() + kHeapObjectTag);
+ memento->set_map_no_write_barrier(heap->allocation_memento_map());
+ memento->set_allocation_site(
+ reinterpret_cast<AllocationSite*>(kHeapObjectTag), SKIP_WRITE_BARRIER);
+}
+
+
+TEST(Regress340063) {
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ v8::HandleScope scope(CcTest::isolate());
+
+ SetUpNewSpaceWithPoisonedMementoAtTop();
+
+ // Call GC to see if we can handle a poisonous memento right after the
+ // current new space top pointer.
+ CcTest::i_isolate()->heap()->CollectAllGarbage(
+ Heap::kAbortIncrementalMarkingMask);
+}
+
+
+TEST(BadMementoAfterTopForceScavenge) {
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ v8::HandleScope scope(CcTest::isolate());
+
+ SetUpNewSpaceWithPoisonedMementoAtTop();
+
+  // Force a GC to test the poisoned memento handling.
+ CcTest::i_isolate()->heap()->CollectGarbage(i::NEW_SPACE);
+}
diff --git a/deps/v8/test/cctest/test-microtask-delivery.cc b/deps/v8/test/cctest/test-microtask-delivery.cc
new file mode 100644
index 0000000000..4db760dd39
--- /dev/null
+++ b/deps/v8/test/cctest/test-microtask-delivery.cc
@@ -0,0 +1,137 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+
+using namespace v8;
+namespace i = v8::internal;
+
+namespace {
+class HarmonyIsolate {
+ public:
+ HarmonyIsolate() {
+ i::FLAG_harmony_observation = true;
+ i::FLAG_harmony_promises = true;
+ isolate_ = Isolate::New();
+ isolate_->Enter();
+ }
+
+ ~HarmonyIsolate() {
+ isolate_->Exit();
+ isolate_->Dispose();
+ }
+
+ Isolate* GetIsolate() const { return isolate_; }
+
+ private:
+ Isolate* isolate_;
+};
+}
+
+
+TEST(MicrotaskDeliverySimple) {
+ HarmonyIsolate isolate;
+ HandleScope scope(isolate.GetIsolate());
+ LocalContext context(isolate.GetIsolate());
+ CompileRun(
+ "var ordering = [];"
+ "var resolver = {};"
+ "function handler(resolve) { resolver.resolve = resolve; }"
+ "var obj = {};"
+ "var observeOrders = [1, 4];"
+ "function observer() {"
+ "ordering.push(observeOrders.shift());"
+ "resolver.resolve();"
+ "}"
+ "var p = new Promise(handler);"
+ "p.then(function() {"
+ "ordering.push(2);"
+ "}).then(function() {"
+ "ordering.push(3);"
+ "obj.id++;"
+ "return new Promise(handler);"
+ "}).then(function() {"
+ "ordering.push(5);"
+ "}).then(function() {"
+ "ordering.push(6);"
+ "});"
+ "Object.observe(obj, observer);"
+ "obj.id = 1;");
+ CHECK_EQ(6, CompileRun("ordering.length")->Int32Value());
+ CHECK_EQ(1, CompileRun("ordering[0]")->Int32Value());
+ CHECK_EQ(2, CompileRun("ordering[1]")->Int32Value());
+ CHECK_EQ(3, CompileRun("ordering[2]")->Int32Value());
+ CHECK_EQ(4, CompileRun("ordering[3]")->Int32Value());
+ CHECK_EQ(5, CompileRun("ordering[4]")->Int32Value());
+ CHECK_EQ(6, CompileRun("ordering[5]")->Int32Value());
+}
+
+
+TEST(MicrotaskPerIsolateState) {
+ HarmonyIsolate isolate;
+ HandleScope scope(isolate.GetIsolate());
+ LocalContext context1(isolate.GetIsolate());
+ V8::SetAutorunMicrotasks(isolate.GetIsolate(), false);
+ CompileRun(
+ "var obj = { calls: 0 };");
+ Handle<Value> obj = CompileRun("obj");
+ {
+ LocalContext context2(isolate.GetIsolate());
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ CompileRun(
+ "var resolver = {};"
+ "new Promise(function(resolve) {"
+ "resolver.resolve = resolve;"
+ "}).then(function() {"
+ "obj.calls++;"
+ "});"
+ "(function() {"
+ "resolver.resolve();"
+ "})();");
+ }
+ {
+ LocalContext context3(isolate.GetIsolate());
+ context3->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ CompileRun(
+ "var foo = { id: 1 };"
+ "Object.observe(foo, function() {"
+ "obj.calls++;"
+ "});"
+ "foo.id++;");
+ }
+ {
+ LocalContext context4(isolate.GetIsolate());
+ context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ V8::RunMicrotasks(isolate.GetIsolate());
+ CHECK_EQ(2, CompileRun("obj.calls")->Int32Value());
+ }
+}
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index b4488a603a..0a30d4e275 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -72,23 +72,29 @@ TEST(PerIsolateState) {
Handle<Value> notify_fun2;
{
LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::New("obj"), obj);
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
notify_fun2 = CompileRun(
"(function() { obj.foo = 'baz'; })");
}
Handle<Value> notify_fun3;
{
LocalContext context3(isolate.GetIsolate());
- context3->Global()->Set(String::New("obj"), obj);
+ context3->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
notify_fun3 = CompileRun(
"(function() { obj.foo = 'bat'; })");
}
{
LocalContext context4(isolate.GetIsolate());
- context4->Global()->Set(String::New("observer"), observer);
- context4->Global()->Set(String::New("fun1"), notify_fun1);
- context4->Global()->Set(String::New("fun2"), notify_fun2);
- context4->Global()->Set(String::New("fun3"), notify_fun3);
+ context4->Global()->Set(
+ String::NewFromUtf8(isolate.GetIsolate(), "observer"), observer);
+ context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "fun1"),
+ notify_fun1);
+ context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "fun2"),
+ notify_fun2);
+ context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "fun3"),
+ notify_fun3);
CompileRun("fun1(); fun2(); fun3(); Object.deliverChangeRecords(observer)");
}
CHECK_EQ(1, CompileRun("calls")->Int32Value());
@@ -211,8 +217,10 @@ TEST(ObjectHashTableGrowth) {
{
// As does initializing this context.
LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::New("obj"), obj);
- context2->Global()->Set(String::New("observer"), observer);
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ context2->Global()->Set(
+ String::NewFromUtf8(isolate.GetIsolate(), "observer"), observer);
CompileRun(
"var objArr = [];"
// 100 objects should be enough to make the hash table grow
@@ -234,7 +242,6 @@ TEST(GlobalObjectObservation) {
LocalContext context(isolate.GetIsolate());
HandleScope scope(isolate.GetIsolate());
Handle<Object> global_proxy = context->Global();
- Handle<Object> inner_global = global_proxy->GetPrototype().As<Object>();
CompileRun(
"var records = [];"
"var global = this;"
@@ -247,33 +254,26 @@ TEST(GlobalObjectObservation) {
context->DetachGlobal();
CompileRun("global.bar = 'goodbye';");
CHECK_EQ(1, CompileRun("records.length")->Int32Value());
-
- // Mutating the global object directly still has an effect...
- CompileRun("this.bar = 'goodbye';");
- CHECK_EQ(2, CompileRun("records.length")->Int32Value());
- CHECK(inner_global->StrictEquals(CompileRun("records[1].object")));
-
- // Reattached, back to global proxy.
- context->ReattachGlobal(global_proxy);
- CompileRun("global.baz = 'again';");
- CHECK_EQ(3, CompileRun("records.length")->Int32Value());
- CHECK(global_proxy->StrictEquals(CompileRun("records[2].object")));
+ CompileRun("this.baz = 'goodbye';");
+ CHECK_EQ(1, CompileRun("records.length")->Int32Value());
// Attached to a different context, should not leak mutations
// to the old context.
context->DetachGlobal();
{
LocalContext context2(isolate.GetIsolate());
- context2->DetachGlobal();
- context2->ReattachGlobal(global_proxy);
CompileRun(
"var records2 = [];"
+ "var global = this;"
"Object.observe(this, function(r) { [].push.apply(records2, r) });"
- "this.bat = 'context2';");
+ "this.v1 = 'context2';");
+ context2->DetachGlobal();
+ CompileRun(
+ "global.v2 = 'context2';"
+ "this.v3 = 'context2';");
CHECK_EQ(1, CompileRun("records2.length")->Int32Value());
- CHECK(global_proxy->StrictEquals(CompileRun("records2[0].object")));
}
- CHECK_EQ(3, CompileRun("records.length")->Int32Value());
+ CHECK_EQ(1, CompileRun("records.length")->Int32Value());
// Attaching by passing to Context::New
{
@@ -287,7 +287,7 @@ TEST(GlobalObjectObservation) {
CHECK_EQ(1, CompileRun("records3.length")->Int32Value());
CHECK(global_proxy->StrictEquals(CompileRun("records3[0].object")));
}
- CHECK_EQ(3, CompileRun("records.length")->Int32Value());
+ CHECK_EQ(1, CompileRun("records.length")->Int32Value());
}
@@ -300,7 +300,8 @@ struct RecordExpectation {
// TODO(adamk): Use this helper elsewhere in this file.
-static void ExpectRecords(Handle<Value> records,
+static void ExpectRecords(v8::Isolate* isolate,
+ Handle<Value> records,
const RecordExpectation expectations[],
int num) {
CHECK(records->IsArray());
@@ -311,62 +312,67 @@ static void ExpectRecords(Handle<Value> records,
CHECK(record->IsObject());
Handle<Object> recordObj = record.As<Object>();
CHECK(expectations[i].object->StrictEquals(
- recordObj->Get(String::New("object"))));
- CHECK(String::New(expectations[i].type)->Equals(
- recordObj->Get(String::New("type"))));
+ recordObj->Get(String::NewFromUtf8(isolate, "object"))));
+ CHECK(String::NewFromUtf8(isolate, expectations[i].type)->Equals(
+ recordObj->Get(String::NewFromUtf8(isolate, "type"))));
if (strcmp("splice", expectations[i].type) != 0) {
- CHECK(String::New(expectations[i].name)->Equals(
- recordObj->Get(String::New("name"))));
+ CHECK(String::NewFromUtf8(isolate, expectations[i].name)->Equals(
+ recordObj->Get(String::NewFromUtf8(isolate, "name"))));
if (!expectations[i].old_value.IsEmpty()) {
CHECK(expectations[i].old_value->Equals(
- recordObj->Get(String::New("oldValue"))));
+ recordObj->Get(String::NewFromUtf8(isolate, "oldValue"))));
}
}
}
}
-#define EXPECT_RECORDS(records, expectations) \
- ExpectRecords(records, expectations, ARRAY_SIZE(expectations))
+#define EXPECT_RECORDS(records, expectations) \
+ ExpectRecords(isolate.GetIsolate(), records, expectations, \
+ ARRAY_SIZE(expectations))
TEST(APITestBasicMutation) {
HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ v8::Isolate* v8_isolate = isolate.GetIsolate();
+ HandleScope scope(v8_isolate);
+ LocalContext context(v8_isolate);
Handle<Object> obj = Handle<Object>::Cast(CompileRun(
"var records = [];"
"var obj = {};"
"function observer(r) { [].push.apply(records, r); };"
"Object.observe(obj, observer);"
"obj"));
- obj->Set(String::New("foo"), Number::New(7));
- obj->Set(1, Number::New(2));
+ obj->Set(String::NewFromUtf8(v8_isolate, "foo"),
+ Number::New(v8_isolate, 7));
+ obj->Set(1, Number::New(v8_isolate, 2));
// ForceSet should work just as well as Set
- obj->ForceSet(String::New("foo"), Number::New(3));
- obj->ForceSet(Number::New(1), Number::New(4));
+ obj->ForceSet(String::NewFromUtf8(v8_isolate, "foo"),
+ Number::New(v8_isolate, 3));
+ obj->ForceSet(Number::New(v8_isolate, 1), Number::New(v8_isolate, 4));
// Setting an indexed element via the property setting method
- obj->Set(Number::New(1), Number::New(5));
+ obj->Set(Number::New(v8_isolate, 1), Number::New(v8_isolate, 5));
// Setting with a non-String, non-uint32 key
- obj->Set(Number::New(1.1), Number::New(6), DontDelete);
- obj->Delete(String::New("foo"));
+ obj->Set(Number::New(v8_isolate, 1.1),
+ Number::New(v8_isolate, 6), DontDelete);
+ obj->Delete(String::NewFromUtf8(v8_isolate, "foo"));
obj->Delete(1);
- obj->ForceDelete(Number::New(1.1));
+ obj->ForceDelete(Number::New(v8_isolate, 1.1));
// Force delivery
// TODO(adamk): Should the above set methods trigger delivery themselves?
CompileRun("void 0");
CHECK_EQ(9, CompileRun("records.length")->Int32Value());
const RecordExpectation expected_records[] = {
- { obj, "new", "foo", Handle<Value>() },
- { obj, "new", "1", Handle<Value>() },
+ { obj, "add", "foo", Handle<Value>() },
+ { obj, "add", "1", Handle<Value>() },
// Note: use 7 not 1 below, as the latter triggers a nifty VS10 compiler bug
// where instead of 1.0, a garbage value would be passed into Number::New.
- { obj, "updated", "foo", Number::New(7) },
- { obj, "updated", "1", Number::New(2) },
- { obj, "updated", "1", Number::New(4) },
- { obj, "new", "1.1", Handle<Value>() },
- { obj, "deleted", "foo", Number::New(3) },
- { obj, "deleted", "1", Number::New(5) },
- { obj, "deleted", "1.1", Number::New(6) }
+ { obj, "update", "foo", Number::New(v8_isolate, 7) },
+ { obj, "update", "1", Number::New(v8_isolate, 2) },
+ { obj, "update", "1", Number::New(v8_isolate, 4) },
+ { obj, "add", "1.1", Handle<Value>() },
+ { obj, "delete", "foo", Number::New(v8_isolate, 3) },
+ { obj, "delete", "1", Number::New(v8_isolate, 5) },
+ { obj, "delete", "1.1", Number::New(v8_isolate, 6) }
};
EXPECT_RECORDS(CompileRun("records"), expected_records);
}
@@ -374,16 +380,19 @@ TEST(APITestBasicMutation) {
TEST(HiddenPrototypeObservation) {
HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
- Handle<FunctionTemplate> tmpl = FunctionTemplate::New();
+ v8::Isolate* v8_isolate = isolate.GetIsolate();
+ HandleScope scope(v8_isolate);
+ LocalContext context(v8_isolate);
+ Handle<FunctionTemplate> tmpl = FunctionTemplate::New(v8_isolate);
tmpl->SetHiddenPrototype(true);
- tmpl->InstanceTemplate()->Set(String::New("foo"), Number::New(75));
+ tmpl->InstanceTemplate()->Set(
+ String::NewFromUtf8(v8_isolate, "foo"), Number::New(v8_isolate, 75));
Handle<Object> proto = tmpl->GetFunction()->NewInstance();
- Handle<Object> obj = Object::New();
+ Handle<Object> obj = Object::New(v8_isolate);
obj->SetPrototype(proto);
- context->Global()->Set(String::New("obj"), obj);
- context->Global()->Set(String::New("proto"), proto);
+ context->Global()->Set(String::NewFromUtf8(v8_isolate, "obj"), obj);
+ context->Global()->Set(String::NewFromUtf8(v8_isolate, "proto"),
+ proto);
CompileRun(
"var records;"
"function observer(r) { records = r; };"
@@ -391,13 +400,13 @@ TEST(HiddenPrototypeObservation) {
"obj.foo = 41;" // triggers a notification
"proto.foo = 42;"); // does not trigger a notification
const RecordExpectation expected_records[] = {
- { obj, "updated", "foo", Number::New(75) }
+ { obj, "update", "foo", Number::New(v8_isolate, 75) }
};
EXPECT_RECORDS(CompileRun("records"), expected_records);
- obj->SetPrototype(Null(isolate.GetIsolate()));
+ obj->SetPrototype(Null(v8_isolate));
CompileRun("obj.foo = 43");
const RecordExpectation expected_records2[] = {
- { obj, "new", "foo", Handle<Value>() }
+ { obj, "add", "foo", Handle<Value>() }
};
EXPECT_RECORDS(CompileRun("records"), expected_records2);
obj->SetPrototype(proto);
@@ -407,10 +416,10 @@ TEST(HiddenPrototypeObservation) {
"Object.unobserve(obj, observer);"
"obj.foo = 44;");
const RecordExpectation expected_records3[] = {
- { proto, "new", "bar", Handle<Value>() }
+ { proto, "add", "bar", Handle<Value>() }
// TODO(adamk): The below record should be emitted since proto is observed
// and has been modified. Not clear if this happens in practice.
- // { proto, "updated", "foo", Number::New(43) }
+ // { proto, "update", "foo", Number::New(43) }
};
EXPECT_RECORDS(CompileRun("records"), expected_records3);
}
@@ -504,15 +513,16 @@ static bool BlockAccessKeys(Local<Object> host, Local<Value> key,
static Handle<Object> CreateAccessCheckedObject(
+ v8::Isolate* isolate,
NamedSecurityCallback namedCallback,
IndexedSecurityCallback indexedCallback,
Handle<Value> data = Handle<Value>()) {
- Handle<ObjectTemplate> tmpl = ObjectTemplate::New();
+ Handle<ObjectTemplate> tmpl = ObjectTemplate::New(isolate);
tmpl->SetAccessCheckCallbacks(namedCallback, indexedCallback, data);
Handle<Object> instance = tmpl->NewInstance();
Handle<Object> global = instance->CreationContext()->Global();
- global->Set(String::New("obj"), instance);
- global->Set(kBlockedContextIndex, v8::True());
+ global->Set(String::NewFromUtf8(isolate, "obj"), instance);
+ global->Set(kBlockedContextIndex, v8::True(isolate));
return instance;
}
@@ -525,9 +535,10 @@ TEST(NamedAccessCheck) {
LocalContext context(isolate.GetIsolate());
g_access_block_type = types[i];
Handle<Object> instance = CreateAccessCheckedObject(
+ isolate.GetIsolate(),
NamedAccessAllowUnlessBlocked,
IndexedAccessAlwaysAllowed,
- String::New("foo"));
+ String::NewFromUtf8(isolate.GetIsolate(), "foo"));
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
@@ -536,8 +547,11 @@ TEST(NamedAccessCheck) {
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::New("obj"), instance);
- context2->Global()->Set(String::New("objNoCheck"), obj_no_check);
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ instance);
+ context2->Global()->Set(
+ String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"),
+ obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Object.observe(obj, observer2);"
@@ -548,17 +562,19 @@ TEST(NamedAccessCheck) {
"obj.bar = 'baz';"
"objNoCheck.baz = 'quux'");
const RecordExpectation expected_records2[] = {
- { instance, "new", "foo", Handle<Value>() },
- { instance, "updated", "foo", String::New("bar") },
- { instance, "reconfigured", "foo", Number::New(5) },
- { instance, "new", "bar", Handle<Value>() },
- { obj_no_check, "new", "baz", Handle<Value>() },
+ { instance, "add", "foo", Handle<Value>() },
+ { instance, "update", "foo",
+ String::NewFromUtf8(isolate.GetIsolate(), "bar") },
+ { instance, "reconfigure", "foo",
+ Number::New(isolate.GetIsolate(), 5) },
+ { instance, "add", "bar", Handle<Value>() },
+ { obj_no_check, "add", "baz", Handle<Value>() },
};
EXPECT_RECORDS(CompileRun("records2"), expected_records2);
}
const RecordExpectation expected_records[] = {
- { instance, "new", "bar", Handle<Value>() },
- { obj_no_check, "new", "baz", Handle<Value>() }
+ { instance, "add", "bar", Handle<Value>() },
+ { obj_no_check, "add", "baz", Handle<Value>() }
};
EXPECT_RECORDS(CompileRun("records"), expected_records);
}
@@ -573,8 +589,8 @@ TEST(IndexedAccessCheck) {
LocalContext context(isolate.GetIsolate());
g_access_block_type = types[i];
Handle<Object> instance = CreateAccessCheckedObject(
- NamedAccessAlwaysAllowed, IndexedAccessAllowUnlessBlocked,
- Number::New(7));
+ isolate.GetIsolate(), NamedAccessAlwaysAllowed,
+ IndexedAccessAllowUnlessBlocked, Number::New(isolate.GetIsolate(), 7));
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
@@ -583,8 +599,11 @@ TEST(IndexedAccessCheck) {
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::New("obj"), instance);
- context2->Global()->Set(String::New("objNoCheck"), obj_no_check);
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ instance);
+ context2->Global()->Set(
+ String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"),
+ obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Object.observe(obj, observer2);"
@@ -595,17 +614,18 @@ TEST(IndexedAccessCheck) {
"obj[8] = 'bar';"
"objNoCheck[42] = 'quux'");
const RecordExpectation expected_records2[] = {
- { instance, "new", "7", Handle<Value>() },
- { instance, "updated", "7", String::New("foo") },
- { instance, "reconfigured", "7", Number::New(5) },
- { instance, "new", "8", Handle<Value>() },
- { obj_no_check, "new", "42", Handle<Value>() }
+ { instance, "add", "7", Handle<Value>() },
+ { instance, "update", "7",
+ String::NewFromUtf8(isolate.GetIsolate(), "foo") },
+ { instance, "reconfigure", "7", Number::New(isolate.GetIsolate(), 5) },
+ { instance, "add", "8", Handle<Value>() },
+ { obj_no_check, "add", "42", Handle<Value>() }
};
EXPECT_RECORDS(CompileRun("records2"), expected_records2);
}
const RecordExpectation expected_records[] = {
- { instance, "new", "8", Handle<Value>() },
- { obj_no_check, "new", "42", Handle<Value>() }
+ { instance, "add", "8", Handle<Value>() },
+ { obj_no_check, "add", "42", Handle<Value>() }
};
EXPECT_RECORDS(CompileRun("records"), expected_records);
}
@@ -618,8 +638,8 @@ TEST(SpliceAccessCheck) {
LocalContext context(isolate.GetIsolate());
g_access_block_type = ACCESS_GET;
Handle<Object> instance = CreateAccessCheckedObject(
- NamedAccessAlwaysAllowed, IndexedAccessAllowUnlessBlocked,
- Number::New(1));
+ isolate.GetIsolate(), NamedAccessAlwaysAllowed,
+ IndexedAccessAllowUnlessBlocked, Number::New(isolate.GetIsolate(), 1));
CompileRun("var records = null;"
"obj[1] = 'foo';"
"obj.length = 2;"
@@ -630,8 +650,10 @@ TEST(SpliceAccessCheck) {
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::New("obj"), instance);
- context2->Global()->Set(String::New("objNoCheck"), obj_no_check);
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ instance);
+ context2->Global()->Set(
+ String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Array.observe(obj, observer2);"
@@ -662,7 +684,7 @@ TEST(DisallowAllForAccessKeys) {
HandleScope scope(isolate.GetIsolate());
LocalContext context(isolate.GetIsolate());
Handle<Object> instance = CreateAccessCheckedObject(
- BlockAccessKeys, IndexedAccessAlwaysAllowed);
+ isolate.GetIsolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
@@ -671,8 +693,10 @@ TEST(DisallowAllForAccessKeys) {
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::New("obj"), instance);
- context2->Global()->Set(String::New("objNoCheck"), obj_no_check);
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ instance);
+ context2->Global()->Set(
+ String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Object.observe(obj, observer2);"
@@ -681,14 +705,14 @@ TEST(DisallowAllForAccessKeys) {
"obj[5] = 'baz';"
"objNoCheck.baz = 'quux'");
const RecordExpectation expected_records2[] = {
- { instance, "new", "foo", Handle<Value>() },
- { instance, "new", "5", Handle<Value>() },
- { obj_no_check, "new", "baz", Handle<Value>() },
+ { instance, "add", "foo", Handle<Value>() },
+ { instance, "add", "5", Handle<Value>() },
+ { obj_no_check, "add", "baz", Handle<Value>() },
};
EXPECT_RECORDS(CompileRun("records2"), expected_records2);
}
const RecordExpectation expected_records[] = {
- { obj_no_check, "new", "baz", Handle<Value>() }
+ { obj_no_check, "add", "baz", Handle<Value>() }
};
EXPECT_RECORDS(CompileRun("records"), expected_records);
}
@@ -699,22 +723,24 @@ TEST(AccessCheckDisallowApiModifications) {
HandleScope scope(isolate.GetIsolate());
LocalContext context(isolate.GetIsolate());
Handle<Object> instance = CreateAccessCheckedObject(
- BlockAccessKeys, IndexedAccessAlwaysAllowed);
+ isolate.GetIsolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
CompileRun("var records = null;"
"var observer = function(r) { records = r };"
"Object.observe(obj, observer);");
{
LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::New("obj"), instance);
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ instance);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Object.observe(obj, observer2);");
- instance->Set(5, String::New("bar"));
- instance->Set(String::New("foo"), String::New("bar"));
+ instance->Set(5, String::NewFromUtf8(isolate.GetIsolate(), "bar"));
+ instance->Set(String::NewFromUtf8(isolate.GetIsolate(), "foo"),
+ String::NewFromUtf8(isolate.GetIsolate(), "bar"));
CompileRun(""); // trigger delivery
const RecordExpectation expected_records2[] = {
- { instance, "new", "5", Handle<Value>() },
- { instance, "new", "foo", Handle<Value>() }
+ { instance, "add", "5", Handle<Value>() },
+ { instance, "add", "foo", Handle<Value>() }
};
EXPECT_RECORDS(CompileRun("records2"), expected_records2);
}
@@ -730,8 +756,11 @@ TEST(HiddenPropertiesLeakage) {
"var records = null;"
"var observer = function(r) { records = r };"
"Object.observe(obj, observer);");
- Handle<Value> obj = context->Global()->Get(String::New("obj"));
- Handle<Object>::Cast(obj)->SetHiddenValue(String::New("foo"), Null());
+ Handle<Value> obj =
+ context->Global()->Get(String::NewFromUtf8(isolate.GetIsolate(), "obj"));
+ Handle<Object>::Cast(obj)
+ ->SetHiddenValue(String::NewFromUtf8(isolate.GetIsolate(), "foo"),
+ Null(isolate.GetIsolate()));
CompileRun(""); // trigger delivery
CHECK(CompileRun("records")->IsNull());
}
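The test-object-observe.cc hunks above carry two mechanical changes: the expected Object.observe record types are now spelled "add", "update" and "reconfigure" instead of "new", "updated" and "reconfigured", and every value constructor (String::NewFromUtf8, Number::New, v8::True, Null) takes the target isolate explicitly. Below is a rough, self-contained sketch of the isolate-taking constructors against the 3.24-era embedder API; the initialization sequence is illustrative only and differs in later V8 versions.

#include <cstdio>
#include <v8.h>

int main() {
  // 3.24-era setup: later releases also need a v8::Platform and
  // Isolate::CreateParams, so treat this as a sketch, not a recipe.
  v8::Isolate* isolate = v8::Isolate::New();
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Handle<v8::Context> context = v8::Context::New(isolate);
    v8::Context::Scope context_scope(context);

    // Each constructor names the isolate it allocates in, mirroring the
    // String::New -> String::NewFromUtf8(isolate, ...) rewrites above.
    v8::Handle<v8::String> key = v8::String::NewFromUtf8(isolate, "obj");
    v8::Handle<v8::Number> num = v8::Number::New(isolate, 5);
    context->Global()->Set(key, num);

    v8::String::Utf8Value utf8(key);
    std::printf("set %s on the global object\n", *utf8);
  }
  isolate->Dispose();
  return 0;
}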
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 952cb68cec..30e97aabdc 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -108,6 +108,7 @@ TEST(ScanKeywords) {
TEST(ScanHTMLEndComments) {
v8::V8::Initialize();
v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handles(isolate);
// Regression test. See:
// http://code.google.com/p/chromium/issues/detail?id=53548
@@ -144,15 +145,20 @@ TEST(ScanHTMLEndComments) {
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
for (int i = 0; tests[i]; i++) {
- v8::ScriptData* data =
- v8::ScriptData::PreCompile(isolate, tests[i], i::StrLength(tests[i]));
+ v8::Handle<v8::String> source = v8::String::NewFromUtf8(
+ isolate, tests[i], v8::String::kNormalString, i::StrLength(tests[i]));
+ v8::ScriptData* data = v8::ScriptData::PreCompile(source);
CHECK(data != NULL && !data->HasError());
delete data;
}
for (int i = 0; fail_tests[i]; i++) {
- v8::ScriptData* data = v8::ScriptData::PreCompile(
- isolate, fail_tests[i], i::StrLength(fail_tests[i]));
+ v8::Handle<v8::String> source =
+ v8::String::NewFromUtf8(isolate,
+ fail_tests[i],
+ v8::String::kNormalString,
+ i::StrLength(fail_tests[i]));
+ v8::ScriptData* data = v8::ScriptData::PreCompile(source);
CHECK(data == NULL || data->HasError());
delete data;
}
@@ -199,14 +205,15 @@ TEST(Preparsing) {
const char* error_source = "var x = y z;";
int error_source_length = i::StrLength(error_source);
- v8::ScriptData* preparse =
- v8::ScriptData::PreCompile(isolate, source, source_length);
+ v8::ScriptData* preparse = v8::ScriptData::PreCompile(v8::String::NewFromUtf8(
+ isolate, source, v8::String::kNormalString, source_length));
CHECK(!preparse->HasError());
bool lazy_flag = i::FLAG_lazy;
{
i::FLAG_lazy = true;
ScriptResource* resource = new ScriptResource(source, source_length);
- v8::Local<v8::String> script_source = v8::String::NewExternal(resource);
+ v8::Local<v8::String> script_source =
+ v8::String::NewExternal(isolate, resource);
v8::Script::Compile(script_source, NULL, preparse);
}
@@ -214,15 +221,19 @@ TEST(Preparsing) {
i::FLAG_lazy = false;
ScriptResource* resource = new ScriptResource(source, source_length);
- v8::Local<v8::String> script_source = v8::String::NewExternal(resource);
+ v8::Local<v8::String> script_source =
+ v8::String::NewExternal(isolate, resource);
v8::Script::New(script_source, NULL, preparse, v8::Local<v8::String>());
}
delete preparse;
i::FLAG_lazy = lazy_flag;
// Syntax error.
- v8::ScriptData* error_preparse =
- v8::ScriptData::PreCompile(isolate, error_source, error_source_length);
+ v8::ScriptData* error_preparse = v8::ScriptData::PreCompile(
+ v8::String::NewFromUtf8(isolate,
+ error_source,
+ v8::String::kNormalString,
+ error_source_length));
CHECK(error_preparse->HasError());
i::ScriptDataImpl *pre_impl =
reinterpret_cast<i::ScriptDataImpl*>(error_preparse);
@@ -233,8 +244,11 @@ TEST(Preparsing) {
CHECK_EQ(11, error_location.end_pos);
// Should not crash.
const char* message = pre_impl->BuildMessage();
- pre_impl->BuildArgs();
+ i::Vector<const char*> args = pre_impl->BuildArgs();
CHECK_GT(strlen(message), 0);
+ args.Dispose();
+ i::DeleteArray(message);
+ delete error_preparse;
}
@@ -388,13 +402,13 @@ TEST(PreParseOverflow) {
size_t kProgramSize = 1024 * 1024;
i::SmartArrayPointer<char> program(i::NewArray<char>(kProgramSize + 1));
- memset(*program, '(', kProgramSize);
+ memset(program.get(), '(', kProgramSize);
program[kProgramSize] = '\0';
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
i::Utf8ToUtf16CharacterStream stream(
- reinterpret_cast<const i::byte*>(*program),
+ reinterpret_cast<const i::byte*>(program.get()),
static_cast<unsigned>(kProgramSize));
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
@@ -445,7 +459,7 @@ void TestCharacterStream(const char* ascii_source,
i::Vector<const char> ascii_vector(ascii_source, static_cast<int>(length));
i::Handle<i::String> ascii_string(
factory->NewStringFromAscii(ascii_vector));
- TestExternalResource resource(*uc16_buffer, length);
+ TestExternalResource resource(uc16_buffer.get(), length);
i::Handle<i::String> uc16_string(
factory->NewExternalStringFromTwoByte(&resource));
@@ -1008,7 +1022,7 @@ TEST(ScopePositions) {
int kSuffixByteLen = i::StrLength(source_data[i].outer_suffix);
int kProgramSize = kPrefixLen + kInnerLen + kSuffixLen;
int kProgramByteSize = kPrefixByteLen + kInnerByteLen + kSuffixByteLen;
- i::Vector<char> program = i::Vector<char>::New(kProgramByteSize + 1);
+ i::ScopedVector<char> program(kProgramByteSize + 1);
i::OS::SNPrintF(program, "%s%s%s",
source_data[i].outer_prefix,
source_data[i].inner_source,
@@ -1050,15 +1064,14 @@ i::Handle<i::String> FormatMessage(i::ScriptDataImpl* data) {
i::Factory* factory = isolate->factory();
const char* message = data->BuildMessage();
i::Handle<i::String> format = v8::Utils::OpenHandle(
- *v8::String::New(message));
+ *v8::String::NewFromUtf8(CcTest::isolate(), message));
i::Vector<const char*> args = data->BuildArgs();
i::Handle<i::JSArray> args_array = factory->NewJSArray(args.length());
for (int i = 0; i < args.length(); i++) {
- i::JSArray::SetElement(args_array,
- i,
- v8::Utils::OpenHandle(*v8::String::New(args[i])),
- NONE,
- i::kNonStrictMode);
+ i::JSArray::SetElement(
+ args_array, i, v8::Utils::OpenHandle(*v8::String::NewFromUtf8(
+ CcTest::isolate(), args[i])),
+ NONE, i::kNonStrictMode);
}
i::Handle<i::JSObject> builtins(isolate->js_builtins_object());
i::Handle<i::Object> format_fun =
@@ -1089,7 +1102,15 @@ enum ParserFlag {
};
-void SetParserFlags(i::ParserBase* parser, i::EnumSet<ParserFlag> flags) {
+enum ParserSyncTestResult {
+ kSuccessOrError,
+ kSuccess,
+ kError
+};
+
+template <typename Traits>
+void SetParserFlags(i::ParserBase<Traits>* parser,
+ i::EnumSet<ParserFlag> flags) {
parser->set_allow_lazy(flags.Contains(kAllowLazy));
parser->set_allow_natives_syntax(flags.Contains(kAllowNativesSyntax));
parser->set_allow_harmony_scoping(flags.Contains(kAllowHarmonyScoping));
@@ -1102,7 +1123,8 @@ void SetParserFlags(i::ParserBase* parser, i::EnumSet<ParserFlag> flags) {
void TestParserSyncWithFlags(i::Handle<i::String> source,
- i::EnumSet<ParserFlag> flags) {
+ i::EnumSet<ParserFlag> flags,
+ ParserSyncTestResult result) {
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -1144,6 +1166,17 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
i::Handle<i::String> message_string =
i::Handle<i::String>::cast(i::GetProperty(exception_handle, "message"));
+ if (result == kSuccess) {
+ i::OS::Print(
+ "Parser failed on:\n"
+ "\t%s\n"
+ "with error:\n"
+ "\t%s\n"
+ "However, we expected no error.",
+ source->ToCString().get(), message_string->ToCString().get());
+ CHECK(false);
+ }
+
if (!data.has_error()) {
i::OS::Print(
"Parser failed on:\n"
@@ -1151,7 +1184,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
"with error:\n"
"\t%s\n"
"However, the preparser succeeded",
- *source->ToCString(), *message_string->ToCString());
+ source->ToCString().get(), message_string->ToCString().get());
CHECK(false);
}
// Check that preparser and parser produce the same error.
@@ -1163,9 +1196,9 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
"However, found the following error messages\n"
"\tparser: %s\n"
"\tpreparser: %s\n",
- *source->ToCString(),
- *message_string->ToCString(),
- *preparser_message->ToCString());
+ source->ToCString().get(),
+ message_string->ToCString().get(),
+ preparser_message->ToCString().get());
CHECK(false);
}
} else if (data.has_error()) {
@@ -1175,7 +1208,14 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
"with error:\n"
"\t%s\n"
"However, the parser succeeded",
- *source->ToCString(), *FormatMessage(&data)->ToCString());
+ source->ToCString().get(), FormatMessage(&data)->ToCString().get());
+ CHECK(false);
+ } else if (result == kError) {
+ i::OS::Print(
+ "Expected error on:\n"
+ "\t%s\n"
+ "However, parser and preparser succeeded",
+ source->ToCString().get());
CHECK(false);
}
}
@@ -1183,7 +1223,8 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
void TestParserSync(const char* source,
const ParserFlag* flag_list,
- size_t flag_list_length) {
+ size_t flag_list_length,
+ ParserSyncTestResult result = kSuccessOrError) {
i::Handle<i::String> str =
CcTest::i_isolate()->factory()->NewStringFromAscii(i::CStrVector(source));
for (int bits = 0; bits < (1 << flag_list_length); bits++) {
@@ -1191,7 +1232,7 @@ void TestParserSync(const char* source,
for (size_t flag_index = 0; flag_index < flag_list_length; flag_index++) {
if ((bits & (1 << flag_index)) != 0) flags.Add(flag_list[flag_index]);
}
- TestParserSyncWithFlags(str, flags);
+ TestParserSyncWithFlags(str, flags, result);
}
}
@@ -1329,9 +1370,754 @@ TEST(PreparserStrictOctal) {
" 01; \n"
" }; \n"
"}; \n";
- v8::Script::Compile(v8::String::New(script));
+ v8::Script::Compile(v8::String::NewFromUtf8(CcTest::isolate(), script));
CHECK(try_catch.HasCaught());
v8::String::Utf8Value exception(try_catch.Exception());
CHECK_EQ("SyntaxError: Octal literals are not allowed in strict mode.",
*exception);
}
+
+
+void RunParserSyncTest(const char* context_data[][2],
+ const char* statement_data[],
+ ParserSyncTestResult result) {
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+
+ int marker;
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ static const ParserFlag flags[] = {
+ kAllowLazy, kAllowHarmonyScoping, kAllowModules, kAllowGenerators,
+ kAllowForOf, kAllowNativesSyntax
+ };
+ for (int i = 0; context_data[i][0] != NULL; ++i) {
+ for (int j = 0; statement_data[j] != NULL; ++j) {
+ int kPrefixLen = i::StrLength(context_data[i][0]);
+ int kStatementLen = i::StrLength(statement_data[j]);
+ int kSuffixLen = i::StrLength(context_data[i][1]);
+ int kProgramSize = kPrefixLen + kStatementLen + kSuffixLen;
+
+ // Plug the source code pieces together.
+ i::ScopedVector<char> program(kProgramSize + 1);
+ int length = i::OS::SNPrintF(program,
+ "%s%s%s",
+ context_data[i][0],
+ statement_data[j],
+ context_data[i][1]);
+ CHECK(length == kProgramSize);
+ TestParserSync(program.start(),
+ flags,
+ ARRAY_SIZE(flags),
+ result);
+ }
+ }
+}
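RunParserSyncTest splices each context pair around each statement and hands the result to TestParserSync, which, per the earlier hunk, walks every subset of the flag list by treating its loop counter as a bitmask. A standalone sketch of that subset walk, with hypothetical flag names standing in for the ParserFlag values:

#include <cstdio>
#include <vector>

enum Flag { kA, kB, kC };  // stand-ins for kAllowLazy, kAllowModules, ...

int main() {
  const Flag flag_list[] = { kA, kB, kC };
  const size_t n = sizeof(flag_list) / sizeof(flag_list[0]);
  // 2^n combinations: bit i of 'bits' decides whether flag_list[i] is set,
  // exactly like the 'bits' loop in TestParserSync.
  for (unsigned bits = 0; bits < (1u << n); ++bits) {
    std::vector<Flag> flags;
    for (size_t i = 0; i < n; ++i) {
      if ((bits & (1u << i)) != 0) flags.push_back(flag_list[i]);
    }
    std::printf("combination %u enables %zu flag(s)\n", bits, flags.size());
  }
  return 0;
}

With the six flags used above, every context/statement pair expands into 64 parser-versus-preparser comparisons.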
+
+
+TEST(ErrorsEvalAndArguments) {
+ // Tests that both preparsing and parsing produce the right kind of errors for
+ // using "eval" and "arguments" as identifiers. Without the strict mode, it's
+ // ok to use "eval" or "arguments" as identifiers. With the strict mode, it
+ // isn't.
+ const char* context_data[][2] = {
+ { "\"use strict\";", "" },
+ { "var eval; function test_func() {\"use strict\"; ", "}"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var eval;",
+ "var arguments",
+ "var foo, eval;",
+ "var foo, arguments;",
+ "try { } catch (eval) { }",
+ "try { } catch (arguments) { }",
+ "function eval() { }",
+ "function arguments() { }",
+ "function foo(eval) { }",
+ "function foo(arguments) { }",
+ "function foo(bar, eval) { }",
+ "function foo(bar, arguments) { }",
+ "eval = 1;",
+ "arguments = 1;",
+ "var foo = eval = 1;",
+ "var foo = arguments = 1;",
+ "++eval;",
+ "++arguments;",
+ "eval++;",
+ "arguments++;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsEvalAndArgumentsClassic) {
+ // Tests that both preparsing and parsing accept "eval" and "arguments" as
+ // identifiers when needed.
+ const char* context_data[][2] = {
+ { "", "" },
+ { "function test_func() {", "}"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var eval;",
+ "var arguments",
+ "var foo, eval;",
+ "var foo, arguments;",
+ "try { } catch (eval) { }",
+ "try { } catch (arguments) { }",
+ "function eval() { }",
+ "function arguments() { }",
+ "function foo(eval) { }",
+ "function foo(arguments) { }",
+ "function foo(bar, eval) { }",
+ "function foo(bar, arguments) { }",
+ "eval = 1;",
+ "arguments = 1;",
+ "var foo = eval = 1;",
+ "var foo = arguments = 1;",
+ "++eval;",
+ "++arguments;",
+ "eval++;",
+ "arguments++;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(NoErrorsEvalAndArgumentsStrict) {
+ const char* context_data[][2] = {
+ { "\"use strict\";", "" },
+ { "function test_func() { \"use strict\";", "}" },
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "eval;",
+ "arguments;",
+ "var foo = eval;",
+ "var foo = arguments;",
+ "var foo = { eval: 1 };",
+ "var foo = { arguments: 1 };",
+ "var foo = { }; foo.eval = {};",
+ "var foo = { }; foo.arguments = {};",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsFutureStrictReservedWords) {
+ // Tests that both preparsing and parsing produce the right kind of errors for
+ // using future strict reserved words as identifiers. Without the strict mode,
+ // it's ok to use future strict reserved words as identifiers. With the strict
+ // mode, it isn't.
+ const char* context_data[][2] = {
+ { "\"use strict\";", "" },
+ { "function test_func() {\"use strict\"; ", "}"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var interface;",
+ "var foo, interface;",
+ "try { } catch (interface) { }",
+ "function interface() { }",
+ "function foo(interface) { }",
+ "function foo(bar, interface) { }",
+ "interface = 1;",
+ "var foo = interface = 1;",
+ "++interface;",
+ "interface++;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsFutureStrictReservedWords) {
+ const char* context_data[][2] = {
+ { "", "" },
+ { "function test_func() {", "}"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var interface;",
+ "var foo, interface;",
+ "try { } catch (interface) { }",
+ "function interface() { }",
+ "function foo(interface) { }",
+ "function foo(bar, interface) { }",
+ "interface = 1;",
+ "var foo = interface = 1;",
+ "++interface;",
+ "interface++;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsReservedWords) {
+ // Tests that both preparsing and parsing produce the right kind of errors for
+ // using future reserved words as identifiers. These tests don't depend on the
+ // strict mode.
+ const char* context_data[][2] = {
+ { "", "" },
+ { "\"use strict\";", "" },
+ { "var eval; function test_func() {", "}"},
+ { "var eval; function test_func() {\"use strict\"; ", "}"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var super;",
+ "var foo, super;",
+ "try { } catch (super) { }",
+ "function super() { }",
+ "function foo(super) { }",
+ "function foo(bar, super) { }",
+ "super = 1;",
+ "var foo = super = 1;",
+ "++super;",
+ "super++;",
+ "function foo super",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsYieldClassic) {
+  // In classic mode, it's okay to use "yield" as an identifier, *except* inside a
+ // generator (see next test).
+ const char* context_data[][2] = {
+ { "", "" },
+ { "function is_not_gen() {", "}" },
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var yield;",
+ "var foo, yield;",
+ "try { } catch (yield) { }",
+ "function yield() { }",
+ "function foo(yield) { }",
+ "function foo(bar, yield) { }",
+ "yield = 1;",
+ "var foo = yield = 1;",
+ "++yield;",
+ "yield++;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsYieldClassicGenerator) {
+ const char* context_data[][2] = {
+ { "function * is_gen() {", "}" },
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var yield;",
+ "var foo, yield;",
+ "try { } catch (yield) { }",
+ "function yield() { }",
+ // BUG: These should not be allowed, but they are (if kAllowGenerators is
+ // set)
+ // "function foo(yield) { }",
+ // "function foo(bar, yield) { }",
+ "yield = 1;",
+ "var foo = yield = 1;",
+ "++yield;",
+ "yield++;",
+ NULL
+ };
+
+ // If generators are not allowed, the error will be produced at the '*' token,
+ // so this test works both with and without the kAllowGenerators flag.
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(ErrorsYieldStrict) {
+ const char* context_data[][2] = {
+ { "\"use strict\";", "" },
+ { "\"use strict\"; function is_not_gen() {", "}" },
+ { "function test_func() {\"use strict\"; ", "}"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var yield;",
+ "var foo, yield;",
+ "try { } catch (yield) { }",
+ "function yield() { }",
+ "function foo(yield) { }",
+ "function foo(bar, yield) { }",
+ "yield = 1;",
+ "var foo = yield = 1;",
+ "++yield;",
+ "yield++;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(ErrorsYield) {
+ const char* context_data[][2] = {
+ { "function * is_gen() {", "}" },
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "yield 2;", // this is legal inside generator
+ "yield * 2;", // this is legal inside generator
+ NULL
+ };
+
+  // Here we cannot assert that there is no error, since there will be one without
+ // the kAllowGenerators flag. However, we test that Parser and PreParser
+ // produce the same errors.
+ RunParserSyncTest(context_data, statement_data, kSuccessOrError);
+}
+
+
+TEST(ErrorsNameOfStrictFunction) {
+ // Tests that illegal tokens as names of a strict function produce the correct
+ // errors.
+ const char* context_data[][2] = {
+ { "", ""},
+ { "\"use strict\";", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "function eval() {\"use strict\";}",
+ "function arguments() {\"use strict\";}",
+ "function interface() {\"use strict\";}",
+ "function yield() {\"use strict\";}",
+ // Future reserved words are always illegal
+ "function super() { }",
+ "function super() {\"use strict\";}",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsNameOfStrictFunction) {
+ const char* context_data[][2] = {
+ { "", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "function eval() { }",
+ "function arguments() { }",
+ "function interface() { }",
+ "function yield() { }",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+
+TEST(ErrorsIllegalWordsAsLabelsClassic) {
+ // Using future reserved words as labels is always an error.
+ const char* context_data[][2] = {
+ { "", ""},
+ { "function test_func() {", "}" },
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "super: while(true) { break super; }",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(ErrorsIllegalWordsAsLabelsStrict) {
+ // Tests that illegal tokens as labels produce the correct errors.
+ const char* context_data[][2] = {
+ { "\"use strict\";", "" },
+ { "function test_func() {\"use strict\"; ", "}"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "super: while(true) { break super; }",
+ "interface: while(true) { break interface; }",
+ "yield: while(true) { break yield; }",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsIllegalWordsAsLabels) {
+ // Using eval and arguments as labels is legal even in strict mode.
+ const char* context_data[][2] = {
+ { "", ""},
+ { "function test_func() {", "}" },
+ { "\"use strict\";", "" },
+ { "\"use strict\"; function test_func() {", "}" },
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "mylabel: while(true) { break mylabel; }",
+ "eval: while(true) { break eval; }",
+ "arguments: while(true) { break arguments; }",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsParenthesizedLabels) {
+ // Parenthesized identifiers shouldn't be recognized as labels.
+ const char* context_data[][2] = {
+ { "", ""},
+ { "function test_func() {", "}" },
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "(mylabel): while(true) { break mylabel; }",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsParenthesizedDirectivePrologue) {
+  // A parenthesized "use strict" is not a directive prologue.
+ const char* context_data[][2] = {
+ { "", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "(\"use strict\"); var eval;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsNotAnIdentifierName) {
+ const char* context_data[][2] = {
+ { "", ""},
+ { "\"use strict\";", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var foo = {}; foo.{;",
+ "var foo = {}; foo.};",
+ "var foo = {}; foo.=;",
+ "var foo = {}; foo.888;",
+ "var foo = {}; foo.-;",
+ "var foo = {}; foo.--;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsIdentifierNames) {
+ // Keywords etc. are valid as property names.
+ const char* context_data[][2] = {
+ { "", ""},
+ { "\"use strict\";", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "var foo = {}; foo.if;",
+ "var foo = {}; foo.yield;",
+ "var foo = {}; foo.super;",
+ "var foo = {}; foo.interface;",
+ "var foo = {}; foo.eval;",
+ "var foo = {}; foo.arguments;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(DontRegressPreParserDataSizes) {
+ // These tests make sure that PreParser doesn't start producing less data.
+
+ v8::V8::Initialize();
+
+ int marker;
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ struct TestCase {
+ const char* program;
+ int symbols;
+ int functions;
+ } test_cases[] = {
+ // Labels, variables and functions are recorded as symbols.
+ {"{label: 42}", 1, 0}, {"{label: 42; label2: 43}", 2, 0},
+ {"var x = 42;", 1, 0}, {"var x = 42, y = 43;", 2, 0},
+ {"function foo() {}", 1, 1}, {"function foo() {} function bar() {}", 2, 2},
+    // Labels, variables and functions inside lazy functions are not recorded.
+ {"function lazy() { var a, b, c; }", 1, 1},
+ {"function lazy() { a: 1; b: 2; c: 3; }", 1, 1},
+ {"function lazy() { function a() {} function b() {} function c() {} }", 1,
+ 1},
+ {NULL, 0, 0}
+ };
+ // Each function adds 5 elements to the preparse function data.
+ const int kDataPerFunction = 5;
+
+ uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
+ for (int i = 0; test_cases[i].program; i++) {
+ const char* program = test_cases[i].program;
+ i::Utf8ToUtf16CharacterStream stream(
+ reinterpret_cast<const i::byte*>(program),
+ static_cast<unsigned>(strlen(program)));
+ i::CompleteParserRecorder log;
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
+ scanner.Initialize(&stream);
+
+ i::PreParser preparser(&scanner, &log, stack_limit);
+ preparser.set_allow_lazy(true);
+ preparser.set_allow_natives_syntax(true);
+ i::PreParser::PreParseResult result = preparser.PreParseProgram();
+ CHECK_EQ(i::PreParser::kPreParseSuccess, result);
+ if (log.symbol_ids() != test_cases[i].symbols) {
+ i::OS::Print(
+ "Expected preparse data for program:\n"
+ "\t%s\n"
+ "to contain %d symbols, however, received %d symbols.\n",
+ program, test_cases[i].symbols, log.symbol_ids());
+ CHECK(false);
+ }
+ if (log.function_position() != test_cases[i].functions * kDataPerFunction) {
+ i::OS::Print(
+ "Expected preparse data for program:\n"
+ "\t%s\n"
+ "to contain %d functions, however, received %d functions.\n",
+ program, test_cases[i].functions,
+ log.function_position() / kDataPerFunction);
+ CHECK(false);
+ }
+ i::ScriptDataImpl data(log.ExtractData());
+ CHECK(!data.has_error());
+ }
+}
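The size check above leans on the preparser emitting a fixed kDataPerFunction = 5 entries per recorded function, so a recorded-function count is simply function_position() divided by five. A small worked sketch of that arithmetic, adapted from the table's entries (the program text is only a label here):

#include <cstdio>

int main() {
  const int kDataPerFunction = 5;  // same constant as in the test above
  struct Case { const char* program; int functions; };
  const Case cases[] = {
    { "function foo() {}", 1 },
    { "function foo() {} function bar() {}", 2 },
    { "function lazy() { function a() {} function b() {} }", 1 },
  };
  for (const Case& c : cases) {
    // The test reverses this: functions = function_position / kDataPerFunction.
    std::printf("%-55s -> %d data elements\n",
                c.program, c.functions * kDataPerFunction);
  }
  return 0;
}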
+
+
+TEST(FunctionDeclaresItselfStrict) {
+ // Tests that we produce the right kinds of errors when a function declares
+  // itself strict (we cannot produce these errors as soon as we see the
+ // offending identifiers, because we don't know at that point whether the
+ // function is strict or not).
+ const char* context_data[][2] = {
+ {"function eval() {", "}"},
+ {"function arguments() {", "}"},
+ {"function yield() {", "}"},
+ {"function interface() {", "}"},
+ {"function foo(eval) {", "}"},
+ {"function foo(arguments) {", "}"},
+ {"function foo(yield) {", "}"},
+ {"function foo(interface) {", "}"},
+ {"function foo(bar, eval) {", "}"},
+ {"function foo(bar, arguments) {", "}"},
+ {"function foo(bar, yield) {", "}"},
+ {"function foo(bar, interface) {", "}"},
+ {"function foo(bar, bar) {", "}"},
+ { NULL, NULL }
+ };
+
+ const char* strict_statement_data[] = {
+ "\"use strict\";",
+ NULL
+ };
+
+ const char* non_strict_statement_data[] = {
+ ";",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, strict_statement_data, kError);
+ RunParserSyncTest(context_data, non_strict_statement_data, kSuccess);
+}
+
+
+TEST(ErrorsTryWithoutCatchOrFinally) {
+ const char* context_data[][2] = {
+ {"", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "try { }",
+ "try { } foo();",
+ "try { } catch (e) foo();",
+ "try { } catch { }",
+ "try { } finally foo();",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsTryCatchFinally) {
+ const char* context_data[][2] = {
+ {"", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "try { } catch (e) { }",
+ "try { } catch (e) { } finally { }",
+ "try { } finally { }",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsRegexpLiteral) {
+ const char* context_data[][2] = {
+ {"var r = ", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "/unterminated",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsRegexpLiteral) {
+ const char* context_data[][2] = {
+ {"var r = ", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "/foo/",
+ "/foo/g",
+ "/foo/whatever", // This is an error but not detected by the parser.
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(Intrinsics) {
+ const char* context_data[][2] = {
+ {"", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "%someintrinsic(arg)",
+ NULL
+ };
+
+ // Parsing will fail or succeed depending on whether we allow natives syntax
+ // or not.
+ RunParserSyncTest(context_data, statement_data, kSuccessOrError);
+}
+
+
+TEST(NoErrorsNewExpression) {
+ const char* context_data[][2] = {
+ {"", ""},
+ {"var f =", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "new foo",
+ "new foo();",
+ "new foo(1);",
+ "new foo(1, 2);",
+ // The first () will be processed as a part of the NewExpression and the
+ // second () will be processed as part of LeftHandSideExpression.
+ "new foo()();",
+ // The first () will be processed as a part of the inner NewExpression and
+ // the second () will be processed as a part of the outer NewExpression.
+ "new new foo()();",
+ "new foo.bar;",
+ "new foo.bar();",
+ "new foo.bar.baz;",
+ "new foo.bar().baz;",
+ "new foo[bar];",
+ "new foo[bar]();",
+ "new foo[bar][baz];",
+ "new foo[bar]()[baz];",
+ "new foo[bar].baz(baz)()[bar].baz;",
+ "new \"foo\"", // Runtime error
+ "new 1", // Runtime error
+ "new foo++",
+ // This even runs:
+ "(new new Function(\"this.x = 1\")).x;",
+ "new new Test_Two(String, 2).v(0123).length;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsNewExpression) {
+ const char* context_data[][2] = {
+ {"", ""},
+ {"var f =", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "new foo bar",
+ "new ) foo",
+ "new ++foo",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index 36ad487079..b9f8bafe4d 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -53,6 +53,12 @@ using namespace ::v8::internal;
do { \
ASM("str %%sp, %0" : "=g" (sp_addr)); \
} while (0)
+#elif defined(__AARCH64EL__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("mov x16, sp; str x16, %0" : "=g" (sp_addr)); \
+ } while (0)
#elif defined(__MIPSEL__)
#define GET_STACK_POINTER() \
static int sp_addr = 0; \
@@ -72,9 +78,10 @@ void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(StackAlignment) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
global_template->Set(v8_str("get_stack_pointer"),
- v8::FunctionTemplate::New(GetStackPointer));
+ v8::FunctionTemplate::New(isolate, GetStackPointer));
LocalContext env(NULL, global_template);
CompileRun(
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 47146ecc48..c3198b1512 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -29,6 +29,7 @@
#include "v8.h"
#include "profile-generator-inl.h"
+#include "profiler-extension.h"
#include "cctest.h"
#include "cpu-profiler.h"
#include "../include/v8-profiler.h"
@@ -47,42 +48,42 @@ using i::Vector;
TEST(ProfileNodeFindOrAddChild) {
ProfileTree tree;
- ProfileNode node(&tree, NULL);
+ ProfileNode* node = tree.root();
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
- ProfileNode* childNode1 = node.FindOrAddChild(&entry1);
+ ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
CHECK_NE(NULL, childNode1);
- CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
+ CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
- ProfileNode* childNode2 = node.FindOrAddChild(&entry2);
+ ProfileNode* childNode2 = node->FindOrAddChild(&entry2);
CHECK_NE(NULL, childNode2);
CHECK_NE(childNode1, childNode2);
- CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
- CHECK_EQ(childNode2, node.FindOrAddChild(&entry2));
+ CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
+ CHECK_EQ(childNode2, node->FindOrAddChild(&entry2));
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
- ProfileNode* childNode3 = node.FindOrAddChild(&entry3);
+ ProfileNode* childNode3 = node->FindOrAddChild(&entry3);
CHECK_NE(NULL, childNode3);
CHECK_NE(childNode1, childNode3);
CHECK_NE(childNode2, childNode3);
- CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
- CHECK_EQ(childNode2, node.FindOrAddChild(&entry2));
- CHECK_EQ(childNode3, node.FindOrAddChild(&entry3));
+ CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
+ CHECK_EQ(childNode2, node->FindOrAddChild(&entry2));
+ CHECK_EQ(childNode3, node->FindOrAddChild(&entry3));
}
TEST(ProfileNodeFindOrAddChildForSameFunction) {
const char* aaa = "aaa";
ProfileTree tree;
- ProfileNode node(&tree, NULL);
+ ProfileNode* node = tree.root();
CodeEntry entry1(i::Logger::FUNCTION_TAG, aaa);
- ProfileNode* childNode1 = node.FindOrAddChild(&entry1);
+ ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
CHECK_NE(NULL, childNode1);
- CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
+ CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
// The same function again.
CodeEntry entry2(i::Logger::FUNCTION_TAG, aaa);
- CHECK_EQ(childNode1, node.FindOrAddChild(&entry2));
+ CHECK_EQ(childNode1, node->FindOrAddChild(&entry2));
// Now with a different security token.
CodeEntry entry3(i::Logger::FUNCTION_TAG, aaa);
- CHECK_EQ(childNode1, node.FindOrAddChild(&entry3));
+ CHECK_EQ(childNode1, node->FindOrAddChild(&entry3));
}
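Both hunks above stop constructing a free-standing ProfileNode and instead take the root from the tree, while keeping the FindOrAddChild contract intact: asking again with an entry of the same name must return the already-created child. A stripped-down analogue of that contract (plain std::map bookkeeping, not V8's ProfileTree):

#include <cassert>
#include <map>
#include <memory>
#include <string>

class TreeSketch {
 public:
  struct Node { std::string name; };

  Node* root() { return &root_; }  // the tree owns its root, as in the hunks

  // Hand back the same child whenever the same entry name is requested,
  // which is what the CHECK_EQ(childNode1, node->FindOrAddChild(...)) lines
  // above verify.
  Node* FindOrAddChild(Node* /* parent */, const std::string& name) {
    std::unique_ptr<Node>& slot = children_[name];
    if (!slot) slot.reset(new Node{name});
    return slot.get();
  }

 private:
  Node root_{"(root)"};
  std::map<std::string, std::unique_ptr<Node> > children_;
};

int main() {
  TreeSketch tree;
  TreeSketch::Node* a1 = tree.FindOrAddChild(tree.root(), "aaa");
  TreeSketch::Node* a2 = tree.FindOrAddChild(tree.root(), "aaa");
  TreeSketch::Node* b = tree.FindOrAddChild(tree.root(), "bbb");
  assert(a1 == a2);  // same entry -> same child
  assert(a1 != b);   // different entry -> different child
  return 0;
}

Only the root gets children in this sketch (the parent argument is ignored), which is all the assertions need; the real tree nests nodes arbitrarily.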
@@ -400,7 +401,7 @@ class TestSetup {
TEST(RecordTickSample) {
TestSetup test_setup;
CpuProfilesCollection profiles(CcTest::heap());
- profiles.StartProfiling("", 1, false);
+ profiles.StartProfiling("", false);
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "bbb");
@@ -466,7 +467,7 @@ static void CheckNodeIds(ProfileNode* node, int* expectedId) {
TEST(SampleIds) {
TestSetup test_setup;
CpuProfilesCollection profiles(CcTest::heap());
- profiles.StartProfiling("", 1, true);
+ profiles.StartProfiling("", true);
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "bbb");
@@ -514,7 +515,7 @@ TEST(SampleIds) {
TEST(NoSamples) {
TestSetup test_setup;
CpuProfilesCollection profiles(CcTest::heap());
- profiles.StartProfiling("", 1, false);
+ profiles.StartProfiling("", false);
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
@@ -536,60 +537,6 @@ TEST(NoSamples) {
}
-// --- P r o f i l e r E x t e n s i o n ---
-
-class ProfilerExtension : public v8::Extension {
- public:
- ProfilerExtension() : v8::Extension("v8/profiler", kSource) { }
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
- static void StartProfiling(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void StopProfiling(const v8::FunctionCallbackInfo<v8::Value>& args);
- private:
- static const char* kSource;
-};
-
-
-const char* ProfilerExtension::kSource =
- "native function startProfiling();"
- "native function stopProfiling();";
-
-v8::Handle<v8::FunctionTemplate> ProfilerExtension::GetNativeFunction(
- v8::Handle<v8::String> name) {
- if (name->Equals(v8::String::New("startProfiling"))) {
- return v8::FunctionTemplate::New(ProfilerExtension::StartProfiling);
- } else if (name->Equals(v8::String::New("stopProfiling"))) {
- return v8::FunctionTemplate::New(ProfilerExtension::StopProfiling);
- } else {
- CHECK(false);
- return v8::Handle<v8::FunctionTemplate>();
- }
-}
-
-
-void ProfilerExtension::StartProfiling(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::CpuProfiler* cpu_profiler = args.GetIsolate()->GetCpuProfiler();
- if (args.Length() > 0)
- cpu_profiler->StartCpuProfiling(args[0].As<v8::String>());
- else
- cpu_profiler->StartCpuProfiling(v8::String::New(""));
-}
-
-
-void ProfilerExtension::StopProfiling(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::CpuProfiler* cpu_profiler = args.GetIsolate()->GetCpuProfiler();
- if (args.Length() > 0)
- cpu_profiler->StopCpuProfiling(args[0].As<v8::String>());
- else
- cpu_profiler->StopCpuProfiling(v8::String::New(""));
-}
-
-
-static ProfilerExtension kProfilerExtension;
-v8::DeclareExtension kProfilerExtensionDeclaration(&kProfilerExtension);
-
static const ProfileNode* PickChild(const ProfileNode* parent,
const char* name) {
for (int i = 0; i < parent->children()->length(); ++i) {
@@ -605,12 +552,9 @@ TEST(RecordStackTraceAtStartProfiling) {
// don't appear in the stack trace.
i::FLAG_use_inlining = false;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- const char* extensions[] = { "v8/profiler" };
- v8::ExtensionConfiguration config(1, extensions);
- v8::Local<v8::Context> context = v8::Context::New(isolate, &config);
- context->Enter();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
CpuProfiler* profiler = CcTest::i_isolate()->cpu_profiler();
CHECK_EQ(0, profiler->GetProfilesCount());
@@ -658,12 +602,10 @@ TEST(Issue51919) {
for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i) {
i::Vector<char> title = i::Vector<char>::New(16);
i::OS::SNPrintF(title, "%d", i);
- // UID must be > 0.
- CHECK(collection.StartProfiling(title.start(), i + 1, false));
+ CHECK(collection.StartProfiling(title.start(), false));
titles[i] = title.start();
}
- CHECK(!collection.StartProfiling(
- "maximum", CpuProfilesCollection::kMaxSimultaneousProfiles + 1, false));
+ CHECK(!collection.StartProfiling("maximum", false));
for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i)
i::DeleteArray(titles[i]);
}
@@ -673,7 +615,7 @@ static const v8::CpuProfileNode* PickChild(const v8::CpuProfileNode* parent,
const char* name) {
for (int i = 0; i < parent->GetChildrenCount(); ++i) {
const v8::CpuProfileNode* child = parent->GetChild(i);
- v8::String::AsciiValue function_name(child->GetFunctionName());
+ v8::String::Utf8Value function_name(child->GetFunctionName());
if (strcmp(*function_name, name) == 0) return child;
}
return NULL;
@@ -685,23 +627,24 @@ TEST(ProfileNodeScriptId) {
// don't appear in the stack trace.
i::FLAG_use_inlining = false;
- const char* extensions[] = { "v8/profiler" };
- v8::ExtensionConfiguration config(1, extensions);
- LocalContext env(&config);
- v8::HandleScope hs(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
- CHECK_EQ(0, profiler->GetProfileCount());
- v8::Handle<v8::Script> script_a = v8::Script::Compile(v8::String::New(
- "function a() { startProfiling(); }\n"));
+ i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
+ v8::Handle<v8::Script> script_a = v8::Script::Compile(v8::String::NewFromUtf8(
+ env->GetIsolate(), "function a() { startProfiling(); }\n"));
script_a->Run();
- v8::Handle<v8::Script> script_b = v8::Script::Compile(v8::String::New(
- "function b() { a(); }\n"
- "b();\n"
- "stopProfiling();\n"));
+ v8::Handle<v8::Script> script_b =
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "function b() { a(); }\n"
+ "b();\n"
+ "stopProfiling();\n"));
script_b->Run();
- CHECK_EQ(1, profiler->GetProfileCount());
- const v8::CpuProfile* profile = profiler->GetCpuProfile(0);
+ CHECK_EQ(1, iprofiler->GetProfilesCount());
+ const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
const v8::CpuProfileNode* current = profile->GetTopDownRoot();
reinterpret_cast<ProfileNode*>(
const_cast<v8::CpuProfileNode*>(current))->Print(0);
@@ -786,29 +729,31 @@ TEST(LineNumber) {
TEST(BailoutReason) {
- const char* extensions[] = { "v8/profiler" };
- v8::ExtensionConfiguration config(1, extensions);
- LocalContext env(&config);
- v8::HandleScope hs(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
- CHECK_EQ(0, profiler->GetProfileCount());
- v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(
- "function TryCatch() {\n"
- " try {\n"
- " startProfiling();\n"
- " } catch (e) { };\n"
- "}\n"
- "function TryFinally() {\n"
- " try {\n"
- " TryCatch();\n"
- " } finally { };\n"
- "}\n"
- "TryFinally();\n"
- "stopProfiling();"));
+ i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+ CHECK_EQ(0, iprofiler->GetProfilesCount());
+ v8::Handle<v8::Script> script =
+ v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
+ "function TryCatch() {\n"
+ " try {\n"
+ " startProfiling();\n"
+ " } catch (e) { };\n"
+ "}\n"
+ "function TryFinally() {\n"
+ " try {\n"
+ " TryCatch();\n"
+ " } finally { };\n"
+ "}\n"
+ "TryFinally();\n"
+ "stopProfiling();"));
script->Run();
- CHECK_EQ(1, profiler->GetProfileCount());
- const v8::CpuProfile* profile = profiler->GetCpuProfile(0);
+ CHECK_EQ(1, iprofiler->GetProfilesCount());
+ const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
+ CHECK(profile);
const v8::CpuProfileNode* current = profile->GetTopDownRoot();
reinterpret_cast<ProfileNode*>(
const_cast<v8::CpuProfileNode*>(current))->Print(0);
diff --git a/deps/v8/test/cctest/test-random.cc b/deps/v8/test/cctest/test-random.cc
deleted file mode 100644
index ea1f36f24c..0000000000
--- a/deps/v8/test/cctest/test-random.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cctest.h"
-#include "compiler.h"
-#include "execution.h"
-#include "isolate.h"
-
-
-using namespace v8::internal;
-
-
-void SetSeeds(Handle<ByteArray> seeds, uint32_t state0, uint32_t state1) {
- for (int i = 0; i < 4; i++) {
- seeds->set(i, static_cast<byte>(state0 >> (i * kBitsPerByte)));
- seeds->set(i + 4, static_cast<byte>(state1 >> (i * kBitsPerByte)));
- }
-}
-
-
-void TestSeeds(Handle<JSFunction> fun,
- Handle<Context> context,
- uint32_t state0,
- uint32_t state1) {
- bool has_pending_exception;
- Handle<JSObject> global(context->global_object());
- Handle<ByteArray> seeds(context->random_seed());
-
- SetSeeds(seeds, state0, state1);
- Handle<Object> value = Execution::Call(
- context->GetIsolate(), fun, global, 0, NULL, &has_pending_exception);
- CHECK(value->IsHeapNumber());
- CHECK(fun->IsOptimized());
- double crankshaft_value = HeapNumber::cast(*value)->value();
-
- SetSeeds(seeds, state0, state1);
- V8::FillHeapNumberWithRandom(*value, *context);
- double runtime_value = HeapNumber::cast(*value)->value();
- CHECK_EQ(runtime_value, crankshaft_value);
-}
-
-
-TEST(CrankshaftRandom) {
- v8::V8::Initialize();
- // Skip test if crankshaft is disabled.
- if (!CcTest::i_isolate()->use_crankshaft()) return;
- v8::Isolate* v8_isolate = CcTest::isolate();
- v8::HandleScope scope(v8_isolate);
- v8::Context::Scope context_scope(v8::Context::New(v8_isolate));
-
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- Handle<Context> context(isolate->context());
- Handle<JSObject> global(context->global_object());
- Handle<ByteArray> seeds(context->random_seed());
- bool has_pending_exception;
-
- CompileRun("function f() { return Math.random(); }");
-
- Object* string = CcTest::i_isolate()->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("f"))->ToObjectChecked();
- MaybeObject* fun_object =
- context->global_object()->GetProperty(String::cast(string));
- Handle<JSFunction> fun(JSFunction::cast(fun_object->ToObjectChecked()));
-
- // Optimize function.
- Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
- Execution::Call(isolate, fun, global, 0, NULL, &has_pending_exception);
- if (!fun->IsOptimized()) fun->MarkForLazyRecompilation();
-
- // Test with some random values.
- TestSeeds(fun, context, 0xC0C0AFFE, 0x31415926);
- TestSeeds(fun, context, 0x01020304, 0xFFFFFFFF);
- TestSeeds(fun, context, 0x00000001, 0x00000001);
-}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index cc946464b2..b3d62f4717 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -49,6 +49,11 @@
#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
+#if V8_TARGET_ARCH_A64
+#include "a64/assembler-a64.h"
+#include "a64/macro-assembler-a64.h"
+#include "a64/regexp-macro-assembler-a64.h"
+#endif
#if V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips.h"
#include "mips/macro-assembler-mips.h"
@@ -132,7 +137,7 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
#define CHECK_PARSE_ERROR(input) CHECK(!CheckParse(input))
-#define CHECK_PARSE_EQ(input, expected) CHECK_EQ(expected, *Parse(input))
+#define CHECK_PARSE_EQ(input, expected) CHECK_EQ(expected, Parse(input).get())
#define CHECK_SIMPLE(input, simple) CHECK_EQ(simple, CheckSimple(input));
#define CHECK_MIN_MAX(input, min, max) \
{ MinMaxPair min_max = CheckMinMaxMatch(input); \
@@ -399,7 +404,7 @@ static void ExpectError(const char* input,
CHECK(result.tree == NULL);
CHECK(!result.error.is_null());
SmartArrayPointer<char> str = result.error->ToCString(ALLOW_NULLS);
- CHECK_EQ(expected, *str);
+ CHECK_EQ(expected, str.get());
}
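Several files in this commit (test-parsing.cc, test-regexp.cc, test-reloc-info.cc) stop dereferencing SmartArrayPointer with operator* and call an explicit get() instead, the same shape std::unique_ptr exposes. A tiny standalone analogue of the call-site change, written with std::unique_ptr since SmartArrayPointer is internal to V8:

#include <cstdio>
#include <cstring>
#include <memory>

int main() {
  const int buffer_size = 16;
  std::unique_ptr<unsigned char[]> buffer(new unsigned char[buffer_size]);
  std::memset(buffer.get(), 0, buffer_size);       // was: memset(*buffer, ...)
  unsigned char* pc = buffer.get();                // was: byte* pc = *buffer;
  unsigned char* end = buffer.get() + buffer_size; // was: *buffer + buffer_size
  std::printf("%d bytes between pc and end\n", static_cast<int>(end - pc));
  return 0;
}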
@@ -430,7 +435,7 @@ TEST(Errors) {
accumulator.Add("()");
}
SmartArrayPointer<const char> many_captures(accumulator.ToCString());
- ExpectError(*many_captures, kTooManyCaptures);
+ ExpectError(many_captures.get(), kTooManyCaptures);
}
@@ -444,27 +449,15 @@ static bool NotDigit(uc16 c) {
}
-static bool IsWhiteSpace(uc16 c) {
- switch (c) {
- case 0x09:
- case 0x0A:
- case 0x0B:
- case 0x0C:
- case 0x0d:
- case 0x20:
- case 0xA0:
- case 0x2028:
- case 0x2029:
- case 0xFEFF:
- return true;
- default:
- return unibrow::Space::Is(c);
- }
+static bool IsWhiteSpaceOrLineTerminator(uc16 c) {
+ // According to ECMA 5.1, 15.10.2.12 the CharacterClassEscape \s includes
+ // WhiteSpace (7.2) and LineTerminator (7.3) values.
+ return v8::internal::WhiteSpaceOrLineTerminator::Is(c);
}
-static bool NotWhiteSpace(uc16 c) {
- return !IsWhiteSpace(c);
+static bool NotWhiteSpaceNorLineTermiantor(uc16 c) {
+ return !IsWhiteSpaceOrLineTerminator(c);
}
@@ -494,8 +487,8 @@ TEST(CharacterClassEscapes) {
TestCharacterClassEscapes('.', IsRegExpNewline);
TestCharacterClassEscapes('d', IsDigit);
TestCharacterClassEscapes('D', NotDigit);
- TestCharacterClassEscapes('s', IsWhiteSpace);
- TestCharacterClassEscapes('S', NotWhiteSpace);
+ TestCharacterClassEscapes('s', IsWhiteSpaceOrLineTerminator);
+ TestCharacterClassEscapes('S', NotWhiteSpaceNorLineTermiantor);
TestCharacterClassEscapes('w', IsRegExpWord);
TestCharacterClassEscapes('W', NotWord);
}
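Swapping IsWhiteSpace for IsWhiteSpaceOrLineTerminator follows the new comment's reference to ES5.1 15.10.2.12: the \s class escape covers the union of WhiteSpace (7.2) and LineTerminator (7.3). A self-contained approximation for the explicitly listed code points; the real predicate also consults Unicode tables for the Zs (space separator) category, which this sketch leaves out:

#include <cassert>
#include <stdint.h>

static bool IsWhiteSpaceOrLineTerminatorApprox(uint16_t c) {
  switch (c) {
    case 0x09:    // TAB         -- WhiteSpace (ES5.1 7.2)
    case 0x0B:    // VT
    case 0x0C:    // FF
    case 0x20:    // SP
    case 0xA0:    // NBSP
    case 0xFEFF:  // BOM
    case 0x0A:    // LF          -- LineTerminator (ES5.1 7.3)
    case 0x0D:    // CR
    case 0x2028:  // LS
    case 0x2029:  // PS
      return true;
    default:
      return false;  // a full implementation also accepts category Zs spaces
  }
}

int main() {
  assert(IsWhiteSpaceOrLineTerminatorApprox(0x0A));    // \s matches LF
  assert(IsWhiteSpaceOrLineTerminatorApprox(0x2028));  // ... and LS
  assert(!IsWhiteSpaceOrLineTerminatorApprox('a'));
  return 0;
}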
@@ -539,7 +532,6 @@ static void Execute(const char* input,
#ifdef DEBUG
if (dot_output) {
RegExpEngine::DotPrint(input, node, false);
- exit(0);
}
#endif // DEBUG
}
@@ -702,6 +694,8 @@ typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_A64
+typedef RegExpMacroAssemblerA64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#endif
diff --git a/deps/v8/test/cctest/test-reloc-info.cc b/deps/v8/test/cctest/test-reloc-info.cc
index e638201db2..5ab9e803c2 100644
--- a/deps/v8/test/cctest/test-reloc-info.cc
+++ b/deps/v8/test/cctest/test-reloc-info.cc
@@ -47,8 +47,8 @@ TEST(Positions) {
const int buffer_size = code_size + relocation_info_size;
SmartArrayPointer<byte> buffer(new byte[buffer_size]);
- byte* pc = *buffer;
- byte* buffer_end = *buffer + buffer_size;
+ byte* pc = buffer.get();
+ byte* buffer_end = buffer.get() + buffer_size;
RelocInfoWriter writer(buffer_end, pc);
byte* relocation_info_end = buffer_end - relocation_info_size;
@@ -60,13 +60,13 @@ TEST(Positions) {
}
relocation_info_size = static_cast<int>(buffer_end - writer.pos());
- CodeDesc desc = { *buffer, buffer_size, code_size,
+ CodeDesc desc = { buffer.get(), buffer_size, code_size,
relocation_info_size, NULL };
// Read only (non-statement) positions.
{
RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::POSITION));
- pc = *buffer;
+ pc = buffer.get();
for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
RelocInfo::Mode mode = (i % 2 == 0) ?
RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
@@ -83,7 +83,7 @@ TEST(Positions) {
// Read only statement positions.
{
RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
- pc = *buffer;
+ pc = buffer.get();
for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
RelocInfo::Mode mode = (i % 2 == 0) ?
RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
@@ -100,7 +100,7 @@ TEST(Positions) {
// Read both types of positions.
{
RelocIterator it(desc, RelocInfo::kPositionMask);
- pc = *buffer;
+ pc = buffer.get();
for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
RelocInfo::Mode mode = (i % 2 == 0) ?
RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
diff --git a/deps/v8/test/cctest/test-representation.cc b/deps/v8/test/cctest/test-representation.cc
new file mode 100644
index 0000000000..95a65cbbf7
--- /dev/null
+++ b/deps/v8/test/cctest/test-representation.cc
@@ -0,0 +1,128 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "cctest.h"
+#include "types.h"
+#include "property-details.h"
+
+using namespace v8::internal;
+
+
+void TestPairPositive(Representation more_general,
+ Representation less_general) {
+ CHECK(more_general.is_more_general_than(less_general));
+}
+
+
+void TestPairNegative(Representation more_general,
+ Representation less_general) {
+ CHECK(!more_general.is_more_general_than(less_general));
+}
+
+
+TEST(RepresentationMoreGeneralThan) {
+ TestPairNegative(Representation::None(), Representation::None());
+ TestPairPositive(Representation::Integer8(), Representation::None());
+ TestPairPositive(Representation::UInteger8(), Representation::None());
+ TestPairPositive(Representation::Integer16(), Representation::None());
+ TestPairPositive(Representation::UInteger16(), Representation::None());
+ TestPairPositive(Representation::Smi(), Representation::None());
+ TestPairPositive(Representation::Integer32(), Representation::None());
+ TestPairPositive(Representation::HeapObject(), Representation::None());
+ TestPairPositive(Representation::Double(), Representation::None());
+ TestPairPositive(Representation::Tagged(), Representation::None());
+
+ TestPairNegative(Representation::None(), Representation::Integer8());
+ TestPairNegative(Representation::Integer8(), Representation::Integer8());
+ TestPairNegative(Representation::UInteger8(), Representation::Integer8());
+ TestPairPositive(Representation::Integer16(), Representation::Integer8());
+ TestPairPositive(Representation::UInteger16(), Representation::Integer8());
+ TestPairPositive(Representation::Smi(), Representation::Integer8());
+ TestPairPositive(Representation::Integer32(), Representation::Integer8());
+ TestPairNegative(Representation::HeapObject(), Representation::Integer8());
+ TestPairPositive(Representation::Double(), Representation::Integer8());
+ TestPairPositive(Representation::Tagged(), Representation::Integer8());
+
+ TestPairNegative(Representation::None(), Representation::UInteger8());
+ TestPairNegative(Representation::Integer8(), Representation::UInteger8());
+ TestPairNegative(Representation::UInteger8(), Representation::UInteger8());
+ TestPairPositive(Representation::Integer16(), Representation::UInteger8());
+ TestPairPositive(Representation::UInteger16(), Representation::UInteger8());
+ TestPairPositive(Representation::Smi(), Representation::UInteger8());
+ TestPairPositive(Representation::Integer32(), Representation::UInteger8());
+ TestPairNegative(Representation::HeapObject(), Representation::UInteger8());
+ TestPairPositive(Representation::Double(), Representation::UInteger8());
+ TestPairPositive(Representation::Tagged(), Representation::UInteger8());
+
+ TestPairNegative(Representation::None(), Representation::Integer16());
+ TestPairNegative(Representation::Integer8(), Representation::Integer16());
+ TestPairNegative(Representation::UInteger8(), Representation::Integer16());
+ TestPairNegative(Representation::Integer16(), Representation::Integer16());
+ TestPairNegative(Representation::UInteger16(), Representation::Integer16());
+ TestPairPositive(Representation::Smi(), Representation::Integer16());
+ TestPairPositive(Representation::Integer32(), Representation::Integer16());
+ TestPairNegative(Representation::HeapObject(), Representation::Integer16());
+ TestPairPositive(Representation::Double(), Representation::Integer16());
+ TestPairPositive(Representation::Tagged(), Representation::Integer16());
+
+ TestPairNegative(Representation::None(), Representation::UInteger16());
+ TestPairNegative(Representation::Integer8(), Representation::UInteger16());
+ TestPairNegative(Representation::UInteger8(), Representation::UInteger16());
+ TestPairNegative(Representation::Integer16(), Representation::UInteger16());
+ TestPairNegative(Representation::UInteger16(), Representation::UInteger16());
+ TestPairPositive(Representation::Smi(), Representation::UInteger16());
+ TestPairPositive(Representation::Integer32(), Representation::UInteger16());
+ TestPairNegative(Representation::HeapObject(), Representation::UInteger16());
+ TestPairPositive(Representation::Double(), Representation::UInteger16());
+ TestPairPositive(Representation::Tagged(), Representation::UInteger16());
+
+ TestPairNegative(Representation::None(), Representation::Smi());
+ TestPairNegative(Representation::Integer8(), Representation::Smi());
+ TestPairNegative(Representation::UInteger8(), Representation::Smi());
+ TestPairNegative(Representation::Integer16(), Representation::Smi());
+ TestPairNegative(Representation::UInteger16(), Representation::Smi());
+ TestPairNegative(Representation::Smi(), Representation::Smi());
+ TestPairPositive(Representation::Integer32(), Representation::Smi());
+ TestPairNegative(Representation::HeapObject(), Representation::Smi());
+ TestPairPositive(Representation::Double(), Representation::Smi());
+ TestPairPositive(Representation::Tagged(), Representation::Smi());
+
+ TestPairNegative(Representation::None(), Representation::Integer32());
+ TestPairNegative(Representation::Integer8(), Representation::Integer32());
+ TestPairNegative(Representation::UInteger8(), Representation::Integer32());
+ TestPairNegative(Representation::Integer16(), Representation::Integer32());
+ TestPairNegative(Representation::UInteger16(), Representation::Integer32());
+ TestPairNegative(Representation::Smi(), Representation::Integer32());
+ TestPairNegative(Representation::Integer32(), Representation::Integer32());
+ TestPairNegative(Representation::HeapObject(), Representation::Integer32());
+ TestPairPositive(Representation::Double(), Representation::Integer32());
+ TestPairPositive(Representation::Tagged(), Representation::Integer32());
+
+ TestPairNegative(Representation::None(), Representation::External());
+ TestPairNegative(Representation::External(), Representation::External());
+ TestPairPositive(Representation::External(), Representation::None());
+}
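
The new test above checks that is_more_general_than() is a strict ordering over representations: it never holds reflexively and holds in at most one direction for any pair. A few call sites pulled out of the table above, as a quick reading aid (same headers and macros as the test):

    using namespace v8::internal;

    void RepresentationOrderingExamples() {
      // Tagged strictly generalizes Smi...
      CHECK(Representation::Tagged().is_more_general_than(Representation::Smi()));
      // ...but the relation is irreflexive,
      CHECK(!Representation::Smi().is_more_general_than(Representation::Smi()));
      // and None sits at the bottom, never above anything.
      CHECK(!Representation::None().is_more_general_than(Representation::Integer32()));
    }
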
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 4132d2d4cf..c01a6889c3 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -347,7 +347,7 @@ DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
env->Enter();
const char* c_source = "\"1234\".length";
- v8::Local<v8::String> source = v8::String::New(c_source);
+ v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
v8::Local<v8::Script> script = v8::Script::Compile(source);
CHECK_EQ(4, script->Run()->Int32Value());
}
@@ -365,7 +365,7 @@ DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
env->Enter();
const char* c_source = "\"1234\".length";
- v8::Local<v8::String> source = v8::String::New(c_source);
+ v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
v8::Local<v8::Script> script = v8::Script::Compile(source);
CHECK_EQ(4, script->Run()->Int32Value());
}
@@ -402,7 +402,7 @@ TEST(PartialSerialization) {
Object* raw_foo;
{
v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::String> foo = v8::String::New("foo");
+ v8::Local<v8::String> foo = v8::String::NewFromUtf8(v8_isolate, "foo");
ASSERT(!foo.IsEmpty());
raw_foo = *(v8::Utils::OpenHandle(*foo));
}
@@ -415,7 +415,7 @@ TEST(PartialSerialization) {
v8::HandleScope handle_scope(v8_isolate);
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
}
- env.Dispose();
+ env.Reset();
FileByteSink startup_sink(startup_name.start());
StartupSerializer startup_serializer(isolate, &startup_sink);
@@ -562,7 +562,7 @@ TEST(ContextSerialization) {
i::Object* raw_context = *v8::Utils::OpenPersistent(env);
- env.Dispose();
+ env.Reset();
FileByteSink startup_sink(startup_name.start());
StartupSerializer startup_serializer(isolate, &startup_sink);
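
Two migrations recur in the serializer tests above and throughout this patch: strings are built with an explicit isolate (String::NewFromUtf8 replaces the removed String::New), and persistent handles are released with Reset() rather than Dispose(). A condensed sketch of a test body after the migration (assumes an isolate with an entered context, as in the tests above; error handling elided):

    v8::Isolate* isolate = CcTest::isolate();
    v8::HandleScope handle_scope(isolate);

    const char* c_source = "\"1234\".length";
    v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
    v8::Local<v8::Script> script = v8::Script::Compile(source);
    CHECK_EQ(4, script->Run()->Int32Value());

    // Persistent handles now drop their storage cell via Reset():
    v8::Persistent<v8::Context> env(isolate, v8::Context::New(isolate));
    // ... serialize, enter/exit the context ...
    env.Reset();  // was: env.Dispose()
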
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/test-spaces.cc
index 73710658a2..223912e37b 100644
--- a/deps/v8/test/cctest/test-spaces.cc
+++ b/deps/v8/test/cctest/test-spaces.cc
@@ -327,9 +327,9 @@ TEST(NewSpace) {
CcTest::heap()->ReservedSemiSpaceSize()));
CHECK(new_space.HasBeenSetUp());
- while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
+ while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
Object* obj =
- new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
+ new_space.AllocateRaw(Page::kMaxRegularHeapObjectSize)->
ToObjectUnchecked();
CHECK(new_space.Contains(HeapObject::cast(obj)));
}
@@ -359,7 +359,7 @@ TEST(OldSpace) {
CHECK(s->SetUp());
while (s->Available() > 0) {
- s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
+ s->AllocateRaw(Page::kMaxRegularHeapObjectSize)->ToObjectUnchecked();
}
s->TearDown();
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 4aa74a8191..4b31e614d5 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -38,7 +38,6 @@
#include "factory.h"
#include "objects.h"
#include "cctest.h"
-#include "zone-inl.h"
// Adapted from http://en.wikipedia.org/wiki/Multiply-with-carry
class MyRandomNumberGenerator {
@@ -100,12 +99,10 @@ static const int DEEP_DEPTH = 8 * 1024;
static const int SUPER_DEEP_DEPTH = 80 * 1024;
-class Resource: public v8::String::ExternalStringResource,
- public ZoneObject {
+class Resource: public v8::String::ExternalStringResource {
public:
- explicit Resource(Vector<const uc16> string): data_(string.start()) {
- length_ = string.length();
- }
+ Resource(const uc16* data, size_t length): data_(data), length_(length) {}
+ ~Resource() { i::DeleteArray(data_); }
virtual const uint16_t* data() const { return data_; }
virtual size_t length() const { return length_; }
@@ -115,12 +112,11 @@ class Resource: public v8::String::ExternalStringResource,
};
-class AsciiResource: public v8::String::ExternalAsciiStringResource,
- public ZoneObject {
+class AsciiResource: public v8::String::ExternalAsciiStringResource {
public:
- explicit AsciiResource(Vector<const char> string): data_(string.start()) {
- length_ = string.length();
- }
+ AsciiResource(const char* data, size_t length)
+ : data_(data), length_(length) {}
+ ~AsciiResource() { i::DeleteArray(data_); }
virtual const char* data() const { return data_; }
virtual size_t length() const { return length_; }
@@ -133,8 +129,7 @@ class AsciiResource: public v8::String::ExternalAsciiStringResource,
static void InitializeBuildingBlocks(Handle<String>* building_blocks,
int bb_length,
bool long_blocks,
- MyRandomNumberGenerator* rng,
- Zone* zone) {
+ MyRandomNumberGenerator* rng) {
// A list of pointers that we don't have any interest in cleaning up.
// If they are reachable from a root then leak detection won't complain.
Isolate* isolate = CcTest::i_isolate();
@@ -189,25 +184,28 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
break;
}
case 2: {
- uc16* buf = zone->NewArray<uc16>(len);
+ uc16* buf = NewArray<uc16>(len);
for (int j = 0; j < len; j++) {
buf[j] = rng->next(0x10000);
}
- Resource* resource = new(zone) Resource(Vector<const uc16>(buf, len));
- building_blocks[i] = factory->NewExternalStringFromTwoByte(resource);
+ Resource* resource = new Resource(buf, len);
+ building_blocks[i] =
+ v8::Utils::OpenHandle(
+ *v8::String::NewExternal(CcTest::isolate(), resource));
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
break;
}
case 3: {
- char* buf = zone->NewArray<char>(len);
+ char* buf = NewArray<char>(len);
for (int j = 0; j < len; j++) {
buf[j] = rng->next(0x80);
}
- AsciiResource* resource =
- new(zone) AsciiResource(Vector<const char>(buf, len));
- building_blocks[i] = factory->NewExternalStringFromAscii(resource);
+ AsciiResource* resource = new AsciiResource(buf, len);
+ building_blocks[i] =
+ v8::Utils::OpenHandle(
+ *v8::String::NewExternal(CcTest::isolate(), resource));
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
@@ -263,7 +261,7 @@ void ConsStringStats::VerifyEqual(const ConsStringStats& that) const {
class ConsStringGenerationData {
public:
static const int kNumberOfBuildingBlocks = 256;
- ConsStringGenerationData(bool long_blocks, Zone* zone);
+ explicit ConsStringGenerationData(bool long_blocks);
void Reset();
inline Handle<String> block(int offset);
inline Handle<String> block(uint32_t offset);
@@ -285,11 +283,10 @@ class ConsStringGenerationData {
};
-ConsStringGenerationData::ConsStringGenerationData(bool long_blocks,
- Zone* zone) {
+ConsStringGenerationData::ConsStringGenerationData(bool long_blocks) {
rng_.init();
InitializeBuildingBlocks(
- building_blocks_, kNumberOfBuildingBlocks, long_blocks, &rng_, zone);
+ building_blocks_, kNumberOfBuildingBlocks, long_blocks, &rng_);
empty_string_ = CcTest::heap()->empty_string();
Reset();
}
@@ -571,8 +568,7 @@ TEST(Traverse) {
printf("TestTraverse\n");
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate());
- ConsStringGenerationData data(false, &zone);
+ ConsStringGenerationData data(false);
Handle<String> flat = ConstructBalanced(&data);
FlattenString(flat);
Handle<String> left_asymmetric = ConstructLeft(&data, DEEP_DEPTH);
@@ -661,8 +657,7 @@ void TestStringCharacterStream(BuildString build, int test_cases) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Zone zone(isolate);
- ConsStringGenerationData data(true, &zone);
+ ConsStringGenerationData data(true);
for (int i = 0; i < test_cases; i++) {
printf("%d\n", i);
HandleScope inner_scope(isolate);
@@ -889,9 +884,9 @@ TEST(Utf8Conversion) {
v8::HandleScope handle_scope(CcTest::isolate());
// A simple ascii string
const char* ascii_string = "abcdef12345";
- int len =
- v8::String::New(ascii_string,
- StrLength(ascii_string))->Utf8Length();
+ int len = v8::String::NewFromUtf8(CcTest::isolate(), ascii_string,
+ v8::String::kNormalString,
+ StrLength(ascii_string))->Utf8Length();
CHECK_EQ(StrLength(ascii_string), len);
// A mixed ascii and non-ascii string
// U+02E4 -> CB A4
@@ -906,7 +901,8 @@ TEST(Utf8Conversion) {
// The number of bytes expected to be written for each length
const int lengths[12] = {0, 0, 2, 3, 3, 3, 6, 7, 7, 7, 10, 11};
const int char_lengths[12] = {0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 5};
- v8::Handle<v8::String> mixed = v8::String::New(mixed_string, 5);
+ v8::Handle<v8::String> mixed = v8::String::NewFromTwoByte(
+ CcTest::isolate(), mixed_string, v8::String::kNormalString, 5);
CHECK_EQ(10, mixed->Utf8Length());
// Try encoding the string with all capacities
char buffer[11];
@@ -930,9 +926,6 @@ TEST(Utf8Conversion) {
TEST(ExternalShortStringAdd) {
- Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate);
-
LocalContext context;
v8::HandleScope handle_scope(CcTest::isolate());
@@ -942,34 +935,34 @@ TEST(ExternalShortStringAdd) {
// Allocate two JavaScript arrays for holding short strings.
v8::Handle<v8::Array> ascii_external_strings =
- v8::Array::New(kMaxLength + 1);
+ v8::Array::New(CcTest::isolate(), kMaxLength + 1);
v8::Handle<v8::Array> non_ascii_external_strings =
- v8::Array::New(kMaxLength + 1);
+ v8::Array::New(CcTest::isolate(), kMaxLength + 1);
// Generate short ascii and non-ascii external strings.
for (int i = 0; i <= kMaxLength; i++) {
- char* ascii = zone.NewArray<char>(i + 1);
+ char* ascii = NewArray<char>(i + 1);
for (int j = 0; j < i; j++) {
ascii[j] = 'a';
}
// Terminating '\0' is left out on purpose. It is not required for external
// string data.
- AsciiResource* ascii_resource =
- new(&zone) AsciiResource(Vector<const char>(ascii, i));
+ AsciiResource* ascii_resource = new AsciiResource(ascii, i);
v8::Local<v8::String> ascii_external_string =
- v8::String::NewExternal(ascii_resource);
+ v8::String::NewExternal(CcTest::isolate(), ascii_resource);
- ascii_external_strings->Set(v8::Integer::New(i), ascii_external_string);
- uc16* non_ascii = zone.NewArray<uc16>(i + 1);
+ ascii_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
+ ascii_external_string);
+ uc16* non_ascii = NewArray<uc16>(i + 1);
for (int j = 0; j < i; j++) {
non_ascii[j] = 0x1234;
}
// Terminating '\0' is left out on purpose. It is not required for external
// string data.
- Resource* resource = new(&zone) Resource(Vector<const uc16>(non_ascii, i));
+ Resource* resource = new Resource(non_ascii, i);
v8::Local<v8::String> non_ascii_external_string =
- v8::String::NewExternal(resource);
- non_ascii_external_strings->Set(v8::Integer::New(i),
+ v8::String::NewExternal(CcTest::isolate(), resource);
+ non_ascii_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
non_ascii_external_string);
}
@@ -977,7 +970,8 @@ TEST(ExternalShortStringAdd) {
v8::Handle<v8::Object> global = context->Global();
global->Set(v8_str("external_ascii"), ascii_external_strings);
global->Set(v8_str("external_non_ascii"), non_ascii_external_strings);
- global->Set(v8_str("max_length"), v8::Integer::New(kMaxLength));
+ global->Set(v8_str("max_length"),
+ v8::Integer::New(CcTest::isolate(), kMaxLength));
// Add short external ascii and non-ascii strings checking the result.
static const char* source =
@@ -1019,8 +1013,6 @@ TEST(ExternalShortStringAdd) {
TEST(JSONStringifySliceMadeExternal) {
CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate);
// Create a sliced string from a one-byte string. The latter is turned
// into a two-byte external string. Check that JSON.stringify works.
v8::HandleScope handle_scope(CcTest::isolate());
@@ -1034,10 +1026,9 @@ TEST(JSONStringifySliceMadeExternal) {
CHECK(v8::Utils::OpenHandle(*underlying)->IsSeqOneByteString());
int length = underlying->Length();
- uc16* two_byte = zone.NewArray<uc16>(length + 1);
+ uc16* two_byte = NewArray<uc16>(length + 1);
underlying->Write(two_byte);
- Resource* resource =
- new(&zone) Resource(Vector<const uc16>(two_byte, length));
+ Resource* resource = new Resource(two_byte, length);
CHECK(underlying->MakeExternal(resource));
CHECK(v8::Utils::OpenHandle(*slice)->IsSlicedString());
CHECK(v8::Utils::OpenHandle(*underlying)->IsExternalTwoByteString());
@@ -1053,7 +1044,6 @@ TEST(CachedHashOverflow) {
// values didn't fit in the hash field.
// See http://code.google.com/p/v8/issues/detail?id=728
Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate);
v8::HandleScope handle_scope(CcTest::isolate());
// Lines must be executed sequentially. Combining them into one script
@@ -1083,8 +1073,8 @@ TEST(CachedHashOverflow) {
const char* line;
for (int i = 0; (line = lines[i]); i++) {
printf("%s\n", line);
- v8::Local<v8::Value> result =
- v8::Script::Compile(v8::String::New(line))->Run();
+ v8::Local<v8::Value> result = v8::Script::Compile(
+ v8::String::NewFromUtf8(CcTest::isolate(), line))->Run();
CHECK_EQ(results[i]->IsUndefined(), result->IsUndefined());
CHECK_EQ(results[i]->IsNumber(), result->IsNumber());
if (result->IsNumber()) {
@@ -1174,7 +1164,7 @@ TEST(TrivialSlice) {
CHECK(result->IsString());
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(string->IsSlicedString());
- CHECK_EQ("bcdefghijklmnopqrstuvwxy", *(string->ToCString()));
+ CHECK_EQ("bcdefghijklmnopqrstuvwxy", string->ToCString().get());
}
@@ -1196,14 +1186,14 @@ TEST(SliceFromSlice) {
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(string->IsSlicedString());
CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
- CHECK_EQ("bcdefghijklmnopqrstuvwxy", *(string->ToCString()));
+ CHECK_EQ("bcdefghijklmnopqrstuvwxy", string->ToCString().get());
result = CompileRun(slice_from_slice);
CHECK(result->IsString());
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(string->IsSlicedString());
CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
- CHECK_EQ("cdefghijklmnopqrstuvwx", *(string->ToCString()));
+ CHECK_EQ("cdefghijklmnopqrstuvwx", string->ToCString().get());
}
@@ -1213,7 +1203,7 @@ TEST(AsciiArrayJoin) {
v8::ResourceConstraints constraints;
constraints.set_max_young_space_size(256 * K);
constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(&constraints);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
// String s is made of 2^17 = 131072 'c' characters and a is an array
// starting with 'bad', followed by 2^14 times the string s. That means the
@@ -1230,8 +1220,8 @@ TEST(AsciiArrayJoin) {
v8::HandleScope scope(CcTest::isolate());
LocalContext context;
v8::V8::IgnoreOutOfMemoryException();
- v8::Local<v8::Script> script =
- v8::Script::Compile(v8::String::New(join_causing_out_of_memory));
+ v8::Local<v8::Script> script = v8::Script::Compile(
+ v8::String::NewFromUtf8(CcTest::isolate(), join_causing_out_of_memory));
v8::Local<v8::Value> result = script->Run();
// Check for out of memory state.
@@ -1268,7 +1258,7 @@ TEST(RobustSubStringStub) {
// Ordinary HeapNumbers can be handled (in runtime).
result = CompileRun("%_SubString(short, Math.sqrt(4), 5.1);");
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
- CHECK_EQ("cde", *(string->ToCString()));
+ CHECK_EQ("cde", string->ToCString().get());
CompileRun("var long = 'abcdefghijklmnopqrstuvwxyz';");
// Invalid indices.
@@ -1283,7 +1273,7 @@ TEST(RobustSubStringStub) {
// Ordinary HeapNumbers within bounds can be handled (in runtime).
result = CompileRun("%_SubString(long, Math.sqrt(4), 17.1);");
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
- CHECK_EQ("cdefghijklmnopq", *(string->ToCString()));
+ CHECK_EQ("cdefghijklmnopq", string->ToCString().get());
// Test that out-of-bounds substring of a slice fails when the indices
// would have been valid for the underlying string.
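
The string tests above stop zone-allocating external string resources. Each resource is now created with plain new, owns its character buffer (it calls DeleteArray in its destructor), and is handed to the VM through String::NewExternal, which disposes of the resource once the string dies. A hedged sketch of the resulting pattern, reusing the AsciiResource class defined earlier in this file (MakeExternalAscii is an illustrative helper, not part of the patch):

    #include <string.h>

    v8::Local<v8::String> MakeExternalAscii(const char* text) {
      int len = static_cast<int>(strlen(text));
      char* buf = i::NewArray<char>(len);
      memcpy(buf, text, len);  // terminating '\0' not required for external data
      AsciiResource* resource = new AsciiResource(buf, len);  // resource owns buf
      // V8 keeps the resource alive while the string is reachable and deletes
      // it afterwards; the test no longer frees it explicitly.
      return v8::String::NewExternal(CcTest::isolate(), resource);
    }
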
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index a04ffa70c5..6fceea6135 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -37,7 +37,7 @@ TEST(Create) {
#endif
}
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
// All symbols should be distinct.
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 13f594096f..569ee95c64 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -51,8 +51,8 @@ void Fail(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Loop(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Handle<v8::String> source =
- v8::String::New("try { doloop(); fail(); } catch(e) { fail(); }");
+ v8::Handle<v8::String> source = v8::String::NewFromUtf8(
+ args.GetIsolate(), "try { doloop(); fail(); } catch(e) { fail(); }");
v8::Handle<v8::Value> result = v8::Script::Compile(source)->Run();
CHECK(result.IsEmpty());
CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
@@ -62,19 +62,20 @@ void Loop(const v8::FunctionCallbackInfo<v8::Value>& args) {
void DoLoop(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(v8::String::New("function f() {"
- " var term = true;"
- " try {"
- " while(true) {"
- " if (term) terminate();"
- " term = false;"
- " }"
- " fail();"
- " } catch(e) {"
- " fail();"
- " }"
- "}"
- "f()"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
+ "function f() {"
+ " var term = true;"
+ " try {"
+ " while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ " }"
+ " fail();"
+ " } catch(e) {"
+ " fail();"
+ " }"
+ "}"
+ "f()"))->Run();
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
@@ -86,11 +87,12 @@ void DoLoop(const v8::FunctionCallbackInfo<v8::Value>& args) {
void DoLoopNoCall(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(v8::String::New("var term = true;"
- "while(true) {"
- " if (term) terminate();"
- " term = false;"
- "}"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
+ "var term = true;"
+ "while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ "}"))->Run();
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
@@ -100,14 +102,18 @@ void DoLoopNoCall(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Handle<v8::ObjectTemplate> CreateGlobalTemplate(
+ v8::Isolate* isolate,
v8::FunctionCallback terminate,
v8::FunctionCallback doloop) {
- v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
- global->Set(v8::String::New("terminate"),
- v8::FunctionTemplate::New(terminate));
- global->Set(v8::String::New("fail"), v8::FunctionTemplate::New(Fail));
- global->Set(v8::String::New("loop"), v8::FunctionTemplate::New(Loop));
- global->Set(v8::String::New("doloop"), v8::FunctionTemplate::New(doloop));
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+ global->Set(v8::String::NewFromUtf8(isolate, "terminate"),
+ v8::FunctionTemplate::New(isolate, terminate));
+ global->Set(v8::String::NewFromUtf8(isolate, "fail"),
+ v8::FunctionTemplate::New(isolate, Fail));
+ global->Set(v8::String::NewFromUtf8(isolate, "loop"),
+ v8::FunctionTemplate::New(isolate, Loop));
+ global->Set(v8::String::NewFromUtf8(isolate, "doloop"),
+ v8::FunctionTemplate::New(isolate, doloop));
return global;
}
@@ -117,14 +123,14 @@ v8::Handle<v8::ObjectTemplate> CreateGlobalTemplate(
TEST(TerminateOnlyV8ThreadFromThreadItself) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global =
- CreateGlobalTemplate(TerminateCurrentThread, DoLoop);
+ CreateGlobalTemplate(CcTest::isolate(), TerminateCurrentThread, DoLoop);
v8::Handle<v8::Context> context =
v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Run a loop that will be infinite if thread termination does not work.
- v8::Handle<v8::String> source =
- v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
+ v8::Handle<v8::String> source = v8::String::NewFromUtf8(
+ CcTest::isolate(), "try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
// Test that we can run the code again after thread termination.
CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
@@ -136,15 +142,15 @@ TEST(TerminateOnlyV8ThreadFromThreadItself) {
// itself in a loop that performs no calls.
TEST(TerminateOnlyV8ThreadFromThreadItselfNoLoop) {
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global =
- CreateGlobalTemplate(TerminateCurrentThread, DoLoopNoCall);
+ v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ CcTest::isolate(), TerminateCurrentThread, DoLoopNoCall);
v8::Handle<v8::Context> context =
v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Run a loop that will be infinite if thread termination does not work.
- v8::Handle<v8::String> source =
- v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
+ v8::Handle<v8::String> source = v8::String::NewFromUtf8(
+ CcTest::isolate(), "try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Test that we can run the code again after thread termination.
@@ -176,14 +182,15 @@ TEST(TerminateOnlyV8ThreadFromOtherThread) {
thread.Start();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(Signal, DoLoop);
+ v8::Handle<v8::ObjectTemplate> global =
+ CreateGlobalTemplate(CcTest::isolate(), Signal, DoLoop);
v8::Handle<v8::Context> context =
v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Run a loop that will be infinite if thread termination does not work.
- v8::Handle<v8::String> source =
- v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
+ v8::Handle<v8::String> source = v8::String::NewFromUtf8(
+ CcTest::isolate(), "try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
thread.Join();
@@ -201,8 +208,9 @@ void TerminateOrReturnObject(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::V8::TerminateExecution(args.GetIsolate());
return;
}
- v8::Local<v8::Object> result = v8::Object::New();
- result->Set(v8::String::New("x"), v8::Integer::New(42));
+ v8::Local<v8::Object> result = v8::Object::New(args.GetIsolate());
+ result->Set(v8::String::NewFromUtf8(args.GetIsolate(), "x"),
+ v8::Integer::New(args.GetIsolate(), 42));
args.GetReturnValue().Set(result);
}
@@ -210,17 +218,19 @@ void TerminateOrReturnObject(const v8::FunctionCallbackInfo<v8::Value>& args) {
void LoopGetProperty(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(v8::String::New("function f() {"
- " try {"
- " while(true) {"
- " terminate_or_return_object().x;"
- " }"
- " fail();"
- " } catch(e) {"
- " fail();"
- " }"
- "}"
- "f()"))->Run();
+ v8::Script::Compile(
+ v8::String::NewFromUtf8(args.GetIsolate(),
+ "function f() {"
+ " try {"
+ " while(true) {"
+ " terminate_or_return_object().x;"
+ " }"
+ " fail();"
+ " } catch(e) {"
+ " fail();"
+ " }"
+ "}"
+ "f()"))->Run();
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
@@ -232,25 +242,28 @@ void LoopGetProperty(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Test that we correctly handle termination exceptions if they are
// triggered by the creation of error objects in connection with ICs.
TEST(TerminateLoadICException) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
- global->Set(v8::String::New("terminate_or_return_object"),
- v8::FunctionTemplate::New(TerminateOrReturnObject));
- global->Set(v8::String::New("fail"), v8::FunctionTemplate::New(Fail));
- global->Set(v8::String::New("loop"),
- v8::FunctionTemplate::New(LoopGetProperty));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+ global->Set(
+ v8::String::NewFromUtf8(isolate, "terminate_or_return_object"),
+ v8::FunctionTemplate::New(isolate, TerminateOrReturnObject));
+ global->Set(v8::String::NewFromUtf8(isolate, "fail"),
+ v8::FunctionTemplate::New(isolate, Fail));
+ global->Set(v8::String::NewFromUtf8(isolate, "loop"),
+ v8::FunctionTemplate::New(isolate, LoopGetProperty));
v8::Handle<v8::Context> context =
- v8::Context::New(CcTest::isolate(), NULL, global);
+ v8::Context::New(isolate, NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
+ CHECK(!v8::V8::IsExecutionTerminating(isolate));
// Run a loop that will be infinite if thread termination does not work.
- v8::Handle<v8::String> source =
- v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
+ v8::Handle<v8::String> source = v8::String::NewFromUtf8(
+ isolate, "try { loop(); fail(); } catch(e) { fail(); }");
call_count = 0;
v8::Script::Compile(source)->Run();
// Test that we can run the code again after thread termination.
- CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
+ CHECK(!v8::V8::IsExecutionTerminating(isolate));
call_count = 0;
v8::Script::Compile(source)->Run();
}
@@ -259,57 +272,66 @@ TEST(TerminateLoadICException) {
void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(v8::String::New("function f() {"
- " var term = true;"
- " try {"
- " while(true) {"
- " if (term) terminate();"
- " term = false;"
- " }"
- " fail();"
- " } catch(e) {"
- " fail();"
- " }"
- "}"
- "f()"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
+ "function f() {"
+ " var term = true;"
+ " try {"
+ " while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ " }"
+ " fail();"
+ " } catch(e) {"
+ " fail();"
+ " }"
+ "}"
+ "f()"))->Run();
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(v8::String::New("function f() { fail(); } f()"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
+ "function f() { fail(); } f()"))
+ ->Run();
}
// Test that reentry into V8 while the termination exception is still pending
// (has not yet unwound the 0-level JS frame) does not crash.
TEST(TerminateAndReenterFromThreadItself) {
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global =
- CreateGlobalTemplate(TerminateCurrentThread, ReenterAfterTermination);
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ isolate, TerminateCurrentThread, ReenterAfterTermination);
v8::Handle<v8::Context> context =
- v8::Context::New(CcTest::isolate(), NULL, global);
+ v8::Context::New(isolate, NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
- v8::Handle<v8::String> source =
- v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
+ v8::Handle<v8::String> source = v8::String::NewFromUtf8(
+ isolate, "try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
- CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
+ CHECK(!v8::V8::IsExecutionTerminating(isolate));
// Check we can run JS again after termination.
- CHECK(v8::Script::Compile(v8::String::New("function f() { return true; }"
- "f()"))->Run()->IsTrue());
+ CHECK(v8::Script::Compile(
+ v8::String::NewFromUtf8(isolate,
+ "function f() { return true; }"
+ "f()"))
+ ->Run()
+ ->IsTrue());
}
void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
CHECK(!v8::V8::IsExecutionTerminating());
- v8::Script::Compile(v8::String::New("var term = true;"
- "while(true) {"
- " if (term) terminate();"
- " term = false;"
- "}"
- "fail();"))->Run();
+ v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
+ "var term = true;"
+ "while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ "}"
+ "fail();"))->Run();
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
@@ -326,13 +348,13 @@ void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(TerminateCancelTerminateFromThreadItself) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global =
- CreateGlobalTemplate(TerminateCurrentThread, DoLoopCancelTerminate);
+ v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ isolate, TerminateCurrentThread, DoLoopCancelTerminate);
v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
- v8::Handle<v8::String> source =
- v8::String::New("try { doloop(); } catch(e) { fail(); } 'completed';");
+ v8::Handle<v8::String> source = v8::String::NewFromUtf8(
+ isolate, "try { doloop(); } catch(e) { fail(); } 'completed';");
// Check that execution completed with correct return value.
CHECK(v8::Script::Compile(source)->Run()->Equals(v8_str("completed")));
}
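
The thread-termination tests are converted to the isolate-scoped template constructors: ObjectTemplate::New, FunctionTemplate::New, Object::New and Integer::New now take the isolate as their first argument, so CreateGlobalTemplate threads it through explicitly. A condensed sketch of the post-migration setup (callbacks as defined in the file above):

    v8::Isolate* isolate = CcTest::isolate();
    v8::HandleScope scope(isolate);
    v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
    global->Set(v8::String::NewFromUtf8(isolate, "terminate"),
                v8::FunctionTemplate::New(isolate, TerminateCurrentThread));
    global->Set(v8::String::NewFromUtf8(isolate, "fail"),
                v8::FunctionTemplate::New(isolate, Fail));
    v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
    v8::Context::Scope context_scope(context);
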
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index 4709961636..24fb1d1d75 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -33,28 +33,6 @@
#include "cctest.h"
-TEST(Preemption) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Locker locker(isolate);
- v8::V8::Initialize();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
-
- v8::Locker::StartPreemption(isolate, 100);
-
- v8::Handle<v8::Script> script = v8::Script::Compile(
- v8::String::New("var count = 0; var obj = new Object(); count++;\n"));
-
- script->Run();
-
- v8::Locker::StopPreemption(isolate);
- v8::internal::OS::Sleep(500); // Make sure the timer fires.
-
- script->Run();
-}
-
-
enum Turn {
FILL_CACHE,
CLEAN_CACHE,
@@ -80,7 +58,8 @@ class ThreadA : public v8::internal::Thread {
// Fill String.search cache.
v8::Handle<v8::Script> script = v8::Script::Compile(
- v8::String::New(
+ v8::String::NewFromUtf8(
+ isolate,
"for (var i = 0; i < 3; i++) {"
" var result = \"a\".search(\"a\");"
" if (result != 0) throw \"result: \" + result + \" @\" + i;"
@@ -180,18 +159,18 @@ TEST(ThreadIdValidation) {
const int kNThreads = 100;
i::List<ThreadIdValidationThread*> threads(kNThreads);
i::List<i::ThreadId> refs(kNThreads);
- i::Semaphore* semaphore = new i::Semaphore(0);
+ i::Semaphore semaphore(0);
ThreadIdValidationThread* prev = NULL;
for (int i = kNThreads - 1; i >= 0; i--) {
ThreadIdValidationThread* newThread =
- new ThreadIdValidationThread(prev, &refs, i, semaphore);
+ new ThreadIdValidationThread(prev, &refs, i, &semaphore);
threads.Add(newThread);
prev = newThread;
refs.Add(i::ThreadId::Invalid());
}
prev->Start();
for (int i = 0; i < kNThreads; i++) {
- semaphore->Wait();
+ semaphore.Wait();
}
for (int i = 0; i < kNThreads; i++) {
delete threads[i];
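
Alongside the removal of the obsolete Preemption test, the hunk above turns the semaphore into a plain stack object with value semantics, removing the need for a manual delete. A minimal sketch of the new usage (i::Semaphore and kNThreads as in the test; thread creation elided):

    i::Semaphore semaphore(0);  // was: i::Semaphore* semaphore = new i::Semaphore(0)
    // The worker threads each Signal() the semaphore once; the main thread
    // waits for all of them before tearing the workers down.
    for (int i = 0; i < kNThreads; i++) {
      semaphore.Wait();
    }
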
diff --git a/deps/v8/test/cctest/test-time.cc b/deps/v8/test/cctest/test-time.cc
index 28d647a5c1..1ef9e08f65 100644
--- a/deps/v8/test/cctest/test-time.cc
+++ b/deps/v8/test/cctest/test-time.cc
@@ -25,10 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <cstdlib>
-
#include "v8.h"
+#if V8_OS_POSIX
+#include <sys/time.h> // NOLINT
+#endif
+
#include "cctest.h"
#if V8_OS_WIN
#include "win32-headers.h"
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 264d2ed881..d29ee41100 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -30,716 +30,848 @@
using namespace v8::internal;
+template<class Type, class TypeHandle, class Region>
+class Types {
+ public:
+ Types(Region* region, Isolate* isolate) :
+ None(Type::None(region)),
+ Any(Type::Any(region)),
+ Oddball(Type::Oddball(region)),
+ Boolean(Type::Boolean(region)),
+ Null(Type::Null(region)),
+ Undefined(Type::Undefined(region)),
+ Number(Type::Number(region)),
+ Smi(Type::Smi(region)),
+ Signed32(Type::Signed32(region)),
+ Double(Type::Double(region)),
+ Name(Type::Name(region)),
+ UniqueName(Type::UniqueName(region)),
+ String(Type::String(region)),
+ InternalizedString(Type::InternalizedString(region)),
+ Symbol(Type::Symbol(region)),
+ Receiver(Type::Receiver(region)),
+ Object(Type::Object(region)),
+ Array(Type::Array(region)),
+ Function(Type::Function(region)),
+ Proxy(Type::Proxy(region)),
+ object_map(isolate->factory()->NewMap(JS_OBJECT_TYPE, 3 * kPointerSize)),
+ array_map(isolate->factory()->NewMap(JS_ARRAY_TYPE, 4 * kPointerSize)),
+ region_(region) {
+ smi = handle(Smi::FromInt(666), isolate);
+ signed32 = isolate->factory()->NewHeapNumber(0x40000000);
+ object1 = isolate->factory()->NewJSObjectFromMap(object_map);
+ object2 = isolate->factory()->NewJSObjectFromMap(object_map);
+ array = isolate->factory()->NewJSArray(20);
+ ObjectClass = Type::Class(object_map, region);
+ ArrayClass = Type::Class(array_map, region);
+ SmiConstant = Type::Constant(smi, region);
+ Signed32Constant = Type::Constant(signed32, region);
+ ObjectConstant1 = Type::Constant(object1, region);
+ ObjectConstant2 = Type::Constant(object2, region);
+ ArrayConstant1 = Type::Constant(array, region);
+ ArrayConstant2 = Type::Constant(array, region);
+ }
+
+ TypeHandle None;
+ TypeHandle Any;
+ TypeHandle Oddball;
+ TypeHandle Boolean;
+ TypeHandle Null;
+ TypeHandle Undefined;
+ TypeHandle Number;
+ TypeHandle Smi;
+ TypeHandle Signed32;
+ TypeHandle Double;
+ TypeHandle Name;
+ TypeHandle UniqueName;
+ TypeHandle String;
+ TypeHandle InternalizedString;
+ TypeHandle Symbol;
+ TypeHandle Receiver;
+ TypeHandle Object;
+ TypeHandle Array;
+ TypeHandle Function;
+ TypeHandle Proxy;
+
+ TypeHandle ObjectClass;
+ TypeHandle ArrayClass;
+
+ TypeHandle SmiConstant;
+ TypeHandle Signed32Constant;
+ TypeHandle ObjectConstant1;
+ TypeHandle ObjectConstant2;
+ TypeHandle ArrayConstant1;
+ TypeHandle ArrayConstant2;
+
+ Handle<i::Map> object_map;
+ Handle<i::Map> array_map;
+
+ Handle<i::Smi> smi;
+ Handle<i::HeapNumber> signed32;
+ Handle<i::JSObject> object1;
+ Handle<i::JSObject> object2;
+ Handle<i::JSArray> array;
+
+ TypeHandle Union(TypeHandle t1, TypeHandle t2) {
+ return Type::Union(t1, t2, region_);
+ }
+ TypeHandle Intersect(TypeHandle t1, TypeHandle t2) {
+ return Type::Intersect(t1, t2, region_);
+ }
+
+ template<class Type2, class TypeHandle2>
+ TypeHandle Convert(TypeHandle2 t) {
+ return Type::template Convert<Type2>(t, region_);
+ }
+
+ TypeHandle Fuzz(int depth = 5) {
+ switch (rand() % (depth == 0 ? 3 : 20)) {
+ case 0: { // bitset
+ int n = 0
+ #define COUNT_BITSET_TYPES(type, value) + 1
+ BITSET_TYPE_LIST(COUNT_BITSET_TYPES)
+ #undef COUNT_BITSET_TYPES
+ ;
+ int i = rand() % n;
+ #define PICK_BITSET_TYPE(type, value) \
+ if (i-- == 0) return Type::type(region_);
+ BITSET_TYPE_LIST(PICK_BITSET_TYPE)
+ #undef PICK_BITSET_TYPE
+ UNREACHABLE();
+ }
+ case 1: // class
+ switch (rand() % 2) {
+ case 0: return ObjectClass;
+ case 1: return ArrayClass;
+ }
+ UNREACHABLE();
+ case 2: // constant
+ switch (rand() % 6) {
+ case 0: return SmiConstant;
+ case 1: return Signed32Constant;
+ case 2: return ObjectConstant1;
+ case 3: return ObjectConstant2;
+ case 4: return ArrayConstant1;
+ case 5: return ArrayConstant2;
+ }
+ UNREACHABLE();
+ default: { // union
+ int n = rand() % 10;
+ TypeHandle type = None;
+ for (int i = 0; i < n; ++i) {
+ type = Type::Union(type, Fuzz(depth - 1), region_);
+ }
+ return type;
+ }
+ }
+ UNREACHABLE();
+ }
+
+ private:
+ Region* region_;
+};
+
+
// Testing auxiliaries (breaking the Type abstraction).
-static bool IsBitset(Type* type) { return type->IsSmi(); }
-static bool IsClass(Type* type) { return type->IsMap(); }
-static bool IsConstant(Type* type) { return type->IsBox(); }
-static bool IsUnion(Type* type) { return type->IsFixedArray(); }
-
-static int AsBitset(Type* type) { return Smi::cast(type)->value(); }
-static Map* AsClass(Type* type) { return Map::cast(type); }
-static Object* AsConstant(Type* type) { return Box::cast(type)->value(); }
-static FixedArray* AsUnion(Type* type) { return FixedArray::cast(type); }
-
-
-static void CheckEqual(Handle<Type> type1, Handle<Type> type2) {
- CHECK_EQ(IsBitset(*type1), IsBitset(*type2));
- CHECK_EQ(IsClass(*type1), IsClass(*type2));
- CHECK_EQ(IsConstant(*type1), IsConstant(*type2));
- CHECK_EQ(IsUnion(*type1), IsUnion(*type2));
- CHECK_EQ(type1->NumClasses(), type2->NumClasses());
- CHECK_EQ(type1->NumConstants(), type2->NumConstants());
- if (IsBitset(*type1)) {
- CHECK_EQ(AsBitset(*type1), AsBitset(*type2));
- } else if (IsClass(*type1)) {
- CHECK_EQ(AsClass(*type1), AsClass(*type2));
- } else if (IsConstant(*type1)) {
- CHECK_EQ(AsConstant(*type1), AsConstant(*type2));
- } else if (IsUnion(*type1)) {
- CHECK_EQ(AsUnion(*type1)->length(), AsUnion(*type2)->length());
+struct ZoneRep {
+ static bool IsTagged(Type* t, int tag) {
+ return !IsBitset(t)
+ && reinterpret_cast<intptr_t>(AsTagged(t)->at(0)) == tag;
+ }
+ static bool IsBitset(Type* t) { return reinterpret_cast<intptr_t>(t) & 1; }
+ static bool IsClass(Type* t) { return IsTagged(t, 0); }
+ static bool IsConstant(Type* t) { return IsTagged(t, 1); }
+ static bool IsUnion(Type* t) { return IsTagged(t, 2); }
+
+ static ZoneList<void*>* AsTagged(Type* t) {
+ return reinterpret_cast<ZoneList<void*>*>(t);
+ }
+ static int AsBitset(Type* t) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(t) >> 1);
+ }
+ static Map* AsClass(Type* t) {
+ return *reinterpret_cast<Map**>(AsTagged(t)->at(1));
+ }
+ static Object* AsConstant(Type* t) {
+ return *reinterpret_cast<Object**>(AsTagged(t)->at(1));
+ }
+ static ZoneList<Type*>* AsUnion(Type* t) {
+ return reinterpret_cast<ZoneList<Type*>*>(AsTagged(t));
}
- CHECK(type1->Is(type2));
- CHECK(type2->Is(type1));
-}
+ static Zone* ToRegion(Zone* zone, Isolate* isolate) { return zone; }
+};
-static void CheckSub(Handle<Type> type1, Handle<Type> type2) {
- CHECK(type1->Is(type2));
- CHECK(!type2->Is(type1));
- if (IsBitset(*type1) && IsBitset(*type2)) {
- CHECK_NE(AsBitset(*type1), AsBitset(*type2));
+
+struct HeapRep {
+ static bool IsBitset(Handle<HeapType> t) { return t->IsSmi(); }
+ static bool IsClass(Handle<HeapType> t) { return t->IsMap(); }
+ static bool IsConstant(Handle<HeapType> t) { return t->IsBox(); }
+ static bool IsUnion(Handle<HeapType> t) { return t->IsFixedArray(); }
+
+ static int AsBitset(Handle<HeapType> t) { return Smi::cast(*t)->value(); }
+ static Map* AsClass(Handle<HeapType> t) { return Map::cast(*t); }
+ static Object* AsConstant(Handle<HeapType> t) {
+ return Box::cast(*t)->value();
+ }
+ static FixedArray* AsUnion(Handle<HeapType> t) {
+ return FixedArray::cast(*t);
}
-}
+ static Isolate* ToRegion(Zone* zone, Isolate* isolate) { return isolate; }
+};
+
+
+template<class Type, class TypeHandle, class Region, class Rep>
+struct Tests : Rep {
+ Isolate* isolate;
+ HandleScope scope;
+ Zone zone;
+ Types<Type, TypeHandle, Region> T;
-static void CheckUnordered(Handle<Type> type1, Handle<Type> type2) {
- CHECK(!type1->Is(type2));
- CHECK(!type2->Is(type1));
- if (IsBitset(*type1) && IsBitset(*type2)) {
- CHECK_NE(AsBitset(*type1), AsBitset(*type2));
+ Tests() :
+ isolate(CcTest::i_isolate()),
+ scope(isolate),
+ zone(isolate),
+ T(Rep::ToRegion(&zone, isolate), isolate) {
}
-}
+ static void CheckEqual(TypeHandle type1, TypeHandle type2) {
+ CHECK_EQ(Rep::IsBitset(type1), Rep::IsBitset(type2));
+ CHECK_EQ(Rep::IsClass(type1), Rep::IsClass(type2));
+ CHECK_EQ(Rep::IsConstant(type1), Rep::IsConstant(type2));
+ CHECK_EQ(Rep::IsUnion(type1), Rep::IsUnion(type2));
+ CHECK_EQ(type1->NumClasses(), type2->NumClasses());
+ CHECK_EQ(type1->NumConstants(), type2->NumConstants());
+ if (Rep::IsBitset(type1)) {
+ CHECK_EQ(Rep::AsBitset(type1), Rep::AsBitset(type2));
+ } else if (Rep::IsClass(type1)) {
+ CHECK_EQ(Rep::AsClass(type1), Rep::AsClass(type2));
+ } else if (Rep::IsConstant(type1)) {
+ CHECK_EQ(Rep::AsConstant(type1), Rep::AsConstant(type2));
+ } else if (Rep::IsUnion(type1)) {
+ CHECK_EQ(Rep::AsUnion(type1)->length(), Rep::AsUnion(type2)->length());
+ }
+ CHECK(type1->Is(type2));
+ CHECK(type2->Is(type1));
+ }
-static void CheckOverlap(Handle<Type> type1, Handle<Type> type2) {
- CHECK(type1->Maybe(type2));
- CHECK(type2->Maybe(type1));
- if (IsBitset(*type1) && IsBitset(*type2)) {
- CHECK_NE(0, AsBitset(*type1) & AsBitset(*type2));
+ static void CheckSub(TypeHandle type1, TypeHandle type2) {
+ CHECK(type1->Is(type2));
+ CHECK(!type2->Is(type1));
+ if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
+ CHECK_NE(Rep::AsBitset(type1), Rep::AsBitset(type2));
+ }
}
-}
+ static void CheckUnordered(TypeHandle type1, TypeHandle type2) {
+ CHECK(!type1->Is(type2));
+ CHECK(!type2->Is(type1));
+ if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
+ CHECK_NE(Rep::AsBitset(type1), Rep::AsBitset(type2));
+ }
+ }
-static void CheckDisjoint(Handle<Type> type1, Handle<Type> type2) {
- CHECK(!type1->Is(type2));
- CHECK(!type2->Is(type1));
- CHECK(!type1->Maybe(type2));
- CHECK(!type2->Maybe(type1));
- if (IsBitset(*type1) && IsBitset(*type2)) {
- CHECK_EQ(0, AsBitset(*type1) & AsBitset(*type2));
+ static void CheckOverlap(TypeHandle type1, TypeHandle type2) {
+ CHECK(type1->Maybe(type2));
+ CHECK(type2->Maybe(type1));
+ if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
+ CHECK_NE(0, Rep::AsBitset(type1) & Rep::AsBitset(type2));
+ }
+ }
+
+ static void CheckDisjoint(TypeHandle type1, TypeHandle type2) {
+ CHECK(!type1->Is(type2));
+ CHECK(!type2->Is(type1));
+ CHECK(!type1->Maybe(type2));
+ CHECK(!type2->Maybe(type1));
+ if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
+ CHECK_EQ(0, Rep::AsBitset(type1) & Rep::AsBitset(type2));
+ }
}
-}
+ void Bitset() {
+ CHECK(this->IsBitset(T.None));
+ CHECK(this->IsBitset(T.Any));
+ CHECK(this->IsBitset(T.String));
+ CHECK(this->IsBitset(T.Object));
-class HandlifiedTypes {
- public:
- explicit HandlifiedTypes(Isolate* isolate) :
- None(Type::None(), isolate),
- Any(Type::Any(), isolate),
- Oddball(Type::Oddball(), isolate),
- Boolean(Type::Boolean(), isolate),
- Null(Type::Null(), isolate),
- Undefined(Type::Undefined(), isolate),
- Number(Type::Number(), isolate),
- Smi(Type::Smi(), isolate),
- Signed32(Type::Signed32(), isolate),
- Double(Type::Double(), isolate),
- Name(Type::Name(), isolate),
- UniqueName(Type::UniqueName(), isolate),
- String(Type::String(), isolate),
- InternalizedString(Type::InternalizedString(), isolate),
- Symbol(Type::Symbol(), isolate),
- Receiver(Type::Receiver(), isolate),
- Object(Type::Object(), isolate),
- Array(Type::Array(), isolate),
- Function(Type::Function(), isolate),
- Proxy(Type::Proxy(), isolate),
- object_map(isolate->factory()->NewMap(JS_OBJECT_TYPE, 3 * kPointerSize)),
- array_map(isolate->factory()->NewMap(JS_ARRAY_TYPE, 4 * kPointerSize)),
- isolate_(isolate) {
- smi = handle(Smi::FromInt(666), isolate);
- signed32 = isolate->factory()->NewHeapNumber(0x40000000);
- object1 = isolate->factory()->NewJSObjectFromMap(object_map);
- object2 = isolate->factory()->NewJSObjectFromMap(object_map);
- array = isolate->factory()->NewJSArray(20);
- ObjectClass = Class(object_map);
- ArrayClass = Class(array_map);
- SmiConstant = Constant(smi);
- Signed32Constant = Constant(signed32);
- ObjectConstant1 = Constant(object1);
- ObjectConstant2 = Constant(object2);
- ArrayConstant1 = Constant(array);
- ArrayConstant2 = Constant(array);
+ CHECK(this->IsBitset(T.Union(T.String, T.Number)));
+ CHECK(this->IsBitset(T.Union(T.String, T.Receiver)));
+
+ CHECK_EQ(0, this->AsBitset(T.None));
+ CHECK_EQ(this->AsBitset(T.Number) | this->AsBitset(T.String),
+ this->AsBitset(T.Union(T.String, T.Number)));
+ CHECK_EQ(this->AsBitset(T.Receiver),
+ this->AsBitset(T.Union(T.Receiver, T.Object)));
}
- Handle<Type> None;
- Handle<Type> Any;
- Handle<Type> Oddball;
- Handle<Type> Boolean;
- Handle<Type> Null;
- Handle<Type> Undefined;
- Handle<Type> Number;
- Handle<Type> Smi;
- Handle<Type> Signed32;
- Handle<Type> Double;
- Handle<Type> Name;
- Handle<Type> UniqueName;
- Handle<Type> String;
- Handle<Type> InternalizedString;
- Handle<Type> Symbol;
- Handle<Type> Receiver;
- Handle<Type> Object;
- Handle<Type> Array;
- Handle<Type> Function;
- Handle<Type> Proxy;
-
- Handle<Type> ObjectClass;
- Handle<Type> ArrayClass;
-
- Handle<Type> SmiConstant;
- Handle<Type> Signed32Constant;
- Handle<Type> ObjectConstant1;
- Handle<Type> ObjectConstant2;
- Handle<Type> ArrayConstant1;
- Handle<Type> ArrayConstant2;
-
- Handle<Map> object_map;
- Handle<Map> array_map;
+ void Class() {
+ CHECK(this->IsClass(T.ObjectClass));
+ CHECK(this->IsClass(T.ArrayClass));
- Handle<i::Smi> smi;
- Handle<HeapNumber> signed32;
- Handle<JSObject> object1;
- Handle<JSObject> object2;
- Handle<JSArray> array;
+ CHECK(*T.object_map == this->AsClass(T.ObjectClass));
+ CHECK(*T.array_map == this->AsClass(T.ArrayClass));
+ }
- Handle<Type> Class(Handle<Map> map) {
- return handle(Type::Class(map), isolate_);
+ void Constant() {
+ CHECK(this->IsConstant(T.SmiConstant));
+ CHECK(this->IsConstant(T.ObjectConstant1));
+ CHECK(this->IsConstant(T.ObjectConstant2));
+ CHECK(this->IsConstant(T.ArrayConstant1));
+ CHECK(this->IsConstant(T.ArrayConstant2));
+
+ CHECK(*T.smi == this->AsConstant(T.SmiConstant));
+ CHECK(*T.object1 == this->AsConstant(T.ObjectConstant1));
+ CHECK(*T.object2 == this->AsConstant(T.ObjectConstant2));
+ CHECK(*T.object1 != this->AsConstant(T.ObjectConstant2));
+ CHECK(*T.array == this->AsConstant(T.ArrayConstant1));
+ CHECK(*T.array == this->AsConstant(T.ArrayConstant2));
}
- Handle<Type> Constant(Handle<i::Object> value) {
- return handle(Type::Constant(value, isolate_), isolate_);
+
+ void Is() {
+ // Reflexivity
+ CHECK(T.None->Is(T.None));
+ CHECK(T.Any->Is(T.Any));
+ CHECK(T.Object->Is(T.Object));
+
+ CHECK(T.ObjectClass->Is(T.ObjectClass));
+ CHECK(T.ObjectConstant1->Is(T.ObjectConstant1));
+ CHECK(T.ArrayConstant1->Is(T.ArrayConstant2));
+
+ // Symmetry and Transitivity
+ CheckSub(T.None, T.Number);
+ CheckSub(T.None, T.Any);
+
+ CheckSub(T.Oddball, T.Any);
+ CheckSub(T.Boolean, T.Oddball);
+ CheckSub(T.Null, T.Oddball);
+ CheckSub(T.Undefined, T.Oddball);
+ CheckUnordered(T.Boolean, T.Null);
+ CheckUnordered(T.Undefined, T.Null);
+ CheckUnordered(T.Boolean, T.Undefined);
+
+ CheckSub(T.Number, T.Any);
+ CheckSub(T.Smi, T.Number);
+ CheckSub(T.Signed32, T.Number);
+ CheckSub(T.Double, T.Number);
+ CheckSub(T.Smi, T.Signed32);
+ CheckUnordered(T.Smi, T.Double);
+ CheckUnordered(T.Signed32, T.Double);
+
+ CheckSub(T.Name, T.Any);
+ CheckSub(T.UniqueName, T.Any);
+ CheckSub(T.UniqueName, T.Name);
+ CheckSub(T.String, T.Name);
+ CheckSub(T.InternalizedString, T.String);
+ CheckSub(T.InternalizedString, T.UniqueName);
+ CheckSub(T.InternalizedString, T.Name);
+ CheckSub(T.Symbol, T.UniqueName);
+ CheckSub(T.Symbol, T.Name);
+ CheckUnordered(T.String, T.UniqueName);
+ CheckUnordered(T.String, T.Symbol);
+ CheckUnordered(T.InternalizedString, T.Symbol);
+
+ CheckSub(T.Receiver, T.Any);
+ CheckSub(T.Object, T.Any);
+ CheckSub(T.Object, T.Receiver);
+ CheckSub(T.Array, T.Object);
+ CheckSub(T.Function, T.Object);
+ CheckSub(T.Proxy, T.Receiver);
+ CheckUnordered(T.Object, T.Proxy);
+ CheckUnordered(T.Array, T.Function);
+
+ // Structured subtyping
+ CheckSub(T.None, T.ObjectClass);
+ CheckSub(T.None, T.ObjectConstant1);
+ CheckSub(T.ObjectClass, T.Any);
+ CheckSub(T.ObjectConstant1, T.Any);
+
+ CheckSub(T.ObjectClass, T.Object);
+ CheckSub(T.ArrayClass, T.Object);
+ CheckUnordered(T.ObjectClass, T.ArrayClass);
+
+ CheckSub(T.SmiConstant, T.Smi);
+ CheckSub(T.SmiConstant, T.Signed32);
+ CheckSub(T.SmiConstant, T.Number);
+ CheckSub(T.ObjectConstant1, T.Object);
+ CheckSub(T.ObjectConstant2, T.Object);
+ CheckSub(T.ArrayConstant1, T.Object);
+ CheckSub(T.ArrayConstant1, T.Array);
+ CheckUnordered(T.ObjectConstant1, T.ObjectConstant2);
+ CheckUnordered(T.ObjectConstant1, T.ArrayConstant1);
+
+ CheckUnordered(T.ObjectConstant1, T.ObjectClass);
+ CheckUnordered(T.ObjectConstant2, T.ObjectClass);
+ CheckUnordered(T.ObjectConstant1, T.ArrayClass);
+ CheckUnordered(T.ObjectConstant2, T.ArrayClass);
+ CheckUnordered(T.ArrayConstant1, T.ObjectClass);
}
- Handle<Type> Union(Handle<Type> type1, Handle<Type> type2) {
- return handle(Type::Union(type1, type2), isolate_);
+
+ void Maybe() {
+ CheckOverlap(T.Any, T.Any);
+ CheckOverlap(T.Object, T.Object);
+
+ CheckOverlap(T.Oddball, T.Any);
+ CheckOverlap(T.Boolean, T.Oddball);
+ CheckOverlap(T.Null, T.Oddball);
+ CheckOverlap(T.Undefined, T.Oddball);
+ CheckDisjoint(T.Boolean, T.Null);
+ CheckDisjoint(T.Undefined, T.Null);
+ CheckDisjoint(T.Boolean, T.Undefined);
+
+ CheckOverlap(T.Number, T.Any);
+ CheckOverlap(T.Smi, T.Number);
+ CheckOverlap(T.Double, T.Number);
+ CheckDisjoint(T.Signed32, T.Double);
+
+ CheckOverlap(T.Name, T.Any);
+ CheckOverlap(T.UniqueName, T.Any);
+ CheckOverlap(T.UniqueName, T.Name);
+ CheckOverlap(T.String, T.Name);
+ CheckOverlap(T.InternalizedString, T.String);
+ CheckOverlap(T.InternalizedString, T.UniqueName);
+ CheckOverlap(T.InternalizedString, T.Name);
+ CheckOverlap(T.Symbol, T.UniqueName);
+ CheckOverlap(T.Symbol, T.Name);
+ CheckOverlap(T.String, T.UniqueName);
+ CheckDisjoint(T.String, T.Symbol);
+ CheckDisjoint(T.InternalizedString, T.Symbol);
+
+ CheckOverlap(T.Receiver, T.Any);
+ CheckOverlap(T.Object, T.Any);
+ CheckOverlap(T.Object, T.Receiver);
+ CheckOverlap(T.Array, T.Object);
+ CheckOverlap(T.Function, T.Object);
+ CheckOverlap(T.Proxy, T.Receiver);
+ CheckDisjoint(T.Object, T.Proxy);
+ CheckDisjoint(T.Array, T.Function);
+
+ CheckOverlap(T.ObjectClass, T.Any);
+ CheckOverlap(T.ObjectConstant1, T.Any);
+
+ CheckOverlap(T.ObjectClass, T.Object);
+ CheckOverlap(T.ArrayClass, T.Object);
+ CheckOverlap(T.ObjectClass, T.ObjectClass);
+ CheckOverlap(T.ArrayClass, T.ArrayClass);
+ CheckDisjoint(T.ObjectClass, T.ArrayClass);
+
+ CheckOverlap(T.SmiConstant, T.Smi);
+ CheckOverlap(T.SmiConstant, T.Signed32);
+ CheckOverlap(T.SmiConstant, T.Number);
+ CheckDisjoint(T.SmiConstant, T.Double);
+ CheckOverlap(T.ObjectConstant1, T.Object);
+ CheckOverlap(T.ObjectConstant2, T.Object);
+ CheckOverlap(T.ArrayConstant1, T.Object);
+ CheckOverlap(T.ArrayConstant1, T.Array);
+ CheckOverlap(T.ArrayConstant1, T.ArrayConstant2);
+ CheckOverlap(T.ObjectConstant1, T.ObjectConstant1);
+ CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayConstant1);
+
+ CheckDisjoint(T.ObjectConstant1, T.ObjectClass);
+ CheckDisjoint(T.ObjectConstant2, T.ObjectClass);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayClass);
+ CheckDisjoint(T.ObjectConstant2, T.ArrayClass);
+ CheckDisjoint(T.ArrayConstant1, T.ObjectClass);
}
- Handle<Type> Intersect(Handle<Type> type1, Handle<Type> type2) {
- return handle(Type::Intersect(type1, type2), isolate_);
+
+ void Union() {
+ // Bitset-bitset
+ CHECK(this->IsBitset(T.Union(T.Object, T.Number)));
+ CHECK(this->IsBitset(T.Union(T.Object, T.Object)));
+ CHECK(this->IsBitset(T.Union(T.Any, T.None)));
+
+ CheckEqual(T.Union(T.None, T.Number), T.Number);
+ CheckEqual(T.Union(T.Object, T.Proxy), T.Receiver);
+ CheckEqual(T.Union(T.Number, T.String), T.Union(T.String, T.Number));
+ CheckSub(T.Union(T.Number, T.String), T.Any);
+
+ // Class-class
+ CHECK(this->IsClass(T.Union(T.ObjectClass, T.ObjectClass)));
+ CHECK(this->IsUnion(T.Union(T.ObjectClass, T.ArrayClass)));
+
+ CheckEqual(T.Union(T.ObjectClass, T.ObjectClass), T.ObjectClass);
+ CheckSub(T.None, T.Union(T.ObjectClass, T.ArrayClass));
+ CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Any);
+ CheckSub(T.ObjectClass, T.Union(T.ObjectClass, T.ArrayClass));
+ CheckSub(T.ArrayClass, T.Union(T.ObjectClass, T.ArrayClass));
+ CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
+ CheckUnordered(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
+ CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
+ CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number);
+
+ // Constant-constant
+ CHECK(this->IsConstant(T.Union(T.ObjectConstant1, T.ObjectConstant1)));
+ CHECK(this->IsConstant(T.Union(T.ArrayConstant1, T.ArrayConstant1)));
+ CHECK(this->IsUnion(T.Union(T.ObjectConstant1, T.ObjectConstant2)));
+
+ CheckEqual(
+ T.Union(T.ObjectConstant1, T.ObjectConstant1),
+ T.ObjectConstant1);
+ CheckEqual(T.Union(T.ArrayConstant1, T.ArrayConstant1), T.ArrayConstant1);
+ CheckEqual(T.Union(T.ArrayConstant1, T.ArrayConstant1), T.ArrayConstant2);
+ CheckSub(T.None, T.Union(T.ObjectConstant1, T.ObjectConstant2));
+ CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Any);
+ CheckSub(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2));
+ CheckSub(T.ObjectConstant2, T.Union(T.ObjectConstant1, T.ObjectConstant2));
+ CheckSub(T.ArrayConstant2, T.Union(T.ArrayConstant1, T.ObjectConstant2));
+ CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Object);
+ CheckUnordered(
+ T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ObjectClass);
+ CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array);
+ CheckOverlap(
+ T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ArrayConstant2);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Number);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ObjectClass);
+
+ // Bitset-class
+ CHECK(this->IsBitset(T.Union(T.ObjectClass, T.Object)));
+ CHECK(this->IsUnion(T.Union(T.ObjectClass, T.Number)));
+
+ CheckEqual(T.Union(T.ObjectClass, T.Object), T.Object);
+ CheckSub(T.None, T.Union(T.ObjectClass, T.Number));
+ CheckSub(T.Union(T.ObjectClass, T.Number), T.Any);
+ CheckSub(T.Union(T.ObjectClass, T.Smi), T.Union(T.Object, T.Number));
+ CheckSub(T.Union(T.ObjectClass, T.Array), T.Object);
+ CheckUnordered(T.Union(T.ObjectClass, T.String), T.Array);
+ CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
+ CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
+
+ // Bitset-constant
+ CHECK(this->IsBitset(T.Union(T.SmiConstant, T.Number)));
+ CHECK(this->IsBitset(T.Union(T.ObjectConstant1, T.Object)));
+ CHECK(this->IsUnion(T.Union(T.ObjectConstant2, T.Number)));
+
+ CheckEqual(T.Union(T.SmiConstant, T.Number), T.Number);
+ CheckEqual(T.Union(T.ObjectConstant1, T.Object), T.Object);
+ CheckSub(T.None, T.Union(T.ObjectConstant1, T.Number));
+ CheckSub(T.Union(T.ObjectConstant1, T.Number), T.Any);
+ CheckSub(
+ T.Union(T.ObjectConstant1, T.Signed32), T.Union(T.Object, T.Number));
+ CheckSub(T.Union(T.ObjectConstant1, T.Array), T.Object);
+ CheckUnordered(T.Union(T.ObjectConstant1, T.String), T.Array);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
+ CheckEqual(T.Union(T.Signed32, T.Signed32Constant), T.Signed32);
+
+ // Class-constant
+ CHECK(this->IsUnion(T.Union(T.ObjectConstant1, T.ObjectClass)));
+ CHECK(this->IsUnion(T.Union(T.ArrayClass, T.ObjectConstant2)));
+
+ CheckSub(T.None, T.Union(T.ObjectConstant1, T.ArrayClass));
+ CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Any);
+ CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Object);
+ CheckSub(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ArrayClass));
+ CheckSub(T.ArrayClass, T.Union(T.ObjectConstant1, T.ArrayClass));
+ CheckUnordered(T.ObjectClass, T.Union(T.ObjectConstant1, T.ArrayClass));
+ CheckSub(
+ T.Union(T.ObjectConstant1, T.ArrayClass), T.Union(T.Array, T.Object));
+ CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayClass), T.ArrayConstant1);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass);
+
+ // Bitset-union
+ CHECK(this->IsBitset(
+ T.Union(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass))));
+ CHECK(this->IsUnion(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.Number)));
+
+ CheckEqual(
+ T.Union(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Object);
+ CheckEqual(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number),
+ T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
+ CheckSub(
+ T.Double,
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number));
+ CheckSub(
+ T.ObjectConstant1,
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
+ CheckSub(
+ T.None,
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
+ CheckSub(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
+ T.Any);
+ CheckSub(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
+ T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
+
+ // Class-union
+ CHECK(this->IsUnion(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass)));
+ CHECK(this->IsUnion(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ObjectClass)));
+
+ CheckEqual(
+ T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Union(T.ObjectClass, T.ObjectConstant1));
+ CheckSub(
+ T.None,
+ T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)));
+ CheckSub(
+ T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Any);
+ CheckSub(
+ T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Object);
+ CheckEqual(
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass),
+ T.Union(T.ArrayClass, T.ObjectConstant2));
+
+ // Constant-union
+ CHECK(this->IsUnion(T.Union(
+ T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2))));
+ CHECK(this->IsUnion(T.Union(
+ T.Union(T.ArrayConstant1, T.ObjectClass), T.ObjectConstant1)));
+ CHECK(this->IsUnion(T.Union(
+ T.Union(T.ArrayConstant1, T.ObjectConstant2), T.ObjectConstant1)));
+
+ CheckEqual(
+ T.Union(
+ T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ CheckEqual(
+ T.Union(
+ T.Union(T.ArrayConstant1, T.ObjectConstant2), T.ObjectConstant1),
+ T.Union(
+ T.ObjectConstant2, T.Union(T.ArrayConstant1, T.ObjectConstant1)));
+
+ // Union-union
+ CHECK(this->IsBitset(T.Union(
+ T.Union(T.Number, T.ArrayClass),
+ T.Union(T.Signed32, T.Array))));
+ CHECK(this->IsUnion(T.Union(
+ T.Union(T.Number, T.ArrayClass),
+ T.Union(T.ObjectClass, T.ArrayClass))));
+
+ CheckEqual(
+ T.Union(
+ T.Union(T.ObjectConstant2, T.ObjectConstant1),
+ T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ CheckEqual(
+ T.Union(
+ T.Union(T.ObjectConstant2, T.ArrayConstant1),
+ T.Union(T.ObjectConstant1, T.ArrayConstant2)),
+ T.Union(
+ T.Union(T.ObjectConstant1, T.ObjectConstant2),
+ T.ArrayConstant1));
+ CheckEqual(
+ T.Union(T.Union(T.Number, T.ArrayClass), T.Union(T.Smi, T.Array)),
+ T.Union(T.Number, T.Array));
}
- private:
- Isolate* isolate_;
+ void Intersect() {
+ // Bitset-bitset
+ CHECK(this->IsBitset(T.Intersect(T.Object, T.Number)));
+ CHECK(this->IsBitset(T.Intersect(T.Object, T.Object)));
+ CHECK(this->IsBitset(T.Intersect(T.Any, T.None)));
+
+ CheckEqual(T.Intersect(T.None, T.Number), T.None);
+ CheckEqual(T.Intersect(T.Object, T.Proxy), T.None);
+ CheckEqual(T.Intersect(T.Name, T.String), T.Intersect(T.String, T.Name));
+ CheckEqual(T.Intersect(T.UniqueName, T.String), T.InternalizedString);
+
+ // Class-class
+ CHECK(this->IsClass(T.Intersect(T.ObjectClass, T.ObjectClass)));
+ CHECK(this->IsBitset(T.Intersect(T.ObjectClass, T.ArrayClass)));
+
+ CheckEqual(T.Intersect(T.ObjectClass, T.ObjectClass), T.ObjectClass);
+ CheckEqual(T.Intersect(T.ObjectClass, T.ArrayClass), T.None);
+
+ // Constant-constant
+ CHECK(this->IsConstant(T.Intersect(T.ObjectConstant1, T.ObjectConstant1)));
+ CHECK(this->IsConstant(T.Intersect(T.ArrayConstant1, T.ArrayConstant2)));
+ CHECK(this->IsBitset(T.Intersect(T.ObjectConstant1, T.ObjectConstant2)));
+
+ CheckEqual(
+ T.Intersect(T.ObjectConstant1, T.ObjectConstant1), T.ObjectConstant1);
+ CheckEqual(
+ T.Intersect(T.ArrayConstant1, T.ArrayConstant2), T.ArrayConstant1);
+ CheckEqual(T.Intersect(T.ObjectConstant1, T.ObjectConstant2), T.None);
+
+ // Bitset-class
+ CHECK(this->IsClass(T.Intersect(T.ObjectClass, T.Object)));
+ CHECK(this->IsBitset(T.Intersect(T.ObjectClass, T.Number)));
+
+ CheckEqual(T.Intersect(T.ObjectClass, T.Object), T.ObjectClass);
+ CheckEqual(T.Intersect(T.ObjectClass, T.Array), T.None);
+ CheckEqual(T.Intersect(T.ObjectClass, T.Number), T.None);
+
+ // Bitset-constant
+ CHECK(this->IsBitset(T.Intersect(T.Smi, T.Number)));
+ CHECK(this->IsConstant(T.Intersect(T.SmiConstant, T.Number)));
+ CHECK(this->IsConstant(T.Intersect(T.ObjectConstant1, T.Object)));
+
+ CheckEqual(T.Intersect(T.Smi, T.Number), T.Smi);
+ CheckEqual(T.Intersect(T.SmiConstant, T.Number), T.SmiConstant);
+ CheckEqual(T.Intersect(T.ObjectConstant1, T.Object), T.ObjectConstant1);
+
+ // Class-constant
+ CHECK(this->IsBitset(T.Intersect(T.ObjectConstant1, T.ObjectClass)));
+ CHECK(this->IsBitset(T.Intersect(T.ArrayClass, T.ObjectConstant2)));
+
+ CheckEqual(T.Intersect(T.ObjectConstant1, T.ObjectClass), T.None);
+ CheckEqual(T.Intersect(T.ArrayClass, T.ObjectConstant2), T.None);
+
+ // Bitset-union
+ CHECK(this->IsUnion(
+ T.Intersect(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass))));
+ CHECK(this->IsBitset(
+ T.Intersect(T.Union(T.ArrayClass, T.ObjectConstant2), T.Number)));
+
+ CheckEqual(
+ T.Intersect(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Union(T.ObjectConstant1, T.ObjectClass));
+ CheckEqual(
+ T.Intersect(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number),
+ T.None);
+
+ // Class-union
+ CHECK(this->IsClass(
+ T.Intersect(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass)));
+ CHECK(this->IsClass(
+ T.Intersect(T.Union(T.Object, T.SmiConstant), T.ArrayClass)));
+ CHECK(this->IsBitset(
+ T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant1), T.ArrayClass)));
+
+ CheckEqual(
+ T.Intersect(T.ArrayClass, T.Union(T.ObjectConstant2, T.ArrayClass)),
+ T.ArrayClass);
+ CheckEqual(
+ T.Intersect(T.ArrayClass, T.Union(T.Object, T.SmiConstant)),
+ T.ArrayClass);
+ CheckEqual(
+ T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant1), T.ArrayClass),
+ T.None);
+
+ // Constant-union
+ CHECK(this->IsConstant(T.Intersect(
+ T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2))));
+ CHECK(this->IsConstant(T.Intersect(
+ T.Union(T.Number, T.ObjectClass), T.SmiConstant)));
+ CHECK(this->IsBitset(T.Intersect(
+ T.Union(T.ArrayConstant1, T.ObjectClass), T.ObjectConstant1)));
+
+ CheckEqual(
+ T.Intersect(
+ T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.ObjectConstant1);
+ CheckEqual(
+ T.Intersect(T.SmiConstant, T.Union(T.Number, T.ObjectConstant2)),
+ T.SmiConstant);
+ CheckEqual(
+ T.Intersect(
+ T.Union(T.ArrayConstant1, T.ObjectClass), T.ObjectConstant1),
+ T.None);
+
+ // Union-union
+ CHECK(this->IsUnion(T.Intersect(
+ T.Union(T.Number, T.ArrayClass), T.Union(T.Signed32, T.Array))));
+ CHECK(this->IsBitset(T.Intersect(
+ T.Union(T.Number, T.ObjectClass), T.Union(T.Signed32, T.Array))));
+
+ CheckEqual(
+ T.Intersect(
+ T.Union(T.Number, T.ArrayClass),
+ T.Union(T.Smi, T.Array)),
+ T.Union(T.Smi, T.ArrayClass));
+ CheckEqual(
+ T.Intersect(
+ T.Union(T.Number, T.ObjectClass),
+ T.Union(T.Signed32, T.Array)),
+ T.Signed32);
+ CheckEqual(
+ T.Intersect(
+ T.Union(T.ObjectConstant2, T.ObjectConstant1),
+ T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ CheckEqual(
+ T.Intersect(
+ T.Union(
+ T.Union(T.ObjectConstant2, T.ObjectConstant1), T.ArrayClass),
+ T.Union(
+ T.ObjectConstant1,
+ T.Union(T.ArrayConstant1, T.ObjectConstant2))),
+ T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ CheckEqual(
+ T.Intersect(
+ T.Union(T.ObjectConstant2, T.ArrayConstant1),
+ T.Union(T.ObjectConstant1, T.ArrayConstant2)),
+ T.ArrayConstant1);
+ }
+
+ template<class Type2, class TypeHandle2, class Region2, class Rep2>
+ void Convert() {
+ Types<Type2, TypeHandle2, Region2> T2(
+ Rep2::ToRegion(&zone, isolate), isolate);
+ for (int i = 0; i < 100; ++i) {
+ TypeHandle type = T.Fuzz();
+ CheckEqual(type,
+ T.template Convert<Type2>(T2.template Convert<Type>(type)));
+ }
+ }
};
+typedef Tests<Type, Type*, Zone, ZoneRep> ZoneTests;
+typedef Tests<HeapType, Handle<HeapType>, Isolate, HeapRep> HeapTests;
+
TEST(Bitset) {
CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- HandlifiedTypes T(isolate);
-
- CHECK(IsBitset(*T.None));
- CHECK(IsBitset(*T.Any));
- CHECK(IsBitset(*T.String));
- CHECK(IsBitset(*T.Object));
-
- CHECK(IsBitset(Type::Union(T.String, T.Number)));
- CHECK(IsBitset(Type::Union(T.String, T.Receiver)));
- CHECK(IsBitset(Type::Optional(T.Object)));
-
- CHECK_EQ(0, AsBitset(*T.None));
- CHECK_EQ(AsBitset(*T.Number) | AsBitset(*T.String),
- AsBitset(Type::Union(T.String, T.Number)));
- CHECK_EQ(AsBitset(*T.Receiver),
- AsBitset(Type::Union(T.Receiver, T.Object)));
- CHECK_EQ(AsBitset(*T.String) | AsBitset(*T.Undefined),
- AsBitset(Type::Optional(T.String)));
+ ZoneTests().Bitset();
+ HeapTests().Bitset();
}
TEST(Class) {
CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- HandlifiedTypes T(isolate);
-
- CHECK(IsClass(*T.ObjectClass));
- CHECK(IsClass(*T.ArrayClass));
-
- CHECK(*T.object_map == AsClass(*T.ObjectClass));
- CHECK(*T.array_map == AsClass(*T.ArrayClass));
+ ZoneTests().Class();
+ HeapTests().Class();
}
TEST(Constant) {
CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- HandlifiedTypes T(isolate);
-
- CHECK(IsConstant(*T.SmiConstant));
- CHECK(IsConstant(*T.ObjectConstant1));
- CHECK(IsConstant(*T.ObjectConstant2));
- CHECK(IsConstant(*T.ArrayConstant1));
- CHECK(IsConstant(*T.ArrayConstant2));
-
- CHECK(*T.smi == AsConstant(*T.SmiConstant));
- CHECK(*T.object1 == AsConstant(*T.ObjectConstant1));
- CHECK(*T.object2 == AsConstant(*T.ObjectConstant2));
- CHECK(*T.object1 != AsConstant(*T.ObjectConstant2));
- CHECK(*T.array == AsConstant(*T.ArrayConstant1));
- CHECK(*T.array == AsConstant(*T.ArrayConstant2));
+ ZoneTests().Constant();
+ HeapTests().Constant();
}
TEST(Is) {
CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- HandlifiedTypes T(isolate);
-
- // Reflexivity
- CHECK(T.None->Is(T.None));
- CHECK(T.Any->Is(T.Any));
- CHECK(T.Object->Is(T.Object));
-
- CHECK(T.ObjectClass->Is(T.ObjectClass));
- CHECK(T.ObjectConstant1->Is(T.ObjectConstant1));
- CHECK(T.ArrayConstant1->Is(T.ArrayConstant2));
-
- // Symmetry and Transitivity
- CheckSub(T.None, T.Number);
- CheckSub(T.None, T.Any);
-
- CheckSub(T.Oddball, T.Any);
- CheckSub(T.Boolean, T.Oddball);
- CheckSub(T.Null, T.Oddball);
- CheckSub(T.Undefined, T.Oddball);
- CheckUnordered(T.Boolean, T.Null);
- CheckUnordered(T.Undefined, T.Null);
- CheckUnordered(T.Boolean, T.Undefined);
-
- CheckSub(T.Number, T.Any);
- CheckSub(T.Smi, T.Number);
- CheckSub(T.Signed32, T.Number);
- CheckSub(T.Double, T.Number);
- CheckSub(T.Smi, T.Signed32);
- CheckUnordered(T.Smi, T.Double);
- CheckUnordered(T.Signed32, T.Double);
-
- CheckSub(T.Name, T.Any);
- CheckSub(T.UniqueName, T.Any);
- CheckSub(T.UniqueName, T.Name);
- CheckSub(T.String, T.Name);
- CheckSub(T.InternalizedString, T.String);
- CheckSub(T.InternalizedString, T.UniqueName);
- CheckSub(T.InternalizedString, T.Name);
- CheckSub(T.Symbol, T.UniqueName);
- CheckSub(T.Symbol, T.Name);
- CheckUnordered(T.String, T.UniqueName);
- CheckUnordered(T.String, T.Symbol);
- CheckUnordered(T.InternalizedString, T.Symbol);
-
- CheckSub(T.Receiver, T.Any);
- CheckSub(T.Object, T.Any);
- CheckSub(T.Object, T.Receiver);
- CheckSub(T.Array, T.Object);
- CheckSub(T.Function, T.Object);
- CheckSub(T.Proxy, T.Receiver);
- CheckUnordered(T.Object, T.Proxy);
- CheckUnordered(T.Array, T.Function);
-
- // Structured subtyping
- CheckSub(T.None, T.ObjectClass);
- CheckSub(T.None, T.ObjectConstant1);
- CheckSub(T.ObjectClass, T.Any);
- CheckSub(T.ObjectConstant1, T.Any);
-
- CheckSub(T.ObjectClass, T.Object);
- CheckSub(T.ArrayClass, T.Object);
- CheckUnordered(T.ObjectClass, T.ArrayClass);
-
- CheckSub(T.SmiConstant, T.Smi);
- CheckSub(T.SmiConstant, T.Signed32);
- CheckSub(T.SmiConstant, T.Number);
- CheckSub(T.ObjectConstant1, T.Object);
- CheckSub(T.ObjectConstant2, T.Object);
- CheckSub(T.ArrayConstant1, T.Object);
- CheckSub(T.ArrayConstant1, T.Array);
- CheckUnordered(T.ObjectConstant1, T.ObjectConstant2);
- CheckUnordered(T.ObjectConstant1, T.ArrayConstant1);
-
- CheckUnordered(T.ObjectConstant1, T.ObjectClass);
- CheckUnordered(T.ObjectConstant2, T.ObjectClass);
- CheckUnordered(T.ObjectConstant1, T.ArrayClass);
- CheckUnordered(T.ObjectConstant2, T.ArrayClass);
- CheckUnordered(T.ArrayConstant1, T.ObjectClass);
+ ZoneTests().Is();
+ HeapTests().Is();
}
TEST(Maybe) {
CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- HandlifiedTypes T(isolate);
-
- CheckOverlap(T.Any, T.Any);
- CheckOverlap(T.Object, T.Object);
-
- CheckOverlap(T.Oddball, T.Any);
- CheckOverlap(T.Boolean, T.Oddball);
- CheckOverlap(T.Null, T.Oddball);
- CheckOverlap(T.Undefined, T.Oddball);
- CheckDisjoint(T.Boolean, T.Null);
- CheckDisjoint(T.Undefined, T.Null);
- CheckDisjoint(T.Boolean, T.Undefined);
-
- CheckOverlap(T.Number, T.Any);
- CheckOverlap(T.Smi, T.Number);
- CheckOverlap(T.Double, T.Number);
- CheckDisjoint(T.Signed32, T.Double);
-
- CheckOverlap(T.Name, T.Any);
- CheckOverlap(T.UniqueName, T.Any);
- CheckOverlap(T.UniqueName, T.Name);
- CheckOverlap(T.String, T.Name);
- CheckOverlap(T.InternalizedString, T.String);
- CheckOverlap(T.InternalizedString, T.UniqueName);
- CheckOverlap(T.InternalizedString, T.Name);
- CheckOverlap(T.Symbol, T.UniqueName);
- CheckOverlap(T.Symbol, T.Name);
- CheckOverlap(T.String, T.UniqueName);
- CheckDisjoint(T.String, T.Symbol);
- CheckDisjoint(T.InternalizedString, T.Symbol);
-
- CheckOverlap(T.Receiver, T.Any);
- CheckOverlap(T.Object, T.Any);
- CheckOverlap(T.Object, T.Receiver);
- CheckOverlap(T.Array, T.Object);
- CheckOverlap(T.Function, T.Object);
- CheckOverlap(T.Proxy, T.Receiver);
- CheckDisjoint(T.Object, T.Proxy);
- CheckDisjoint(T.Array, T.Function);
-
- CheckOverlap(T.ObjectClass, T.Any);
- CheckOverlap(T.ObjectConstant1, T.Any);
-
- CheckOverlap(T.ObjectClass, T.Object);
- CheckOverlap(T.ArrayClass, T.Object);
- CheckOverlap(T.ObjectClass, T.ObjectClass);
- CheckOverlap(T.ArrayClass, T.ArrayClass);
- CheckDisjoint(T.ObjectClass, T.ArrayClass);
-
- CheckOverlap(T.SmiConstant, T.Smi);
- CheckOverlap(T.SmiConstant, T.Signed32);
- CheckOverlap(T.SmiConstant, T.Number);
- CheckDisjoint(T.SmiConstant, T.Double);
- CheckOverlap(T.ObjectConstant1, T.Object);
- CheckOverlap(T.ObjectConstant2, T.Object);
- CheckOverlap(T.ArrayConstant1, T.Object);
- CheckOverlap(T.ArrayConstant1, T.Array);
- CheckOverlap(T.ArrayConstant1, T.ArrayConstant2);
- CheckOverlap(T.ObjectConstant1, T.ObjectConstant1);
- CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2);
- CheckDisjoint(T.ObjectConstant1, T.ArrayConstant1);
-
- CheckDisjoint(T.ObjectConstant1, T.ObjectClass);
- CheckDisjoint(T.ObjectConstant2, T.ObjectClass);
- CheckDisjoint(T.ObjectConstant1, T.ArrayClass);
- CheckDisjoint(T.ObjectConstant2, T.ArrayClass);
- CheckDisjoint(T.ArrayConstant1, T.ObjectClass);
+ ZoneTests().Maybe();
+ HeapTests().Maybe();
}
TEST(Union) {
CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- HandlifiedTypes T(isolate);
-
- // Bitset-bitset
- CHECK(IsBitset(Type::Union(T.Object, T.Number)));
- CHECK(IsBitset(Type::Union(T.Object, T.Object)));
- CHECK(IsBitset(Type::Union(T.Any, T.None)));
-
- CheckEqual(T.Union(T.None, T.Number), T.Number);
- CheckEqual(T.Union(T.Object, T.Proxy), T.Receiver);
- CheckEqual(T.Union(T.Number, T.String), T.Union(T.String, T.Number));
- CheckSub(T.Union(T.Number, T.String), T.Any);
-
- // Class-class
- CHECK(IsClass(Type::Union(T.ObjectClass, T.ObjectClass)));
- CHECK(IsUnion(Type::Union(T.ObjectClass, T.ArrayClass)));
-
- CheckEqual(T.Union(T.ObjectClass, T.ObjectClass), T.ObjectClass);
- CheckSub(T.None, T.Union(T.ObjectClass, T.ArrayClass));
- CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Any);
- CheckSub(T.ObjectClass, T.Union(T.ObjectClass, T.ArrayClass));
- CheckSub(T.ArrayClass, T.Union(T.ObjectClass, T.ArrayClass));
- CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
- CheckUnordered(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
- CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
- CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number);
-
- // Constant-constant
- CHECK(IsConstant(Type::Union(T.ObjectConstant1, T.ObjectConstant1)));
- CHECK(IsConstant(Type::Union(T.ArrayConstant1, T.ArrayConstant1)));
- CHECK(IsUnion(Type::Union(T.ObjectConstant1, T.ObjectConstant2)));
-
- CheckEqual(T.Union(T.ObjectConstant1, T.ObjectConstant1), T.ObjectConstant1);
- CheckEqual(T.Union(T.ArrayConstant1, T.ArrayConstant1), T.ArrayConstant1);
- CheckEqual(T.Union(T.ArrayConstant1, T.ArrayConstant1), T.ArrayConstant2);
- CheckSub(T.None, T.Union(T.ObjectConstant1, T.ObjectConstant2));
- CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Any);
- CheckSub(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2));
- CheckSub(T.ObjectConstant2, T.Union(T.ObjectConstant1, T.ObjectConstant2));
- CheckSub(T.ArrayConstant2, T.Union(T.ArrayConstant1, T.ObjectConstant2));
- CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Object);
- CheckUnordered(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ObjectClass);
- CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array);
- CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array);
- CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ArrayConstant2);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Number);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ObjectClass);
-
- // Bitset-class
- CHECK(IsBitset(Type::Union(T.ObjectClass, T.Object)));
- CHECK(IsUnion(Type::Union(T.ObjectClass, T.Number)));
-
- CheckEqual(T.Union(T.ObjectClass, T.Object), T.Object);
- CheckSub(T.None, T.Union(T.ObjectClass, T.Number));
- CheckSub(T.Union(T.ObjectClass, T.Number), T.Any);
- CheckSub(T.Union(T.ObjectClass, T.Smi), T.Union(T.Object, T.Number));
- CheckSub(T.Union(T.ObjectClass, T.Array), T.Object);
- CheckUnordered(T.Union(T.ObjectClass, T.String), T.Array);
- CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
- CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
-
- // Bitset-constant
- CHECK(IsBitset(Type::Union(T.SmiConstant, T.Number)));
- CHECK(IsBitset(Type::Union(T.ObjectConstant1, T.Object)));
- CHECK(IsUnion(Type::Union(T.ObjectConstant2, T.Number)));
-
- CheckEqual(T.Union(T.SmiConstant, T.Number), T.Number);
- CheckEqual(T.Union(T.ObjectConstant1, T.Object), T.Object);
- CheckSub(T.None, T.Union(T.ObjectConstant1, T.Number));
- CheckSub(T.Union(T.ObjectConstant1, T.Number), T.Any);
- CheckSub(T.Union(T.ObjectConstant1, T.Signed32), T.Union(T.Object, T.Number));
- CheckSub(T.Union(T.ObjectConstant1, T.Array), T.Object);
- CheckUnordered(T.Union(T.ObjectConstant1, T.String), T.Array);
- CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
- CheckEqual(T.Union(T.Signed32, T.Signed32Constant), T.Signed32);
-
- // Class-constant
- CHECK(IsUnion(Type::Union(T.ObjectConstant1, T.ObjectClass)));
- CHECK(IsUnion(Type::Union(T.ArrayClass, T.ObjectConstant2)));
-
- CheckSub(T.None, T.Union(T.ObjectConstant1, T.ArrayClass));
- CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Any);
- CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Object);
- CheckSub(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ArrayClass));
- CheckSub(T.ArrayClass, T.Union(T.ObjectConstant1, T.ArrayClass));
- CheckUnordered(T.ObjectClass, T.Union(T.ObjectConstant1, T.ArrayClass));
- CheckSub(
- T.Union(T.ObjectConstant1, T.ArrayClass), T.Union(T.Array, T.Object));
- CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayClass), T.ArrayConstant1);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass);
-
- // Bitset-union
- CHECK(IsBitset(
- Type::Union(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass))));
- CHECK(IsUnion(
- Type::Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.Number)));
-
- CheckEqual(
- T.Union(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass)),
- T.Object);
- CheckEqual(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number),
- T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
- CheckSub(
- T.Double,
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number));
- CheckSub(
- T.ObjectConstant1,
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
- CheckSub(
- T.None,
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
- CheckSub(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
- T.Any);
- CheckSub(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
- T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
-
- // Class-union
- CHECK(IsUnion(
- Type::Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass)));
- CHECK(IsUnion(
- Type::Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ObjectClass)));
-
- CheckEqual(
- T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
- T.Union(T.ObjectClass, T.ObjectConstant1));
- CheckSub(
- T.None,
- T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)));
- CheckSub(
- T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
- T.Any);
- CheckSub(
- T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
- T.Object);
- CheckEqual(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass),
- T.Union(T.ArrayClass, T.ObjectConstant2));
-
- // Constant-union
- CHECK(IsUnion(Type::Union(
- T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2))));
- CHECK(IsUnion(Type::Union(
- T.Union(T.ArrayConstant1, T.ObjectClass), T.ObjectConstant1)));
- CHECK(IsUnion(Type::Union(
- T.Union(T.ArrayConstant1, T.ObjectConstant2), T.ObjectConstant1)));
-
- CheckEqual(
- T.Union(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2)),
- T.Union(T.ObjectConstant2, T.ObjectConstant1));
- CheckEqual(
- T.Union(T.Union(T.ArrayConstant1, T.ObjectConstant2), T.ObjectConstant1),
- T.Union(T.ObjectConstant2, T.Union(T.ArrayConstant1, T.ObjectConstant1)));
-
- // Union-union
- CHECK(IsBitset(Type::Union(
- T.Union(T.Number, T.ArrayClass), T.Union(T.Signed32, T.Array))));
- CHECK(IsUnion(Type::Union(
- T.Union(T.Number, T.ArrayClass), T.Union(T.ObjectClass, T.ArrayClass))));
-
- CheckEqual(
- T.Union(
- T.Union(T.ObjectConstant2, T.ObjectConstant1),
- T.Union(T.ObjectConstant1, T.ObjectConstant2)),
- T.Union(T.ObjectConstant2, T.ObjectConstant1));
- CheckEqual(
- T.Union(
- T.Union(T.ObjectConstant2, T.ArrayConstant1),
- T.Union(T.ObjectConstant1, T.ArrayConstant2)),
- T.Union(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ArrayConstant1));
- CheckEqual(
- T.Union(T.Union(T.Number, T.ArrayClass), T.Union(T.Smi, T.Array)),
- T.Union(T.Number, T.Array));
+ ZoneTests().Union();
+ HeapTests().Union();
}
TEST(Intersect) {
CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- HandlifiedTypes T(isolate);
-
- // Bitset-bitset
- CHECK(IsBitset(Type::Intersect(T.Object, T.Number)));
- CHECK(IsBitset(Type::Intersect(T.Object, T.Object)));
- CHECK(IsBitset(Type::Intersect(T.Any, T.None)));
-
- CheckEqual(T.Intersect(T.None, T.Number), T.None);
- CheckEqual(T.Intersect(T.Object, T.Proxy), T.None);
- CheckEqual(T.Intersect(T.Name, T.String), T.Intersect(T.String, T.Name));
- CheckEqual(T.Intersect(T.UniqueName, T.String), T.InternalizedString);
-
- // Class-class
- CHECK(IsClass(Type::Intersect(T.ObjectClass, T.ObjectClass)));
- CHECK(IsBitset(Type::Intersect(T.ObjectClass, T.ArrayClass)));
-
- CheckEqual(T.Intersect(T.ObjectClass, T.ObjectClass), T.ObjectClass);
- CheckEqual(T.Intersect(T.ObjectClass, T.ArrayClass), T.None);
-
- // Constant-constant
- CHECK(IsConstant(Type::Intersect(T.ObjectConstant1, T.ObjectConstant1)));
- CHECK(IsConstant(Type::Intersect(T.ArrayConstant1, T.ArrayConstant2)));
- CHECK(IsBitset(Type::Intersect(T.ObjectConstant1, T.ObjectConstant2)));
-
- CheckEqual(
- T.Intersect(T.ObjectConstant1, T.ObjectConstant1), T.ObjectConstant1);
- CheckEqual(
- T.Intersect(T.ArrayConstant1, T.ArrayConstant2), T.ArrayConstant1);
- CheckEqual(T.Intersect(T.ObjectConstant1, T.ObjectConstant2), T.None);
-
- // Bitset-class
- CHECK(IsClass(Type::Intersect(T.ObjectClass, T.Object)));
- CHECK(IsBitset(Type::Intersect(T.ObjectClass, T.Number)));
-
- CheckEqual(T.Intersect(T.ObjectClass, T.Object), T.ObjectClass);
- CheckEqual(T.Intersect(T.ObjectClass, T.Array), T.None);
- CheckEqual(T.Intersect(T.ObjectClass, T.Number), T.None);
-
- // Bitset-constant
- CHECK(IsBitset(Type::Intersect(T.Smi, T.Number)));
- CHECK(IsConstant(Type::Intersect(T.SmiConstant, T.Number)));
- CHECK(IsConstant(Type::Intersect(T.ObjectConstant1, T.Object)));
-
- CheckEqual(T.Intersect(T.Smi, T.Number), T.Smi);
- CheckEqual(T.Intersect(T.SmiConstant, T.Number), T.SmiConstant);
- CheckEqual(T.Intersect(T.ObjectConstant1, T.Object), T.ObjectConstant1);
-
- // Class-constant
- CHECK(IsBitset(Type::Intersect(T.ObjectConstant1, T.ObjectClass)));
- CHECK(IsBitset(Type::Intersect(T.ArrayClass, T.ObjectConstant2)));
-
- CheckEqual(T.Intersect(T.ObjectConstant1, T.ObjectClass), T.None);
- CheckEqual(T.Intersect(T.ArrayClass, T.ObjectConstant2), T.None);
-
- // Bitset-union
- CHECK(IsUnion(
- Type::Intersect(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass))));
- CHECK(IsBitset(
- Type::Intersect(T.Union(T.ArrayClass, T.ObjectConstant2), T.Number)));
-
- CheckEqual(
- T.Intersect(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass)),
- T.Union(T.ObjectConstant1, T.ObjectClass));
- CheckEqual(
- T.Intersect(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number),
- T.None);
-
- // Class-union
- CHECK(IsClass(
- Type::Intersect(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass)));
- CHECK(IsClass(
- Type::Intersect(T.Union(T.Object, T.SmiConstant), T.ArrayClass)));
- CHECK(IsBitset(
- Type::Intersect(T.Union(T.ObjectClass, T.ArrayConstant1), T.ArrayClass)));
-
- CheckEqual(
- T.Intersect(T.ArrayClass, T.Union(T.ObjectConstant2, T.ArrayClass)),
- T.ArrayClass);
- CheckEqual(
- T.Intersect(T.ArrayClass, T.Union(T.Object, T.SmiConstant)),
- T.ArrayClass);
- CheckEqual(
- T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant1), T.ArrayClass),
- T.None);
-
- // Constant-union
- CHECK(IsConstant(Type::Intersect(
- T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2))));
- CHECK(IsConstant(Type::Intersect(
- T.Union(T.Number, T.ObjectClass), T.SmiConstant)));
- CHECK(IsBitset(Type::Intersect(
- T.Union(T.ArrayConstant1, T.ObjectClass), T.ObjectConstant1)));
-
- CheckEqual(
- T.Intersect(
- T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2)),
- T.ObjectConstant1);
- CheckEqual(
- T.Intersect(T.SmiConstant, T.Union(T.Number, T.ObjectConstant2)),
- T.SmiConstant);
- CheckEqual(
- T.Intersect(T.Union(T.ArrayConstant1, T.ObjectClass), T.ObjectConstant1),
- T.None);
-
- // Union-union
- CHECK(IsUnion(Type::Intersect(
- T.Union(T.Number, T.ArrayClass), T.Union(T.Signed32, T.Array))));
- CHECK(IsBitset(Type::Intersect(
- T.Union(T.Number, T.ObjectClass), T.Union(T.Signed32, T.Array))));
-
- CheckEqual(
- T.Intersect(
- T.Union(T.Number, T.ArrayClass),
- T.Union(T.Smi, T.Array)),
- T.Union(T.Smi, T.ArrayClass));
- CheckEqual(
- T.Intersect(
- T.Union(T.Number, T.ObjectClass),
- T.Union(T.Signed32, T.Array)),
- T.Signed32);
- CheckEqual(
- T.Intersect(
- T.Union(T.ObjectConstant2, T.ObjectConstant1),
- T.Union(T.ObjectConstant1, T.ObjectConstant2)),
- T.Union(T.ObjectConstant2, T.ObjectConstant1));
- CheckEqual(
- T.Intersect(
- T.Union(T.Union(T.ObjectConstant2, T.ObjectConstant1), T.ArrayClass),
- T.Union(
- T.ObjectConstant1, T.Union(T.ArrayConstant1, T.ObjectConstant2))),
- T.Union(T.ObjectConstant2, T.ObjectConstant1));
- CheckEqual(
- T.Intersect(
- T.Union(T.ObjectConstant2, T.ArrayConstant1),
- T.Union(T.ObjectConstant1, T.ArrayConstant2)),
- T.ArrayConstant1);
+ ZoneTests().Intersect();
+ HeapTests().Intersect();
+}
+
+
+TEST(Convert) {
+ CcTest::InitializeVM();
+ ZoneTests().Convert<HeapType, Handle<HeapType>, Isolate, HeapRep>();
+ HeapTests().Convert<Type, Type*, Zone, ZoneRep>();
}
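
For reference, the CheckSub/CheckUnordered helpers used throughout the cases above boil down to paired assertions over Is(); a simplified sketch (the real definitions appear earlier in this file and additionally compare the bitset encodings):

  // Simplified sketch of the helpers exercised by the Is/Union/Intersect cases.
  void CheckSub(TypeHandle type1, TypeHandle type2) {
    CHECK(type1->Is(type2));   // the subtype relation holds one way...
    CHECK(!type2->Is(type1));  // ...but not the other.
  }
  void CheckUnordered(TypeHandle type1, TypeHandle type2) {
    CHECK(!type1->Is(type2));  // neither type is a subtype of the other
    CHECK(!type2->Is(type1));
  }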
diff --git a/deps/v8/test/cctest/test-unbound-queue.cc b/deps/v8/test/cctest/test-unbound-queue.cc
index 6d02fff0bb..dd9b9c142b 100644
--- a/deps/v8/test/cctest/test-unbound-queue.cc
+++ b/deps/v8/test/cctest/test-unbound-queue.cc
@@ -74,4 +74,3 @@ TEST(MultipleRecords) {
}
CHECK(cq.IsEmpty());
}
-
diff --git a/deps/v8/test/cctest/test-unique.cc b/deps/v8/test/cctest/test-unique.cc
index 0936908f12..ad14ff1334 100644
--- a/deps/v8/test/cctest/test-unique.cc
+++ b/deps/v8/test/cctest/test-unique.cc
@@ -546,4 +546,3 @@ TEST(UniqueSet_UnionExhaustive) {
}
}
}
-
diff --git a/deps/v8/test/cctest/test-utils-a64.cc b/deps/v8/test/cctest/test-utils-a64.cc
new file mode 100644
index 0000000000..e637a6e52b
--- /dev/null
+++ b/deps/v8/test/cctest/test-utils-a64.cc
@@ -0,0 +1,426 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "a64/utils-a64.h"
+#include "cctest.h"
+#include "test-utils-a64.h"
+
+using namespace v8::internal;
+
+
+#define __ masm->
+
+
+bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
+ if (result != expected) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
+ expected, result);
+ }
+
+ return expected == result;
+}
+
+
+bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
+ if (result != expected) {
+ printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ expected, result);
+ }
+
+ return expected == result;
+}
+
+
+bool EqualFP32(float expected, const RegisterDump*, float result) {
+ if (float_to_rawbits(expected) == float_to_rawbits(result)) {
+ return true;
+ } else {
+ if (std::isnan(expected) || (expected == 0.0)) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
+ float_to_rawbits(expected), float_to_rawbits(result));
+ } else {
+ printf("Expected %.9f (0x%08" PRIx32 ")\t "
+ "Found %.9f (0x%08" PRIx32 ")\n",
+ expected, float_to_rawbits(expected),
+ result, float_to_rawbits(result));
+ }
+ return false;
+ }
+}
+
+
+bool EqualFP64(double expected, const RegisterDump*, double result) {
+ if (double_to_rawbits(expected) == double_to_rawbits(result)) {
+ return true;
+ }
+
+ if (std::isnan(expected) || (expected == 0.0)) {
+ printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ double_to_rawbits(expected), double_to_rawbits(result));
+ } else {
+ printf("Expected %.17f (0x%016" PRIx64 ")\t "
+ "Found %.17f (0x%016" PRIx64 ")\n",
+ expected, double_to_rawbits(expected),
+ result, double_to_rawbits(result));
+ }
+ return false;
+}
+
+
+bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
+ ASSERT(reg.Is32Bits());
+ // Retrieve the corresponding X register so we can check that the upper part
+ // was properly cleared.
+ int64_t result_x = core->xreg(reg.code());
+ if ((result_x & 0xffffffff00000000L) != 0) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
+ expected, result_x);
+ return false;
+ }
+ uint32_t result_w = core->wreg(reg.code());
+ return Equal32(expected, core, result_w);
+}
+
+
+bool Equal64(uint64_t expected,
+ const RegisterDump* core,
+ const Register& reg) {
+ ASSERT(reg.Is64Bits());
+ uint64_t result = core->xreg(reg.code());
+ return Equal64(expected, core, result);
+}
+
+
+bool EqualFP32(float expected,
+ const RegisterDump* core,
+ const FPRegister& fpreg) {
+ ASSERT(fpreg.Is32Bits());
+ // Retrieve the corresponding D register so we can check that the upper part
+ // was properly cleared.
+ uint64_t result_64 = core->dreg_bits(fpreg.code());
+ if ((result_64 & 0xffffffff00000000L) != 0) {
+ printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
+ float_to_rawbits(expected), expected, result_64);
+ return false;
+ }
+
+ return EqualFP32(expected, core, core->sreg(fpreg.code()));
+}
+
+
+bool EqualFP64(double expected,
+ const RegisterDump* core,
+ const FPRegister& fpreg) {
+ ASSERT(fpreg.Is64Bits());
+ return EqualFP64(expected, core, core->dreg(fpreg.code()));
+}
+
+
+bool Equal64(const Register& reg0,
+ const RegisterDump* core,
+ const Register& reg1) {
+ ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
+ int64_t expected = core->xreg(reg0.code());
+ int64_t result = core->xreg(reg1.code());
+ return Equal64(expected, core, result);
+}
+
+
+static char FlagN(uint32_t flags) {
+ return (flags & NFlag) ? 'N' : 'n';
+}
+
+
+static char FlagZ(uint32_t flags) {
+ return (flags & ZFlag) ? 'Z' : 'z';
+}
+
+
+static char FlagC(uint32_t flags) {
+ return (flags & CFlag) ? 'C' : 'c';
+}
+
+
+static char FlagV(uint32_t flags) {
+ return (flags & VFlag) ? 'V' : 'v';
+}
+
+
+bool EqualNzcv(uint32_t expected, uint32_t result) {
+ ASSERT((expected & ~NZCVFlag) == 0);
+ ASSERT((result & ~NZCVFlag) == 0);
+ if (result != expected) {
+ printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
+ FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
+ FlagN(result), FlagZ(result), FlagC(result), FlagV(result));
+ return false;
+ }
+
+ return true;
+}
+
+
+bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (a->xreg(i) != b->xreg(i)) {
+ printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ i, a->xreg(i), b->xreg(i));
+ return false;
+ }
+ }
+
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ uint64_t a_bits = a->dreg_bits(i);
+ uint64_t b_bits = b->dreg_bits(i);
+ if (a_bits != b_bits) {
+ printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ i, a_bits, b_bits);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
+ int reg_size, int reg_count, RegList allowed) {
+ RegList list = 0;
+ int i = 0;
+ for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
+ if (((1UL << n) & allowed) != 0) {
+ // Only assign allowed registers.
+ if (r) {
+ r[i] = Register::Create(n, reg_size);
+ }
+ if (x) {
+ x[i] = Register::Create(n, kXRegSize);
+ }
+ if (w) {
+ w[i] = Register::Create(n, kWRegSize);
+ }
+ list |= (1UL << n);
+ i++;
+ }
+ }
+ // Check that we got enough registers.
+ ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);
+
+ return list;
+}
+
+
+RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
+ int reg_size, int reg_count, RegList allowed) {
+ RegList list = 0;
+ int i = 0;
+ for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
+ if (((1UL << n) & allowed) != 0) {
+      // Only assign allowed registers.
+ if (v) {
+ v[i] = FPRegister::Create(n, reg_size);
+ }
+ if (d) {
+ d[i] = FPRegister::Create(n, kDRegSize);
+ }
+ if (s) {
+ s[i] = FPRegister::Create(n, kSRegSize);
+ }
+ list |= (1UL << n);
+ i++;
+ }
+ }
+ // Check that we got enough registers.
+ ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
+
+ return list;
+}
+
+
+void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
+ Register first = NoReg;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (reg_list & (1UL << i)) {
+ Register xn = Register::Create(i, kXRegSize);
+ // We should never write into csp here.
+ ASSERT(!xn.Is(csp));
+ if (!xn.IsZero()) {
+ if (!first.IsValid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Mov(xn, value);
+ first = xn;
+ } else {
+ // We've already loaded the literal, so re-use the value already
+ // loaded into the first register we hit.
+ __ Mov(xn, first);
+ }
+ }
+ }
+ }
+}
+
+
+void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
+ FPRegister first = NoFPReg;
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if (reg_list & (1UL << i)) {
+ FPRegister dn = FPRegister::Create(i, kDRegSize);
+ if (!first.IsValid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Fmov(dn, value);
+ first = dn;
+ } else {
+ // We've already loaded the literal, so re-use the value already loaded
+ // into the first register we hit.
+ __ Fmov(dn, first);
+ }
+ }
+ }
+}
+
+
+void Clobber(MacroAssembler* masm, CPURegList reg_list) {
+ if (reg_list.type() == CPURegister::kRegister) {
+ // This will always clobber X registers.
+ Clobber(masm, reg_list.list());
+ } else if (reg_list.type() == CPURegister::kFPRegister) {
+ // This will always clobber D registers.
+ ClobberFP(masm, reg_list.list());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void RegisterDump::Dump(MacroAssembler* masm) {
+ ASSERT(__ StackPointer().Is(csp));
+
+ // Ensure that we don't unintentionally clobber any registers.
+ Register old_tmp0 = __ Tmp0();
+ Register old_tmp1 = __ Tmp1();
+ FPRegister old_fptmp0 = __ FPTmp0();
+ __ SetScratchRegisters(NoReg, NoReg);
+ __ SetFPScratchRegister(NoFPReg);
+
+ // Preserve some temporary registers.
+ Register dump_base = x0;
+ Register dump = x1;
+ Register tmp = x2;
+ Register dump_base_w = dump_base.W();
+ Register dump_w = dump.W();
+ Register tmp_w = tmp.W();
+
+ // Offsets into the dump_ structure.
+ const int x_offset = offsetof(dump_t, x_);
+ const int w_offset = offsetof(dump_t, w_);
+ const int d_offset = offsetof(dump_t, d_);
+ const int s_offset = offsetof(dump_t, s_);
+ const int sp_offset = offsetof(dump_t, sp_);
+ const int wsp_offset = offsetof(dump_t, wsp_);
+ const int flags_offset = offsetof(dump_t, flags_);
+
+ __ Push(xzr, dump_base, dump, tmp);
+
+ // Load the address where we will dump the state.
+ __ Mov(dump_base, reinterpret_cast<uint64_t>(&dump_));
+
+ // Dump the stack pointer (csp and wcsp).
+ // The stack pointer cannot be stored directly; it needs to be moved into
+ // another register first. Also, we pushed four X registers, so we need to
+ // compensate here.
+ __ Add(tmp, csp, 4 * kXRegSizeInBytes);
+ __ Str(tmp, MemOperand(dump_base, sp_offset));
+ __ Add(tmp_w, wcsp, 4 * kXRegSizeInBytes);
+ __ Str(tmp_w, MemOperand(dump_base, wsp_offset));
+
+ // Dump X registers.
+ __ Add(dump, dump_base, x_offset);
+ for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
+ __ Stp(Register::XRegFromCode(i), Register::XRegFromCode(i + 1),
+ MemOperand(dump, i * kXRegSizeInBytes));
+ }
+
+ // Dump W registers.
+ __ Add(dump, dump_base, w_offset);
+ for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
+ __ Stp(Register::WRegFromCode(i), Register::WRegFromCode(i + 1),
+ MemOperand(dump, i * kWRegSizeInBytes));
+ }
+
+ // Dump D registers.
+ __ Add(dump, dump_base, d_offset);
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
+ __ Stp(FPRegister::DRegFromCode(i), FPRegister::DRegFromCode(i + 1),
+ MemOperand(dump, i * kDRegSizeInBytes));
+ }
+
+ // Dump S registers.
+ __ Add(dump, dump_base, s_offset);
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
+ __ Stp(FPRegister::SRegFromCode(i), FPRegister::SRegFromCode(i + 1),
+ MemOperand(dump, i * kSRegSizeInBytes));
+ }
+
+ // Dump the flags.
+ __ Mrs(tmp, NZCV);
+ __ Str(tmp, MemOperand(dump_base, flags_offset));
+
+  // To dump the values that were in tmp and dump, we need a new scratch
+ // register. We can use any of the already dumped registers since we can
+ // easily restore them.
+ Register dump2_base = x10;
+ Register dump2 = x11;
+ ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));
+
+ // Don't lose the dump_ address.
+ __ Mov(dump2_base, dump_base);
+
+ __ Pop(tmp, dump, dump_base, xzr);
+
+ __ Add(dump2, dump2_base, w_offset);
+ __ Str(dump_base_w, MemOperand(dump2, dump_base.code() * kWRegSizeInBytes));
+ __ Str(dump_w, MemOperand(dump2, dump.code() * kWRegSizeInBytes));
+ __ Str(tmp_w, MemOperand(dump2, tmp.code() * kWRegSizeInBytes));
+
+ __ Add(dump2, dump2_base, x_offset);
+ __ Str(dump_base, MemOperand(dump2, dump_base.code() * kXRegSizeInBytes));
+ __ Str(dump, MemOperand(dump2, dump.code() * kXRegSizeInBytes));
+ __ Str(tmp, MemOperand(dump2, tmp.code() * kXRegSizeInBytes));
+
+ // Finally, restore dump2_base and dump2.
+ __ Ldr(dump2_base, MemOperand(dump2, dump2_base.code() * kXRegSizeInBytes));
+ __ Ldr(dump2, MemOperand(dump2, dump2.code() * kXRegSizeInBytes));
+
+ // Restore the MacroAssembler's scratch registers.
+ __ SetScratchRegisters(old_tmp0, old_tmp1);
+ __ SetFPScratchRegister(old_fptmp0);
+
+ completed_ = true;
+}
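
These helpers are consumed by the A64 assembler and macro-assembler tests roughly as follows; a minimal usage sketch, with the surrounding scaffolding (assembler setup, running the generated buffer) elided:

  // Sketch only: emit code that writes a known value, append the register
  // snapshot, then compare against the dump after the code has executed.
  RegisterDump core;
  masm->Mov(x13, 0x1234567890abcdefUL);  // code under test
  core.Dump(masm);                       // records X/W/D/S registers and NZCV
  // ... run the generated code (elided) ...
  CHECK(Equal64(0x1234567890abcdefUL, &core, x13));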
diff --git a/deps/v8/test/cctest/test-utils-a64.h b/deps/v8/test/cctest/test-utils-a64.h
new file mode 100644
index 0000000000..250277542a
--- /dev/null
+++ b/deps/v8/test/cctest/test-utils-a64.h
@@ -0,0 +1,233 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_TEST_UTILS_A64_H_
+#define V8_A64_TEST_UTILS_A64_H_
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "a64/macro-assembler-a64.h"
+#include "a64/utils-a64.h"
+#include "cctest.h"
+
+
+using namespace v8::internal;
+
+
+// RegisterDump: Object allowing integer, floating point and flags registers
+// to be saved to itself for future reference.
+class RegisterDump {
+ public:
+ RegisterDump() : completed_(false) {}
+
+ // The Dump method generates code to store a snapshot of the register values.
+ // It needs to be able to use the stack temporarily, and requires that the
+ // current stack pointer is csp, and is properly aligned.
+ //
+  // The dumping code is generated through the given MacroAssembler. No registers
+ // are corrupted in the process, but the stack is used briefly. The flags will
+ // be corrupted during this call.
+ void Dump(MacroAssembler* assm);
+
+ // Register accessors.
+ inline int32_t wreg(unsigned code) const {
+ if (code == kSPRegInternalCode) {
+ return wspreg();
+ }
+ ASSERT(RegAliasesMatch(code));
+ return dump_.w_[code];
+ }
+
+ inline int64_t xreg(unsigned code) const {
+ if (code == kSPRegInternalCode) {
+ return spreg();
+ }
+ ASSERT(RegAliasesMatch(code));
+ return dump_.x_[code];
+ }
+
+ // FPRegister accessors.
+ inline uint32_t sreg_bits(unsigned code) const {
+ ASSERT(FPRegAliasesMatch(code));
+ return dump_.s_[code];
+ }
+
+ inline float sreg(unsigned code) const {
+ return rawbits_to_float(sreg_bits(code));
+ }
+
+ inline uint64_t dreg_bits(unsigned code) const {
+ ASSERT(FPRegAliasesMatch(code));
+ return dump_.d_[code];
+ }
+
+ inline double dreg(unsigned code) const {
+ return rawbits_to_double(dreg_bits(code));
+ }
+
+ // Stack pointer accessors.
+ inline int64_t spreg() const {
+ ASSERT(SPRegAliasesMatch());
+ return dump_.sp_;
+ }
+
+ inline int64_t wspreg() const {
+ ASSERT(SPRegAliasesMatch());
+ return dump_.wsp_;
+ }
+
+ // Flags accessors.
+ inline uint64_t flags_nzcv() const {
+ ASSERT(IsComplete());
+ ASSERT((dump_.flags_ & ~Flags_mask) == 0);
+ return dump_.flags_ & Flags_mask;
+ }
+
+ inline bool IsComplete() const {
+ return completed_;
+ }
+
+ private:
+ // Indicate whether the dump operation has been completed.
+ bool completed_;
+
+ // Check that the lower 32 bits of x<code> exactly match the 32 bits of
+ // w<code>. A failure of this test most likely represents a failure in the
+ // ::Dump method, or a failure in the simulator.
+ bool RegAliasesMatch(unsigned code) const {
+ ASSERT(IsComplete());
+ ASSERT(code < kNumberOfRegisters);
+ return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
+ }
+
+ // As RegAliasesMatch, but for the stack pointer.
+ bool SPRegAliasesMatch() const {
+ ASSERT(IsComplete());
+ return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
+ }
+
+ // As RegAliasesMatch, but for floating-point registers.
+ bool FPRegAliasesMatch(unsigned code) const {
+ ASSERT(IsComplete());
+ ASSERT(code < kNumberOfFPRegisters);
+ return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
+ }
+
+ // Store all the dumped elements in a simple struct so the implementation can
+ // use offsetof to quickly find the correct field.
+ struct dump_t {
+ // Core registers.
+ uint64_t x_[kNumberOfRegisters];
+ uint32_t w_[kNumberOfRegisters];
+
+ // Floating-point registers, as raw bits.
+ uint64_t d_[kNumberOfFPRegisters];
+ uint32_t s_[kNumberOfFPRegisters];
+
+ // The stack pointer.
+ uint64_t sp_;
+ uint64_t wsp_;
+
+ // NZCV flags, stored in bits 28 to 31.
+ // bit[31] : Negative
+ // bit[30] : Zero
+ // bit[29] : Carry
+ // bit[28] : oVerflow
+ uint64_t flags_;
+ } dump_;
+
+ static dump_t for_sizeof();
+ STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kDRegSizeInBytes);
+ STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kSRegSizeInBytes);
+ STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kXRegSizeInBytes);
+ STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kWRegSizeInBytes);
+ STATIC_ASSERT(sizeof(for_sizeof().x_[0]) == kXRegSizeInBytes);
+ STATIC_ASSERT(sizeof(for_sizeof().w_[0]) == kWRegSizeInBytes);
+};
+
+// Some of these methods don't use the RegisterDump argument, but they have to
+// accept it so that they can overload those that take register arguments.
+bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result);
+bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result);
+
+bool EqualFP32(float expected, const RegisterDump*, float result);
+bool EqualFP64(double expected, const RegisterDump*, double result);
+
+bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
+bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg);
+
+bool EqualFP32(float expected, const RegisterDump* core,
+ const FPRegister& fpreg);
+bool EqualFP64(double expected, const RegisterDump* core,
+ const FPRegister& fpreg);
+
+bool Equal64(const Register& reg0, const RegisterDump* core,
+ const Register& reg1);
+
+bool EqualNzcv(uint32_t expected, uint32_t result);
+
+bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);
+
+// Populate the w, x and r arrays with registers from the 'allowed' mask. The
+// r array will be populated with <reg_size>-sized registers.
+//
+// This allows for tests which use large, parameterized blocks of registers
+// (such as the push and pop tests), but where certain registers must be
+// avoided as they are used for other purposes.
+//
+// Any of w, x, or r can be NULL if they are not required.
+//
+// The return value is a RegList indicating which registers were allocated.
+RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
+ int reg_size, int reg_count, RegList allowed);
+
+// As PopulateRegisterArray, but for floating-point registers.
+RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
+ int reg_size, int reg_count, RegList allowed);
+
+// Overwrite the contents of the specified registers. This enables tests to
+// check that register contents are written in cases where it's likely that the
+// correct outcome could already be stored in the register.
+//
+// This always overwrites X-sized registers. If tests are operating on W
+// registers, a subsequent write into an aliased W register should clear the
+// top word anyway, so clobbering the full X registers should make tests more
+// rigorous.
+void Clobber(MacroAssembler* masm, RegList reg_list,
+ uint64_t const value = 0xfedcba9876543210UL);
+
+// As Clobber, but for FP registers.
+void ClobberFP(MacroAssembler* masm, RegList reg_list,
+ double const value = kFP64SignallingNaN);
+
+// As Clobber, but for a CPURegList with either FP or integer registers. When
+// using this method, the clobber value is always the default for the basic
+// Clobber or ClobberFP functions.
+void Clobber(MacroAssembler* masm, CPURegList reg_list);
+
+#endif // V8_A64_TEST_UTILS_A64_H_
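The dump_t struct above is deliberately a plain aggregate so a dump routine can locate each slot with offsetof rather than hand-maintained constants, and the *AliasesMatch predicates simply compare the low 32 bits of the 64-bit view against the 32-bit view. A minimal standalone sketch of both ideas follows; the struct shape mirrors the header, but the register count, mask value, and helper names are illustrative assumptions, not part of the patch.

// Illustrative sketch only; not part of the patch.
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace {

const unsigned kNumberOfRegisters = 32;   // assumption: x0-x30 plus sp
const uint64_t kWRegMask = 0xffffffffUL;  // low 32 bits, as used by the header

struct dump_t {
  uint64_t x_[kNumberOfRegisters];
  uint32_t w_[kNumberOfRegisters];
};

// Byte offset of x_[code]; a generated dump stub can store registers at these
// computed offsets instead of magic numbers.
size_t XRegOffset(unsigned code) {
  return offsetof(dump_t, x_) + code * sizeof(uint64_t);
}

// Mirrors RegAliasesMatch: the W view must equal the low word of the X view.
bool RegAliasesMatch(const dump_t& dump, unsigned code) {
  return (dump.x_[code] & kWRegMask) == dump.w_[code];
}

}  // namespace

int main() {
  dump_t dump = {};
  dump.x_[3] = 0x0123456789abcdefUL;
  dump.w_[3] = 0x89abcdef;  // consistent low word
  assert(XRegOffset(3) == 3 * sizeof(uint64_t));
  assert(RegAliasesMatch(dump, 3));
  return 0;
}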
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 932b06b850..97eca86f1b 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <utility>
+
#include "v8.h"
#include "global-handles.h"
@@ -56,7 +58,7 @@ static Handle<JSWeakMap> AllocateJSWeakMap(Isolate* isolate) {
static void PutIntoWeakMap(Handle<JSWeakMap> weakmap,
Handle<JSObject> key,
Handle<Object> value) {
- Handle<ObjectHashTable> table = PutIntoObjectHashTable(
+ Handle<ObjectHashTable> table = ObjectHashTable::Put(
Handle<ObjectHashTable>(ObjectHashTable::cast(weakmap->table())),
Handle<JSObject>(JSObject::cast(*key)),
value);
@@ -64,12 +66,14 @@ static void PutIntoWeakMap(Handle<JSWeakMap> weakmap,
}
static int NumberOfWeakCalls = 0;
-static void WeakPointerCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void* id) {
- ASSERT(id == reinterpret_cast<void*>(1234));
+static void WeakPointerCallback(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ std::pair<v8::Persistent<v8::Value>*, int>* p =
+ reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
+ data.GetParameter());
+ ASSERT_EQ(1234, p->second);
NumberOfWeakCalls++;
- handle->Dispose();
+ p->first->Reset();
}
@@ -112,9 +116,10 @@ TEST(Weakness) {
// Make the global reference to the key weak.
{
HandleScope scope(isolate);
- global_handles->MakeWeak(key.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
+ std::pair<Handle<Object>*, int> handle_and_id(&key, 1234);
+ GlobalHandles::MakeWeak(key.location(),
+ reinterpret_cast<void*>(&handle_and_id),
+ &WeakPointerCallback);
}
CHECK(global_handles->IsWeak(key.location()));
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index aff4c7fcbe..514b6b2393 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <utility>
+
#include "v8.h"
#include "global-handles.h"
@@ -56,7 +58,7 @@ static Handle<JSWeakSet> AllocateJSWeakSet(Isolate* isolate) {
static void PutIntoWeakSet(Handle<JSWeakSet> weakset,
Handle<JSObject> key,
Handle<Object> value) {
- Handle<ObjectHashTable> table = PutIntoObjectHashTable(
+ Handle<ObjectHashTable> table = ObjectHashTable::Put(
Handle<ObjectHashTable>(ObjectHashTable::cast(weakset->table())),
Handle<JSObject>(JSObject::cast(*key)),
value);
@@ -64,12 +66,14 @@ static void PutIntoWeakSet(Handle<JSWeakSet> weakset,
}
static int NumberOfWeakCalls = 0;
-static void WeakPointerCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* handle,
- void* id) {
- ASSERT(id == reinterpret_cast<void*>(1234));
+static void WeakPointerCallback(
+ const v8::WeakCallbackData<v8::Value, void>& data) {
+ std::pair<v8::Persistent<v8::Value>*, int>* p =
+ reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
+ data.GetParameter());
+ ASSERT_EQ(1234, p->second);
NumberOfWeakCalls++;
- handle->Dispose();
+ p->first->Reset();
}
@@ -112,9 +116,10 @@ TEST(WeakSet_Weakness) {
// Make the global reference to the key weak.
{
HandleScope scope(isolate);
- global_handles->MakeWeak(key.location(),
- reinterpret_cast<void*>(1234),
- &WeakPointerCallback);
+ std::pair<Handle<Object>*, int> handle_and_id(&key, 1234);
+ GlobalHandles::MakeWeak(key.location(),
+ reinterpret_cast<void*>(&handle_and_id),
+ &WeakPointerCallback);
}
CHECK(global_handles->IsWeak(key.location()));
diff --git a/deps/v8/test/cctest/test-weaktypedarrays.cc b/deps/v8/test/cctest/test-weaktypedarrays.cc
index fe1ef04940..daf07eed02 100644
--- a/deps/v8/test/cctest/test-weaktypedarrays.cc
+++ b/deps/v8/test/cctest/test-weaktypedarrays.cc
@@ -89,22 +89,24 @@ TEST(WeakArrayBuffersFromApi) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
- CHECK_EQ(0, CountArrayBuffersInWeakList(isolate->heap()));
+ int start = CountArrayBuffersInWeakList(isolate->heap());
{
v8::HandleScope s1(context->GetIsolate());
- v8::Handle<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::New(256);
+ v8::Handle<v8::ArrayBuffer> ab1 =
+ v8::ArrayBuffer::New(context->GetIsolate(), 256);
{
v8::HandleScope s2(context->GetIsolate());
- v8::Handle<v8::ArrayBuffer> ab2 = v8::ArrayBuffer::New(128);
+ v8::Handle<v8::ArrayBuffer> ab2 =
+ v8::ArrayBuffer::New(context->GetIsolate(), 128);
Handle<JSArrayBuffer> iab1 = v8::Utils::OpenHandle(*ab1);
Handle<JSArrayBuffer> iab2 = v8::Utils::OpenHandle(*ab2);
- CHECK_EQ(2, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(2, CountArrayBuffersInWeakList(isolate->heap()) - start);
CHECK(HasArrayBufferInWeakList(isolate->heap(), *iab1));
CHECK(HasArrayBufferInWeakList(isolate->heap(), *iab2));
}
isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()) - start);
{
HandleScope scope2(isolate);
Handle<JSArrayBuffer> iab1 = v8::Utils::OpenHandle(*ab1);
@@ -114,7 +116,7 @@ TEST(WeakArrayBuffersFromApi) {
}
isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(0, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(start, CountArrayBuffersInWeakList(isolate->heap()));
}
@@ -122,11 +124,12 @@ TEST(WeakArrayBuffersFromScript) {
v8::V8::Initialize();
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
+ int start = CountArrayBuffersInWeakList(isolate->heap());
for (int i = 1; i <= 3; i++) {
// Create 3 array buffers, make i-th of them garbage,
// validate correct state of array buffer weak list.
- CHECK_EQ(0, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(start, CountArrayBuffersInWeakList(isolate->heap()));
{
v8::HandleScope scope(context->GetIsolate());
@@ -142,7 +145,7 @@ TEST(WeakArrayBuffersFromScript) {
v8::Handle<v8::ArrayBuffer> ab3 =
v8::Handle<v8::ArrayBuffer>::Cast(CompileRun("ab3"));
- CHECK_EQ(3, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(3, CountArrayBuffersInWeakList(isolate->heap()) - start);
CHECK(HasArrayBufferInWeakList(isolate->heap(),
*v8::Utils::OpenHandle(*ab1)));
CHECK(HasArrayBufferInWeakList(isolate->heap(),
@@ -156,7 +159,7 @@ TEST(WeakArrayBuffersFromScript) {
CompileRun(source.start());
isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(2, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(2, CountArrayBuffersInWeakList(isolate->heap()) - start);
{
v8::HandleScope s2(context->GetIsolate());
@@ -174,7 +177,7 @@ TEST(WeakArrayBuffersFromScript) {
}
isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(0, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(start, CountArrayBuffersInWeakList(isolate->heap()));
}
}
@@ -185,7 +188,8 @@ void TestViewFromApi() {
Isolate* isolate = GetIsolateFrom(&context);
v8::HandleScope s1(context->GetIsolate());
- v8::Handle<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(2048);
+ v8::Handle<v8::ArrayBuffer> ab =
+ v8::ArrayBuffer::New(context->GetIsolate(), 2048);
Handle<JSArrayBuffer> iab = v8::Utils::OpenHandle(*ab);
{
v8::HandleScope s2(context->GetIsolate());
@@ -266,6 +270,7 @@ static void TestTypedArrayFromScript(const char* constructor) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
v8::HandleScope scope(context->GetIsolate());
+ int start = CountArrayBuffersInWeakList(isolate->heap());
CompileRun("var ab = new ArrayBuffer(2048);");
for (int i = 1; i <= 3; i++) {
// Create 3 typed arrays, make i-th of them garbage,
@@ -273,7 +278,7 @@ static void TestTypedArrayFromScript(const char* constructor) {
v8::HandleScope s0(context->GetIsolate());
i::ScopedVector<char> source(2048);
- CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()) - start);
{
v8::HandleScope s1(context->GetIsolate());
@@ -292,7 +297,7 @@ static void TestTypedArrayFromScript(const char* constructor) {
v8::Handle<TypedArray>::Cast(CompileRun("ta2"));
v8::Handle<TypedArray> ta3 =
v8::Handle<TypedArray>::Cast(CompileRun("ta3"));
- CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()) - start);
Handle<JSArrayBuffer> iab = v8::Utils::OpenHandle(*ab);
CHECK_EQ(3, CountViews(*iab));
CHECK(HasViewInWeakList(*iab, *v8::Utils::OpenHandle(*ta1)));
@@ -304,7 +309,7 @@ static void TestTypedArrayFromScript(const char* constructor) {
CompileRun(source.start());
isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()) - start);
{
v8::HandleScope s2(context->GetIsolate());
@@ -324,7 +329,7 @@ static void TestTypedArrayFromScript(const char* constructor) {
CompileRun("ta1 = null; ta2 = null; ta3 = null;");
isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()));
+ CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()) - start);
{
v8::HandleScope s3(context->GetIsolate());
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
new file mode 100644
index 0000000000..2da6813166
--- /dev/null
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -0,0 +1,142 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "trace-extension.h"
+
+#include "cctest.h"
+#include "sampler.h"
+
+namespace v8 {
+namespace internal {
+
+const char* TraceExtension::kSource =
+ "native function trace();"
+ "native function js_trace();"
+ "native function js_entry_sp();"
+ "native function js_entry_sp_level2();";
+
+
+v8::Handle<v8::FunctionTemplate> TraceExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Handle<v8::String> name) {
+ if (name->Equals(v8::String::NewFromUtf8(isolate, "trace"))) {
+ return v8::FunctionTemplate::New(isolate, TraceExtension::Trace);
+ } else if (name->Equals(v8::String::NewFromUtf8(isolate, "js_trace"))) {
+ return v8::FunctionTemplate::New(isolate, TraceExtension::JSTrace);
+ } else if (name->Equals(v8::String::NewFromUtf8(isolate, "js_entry_sp"))) {
+ return v8::FunctionTemplate::New(isolate, TraceExtension::JSEntrySP);
+ } else if (name->Equals(v8::String::NewFromUtf8(isolate,
+ "js_entry_sp_level2"))) {
+ return v8::FunctionTemplate::New(isolate, TraceExtension::JSEntrySPLevel2);
+ } else {
+ CHECK(false);
+ return v8::Handle<v8::FunctionTemplate>();
+ }
+}
+
+
+Address TraceExtension::GetFP(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  // Convert the frame pointer, encoded as smis in the arguments, to a pointer.
+ CHECK_EQ(2, args.Length()); // Ignore second argument on 32-bit platform.
+#if defined(V8_HOST_ARCH_32_BIT)
+ Address fp = *reinterpret_cast<Address*>(*args[0]);
+#elif defined(V8_HOST_ARCH_64_BIT)
+ int64_t low_bits = *reinterpret_cast<uint64_t*>(*args[0]) >> 32;
+ int64_t high_bits = *reinterpret_cast<uint64_t*>(*args[1]);
+ Address fp = reinterpret_cast<Address>(high_bits | low_bits);
+#else
+#error Host architecture is neither 32-bit nor 64-bit.
+#endif
+ printf("Trace: %p\n", fp);
+ return fp;
+}
+
+
+static struct {
+ TickSample* sample;
+} trace_env = { NULL };
+
+
+void TraceExtension::InitTraceEnv(TickSample* sample) {
+ trace_env.sample = sample;
+}
+
+
+void TraceExtension::DoTrace(Address fp) {
+ RegisterState regs;
+ regs.fp = fp;
+  // sp is only used to define the stack's high bound.
+ regs.sp =
+ reinterpret_cast<Address>(trace_env.sample) - 10240;
+ trace_env.sample->Init(CcTest::i_isolate(), regs);
+}
+
+
+void TraceExtension::Trace(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DoTrace(GetFP(args));
+}
+
+
+// Hide c_entry_fp to emulate the situation when sampling is done while
+// pure JS code is being executed.
+static void DoTraceHideCEntryFPAddress(Address fp) {
+ v8::internal::Address saved_c_frame_fp =
+ *(CcTest::i_isolate()->c_entry_fp_address());
+ CHECK(saved_c_frame_fp);
+ *(CcTest::i_isolate()->c_entry_fp_address()) = 0;
+ i::TraceExtension::DoTrace(fp);
+ *(CcTest::i_isolate()->c_entry_fp_address()) = saved_c_frame_fp;
+}
+
+
+void TraceExtension::JSTrace(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DoTraceHideCEntryFPAddress(GetFP(args));
+}
+
+
+Address TraceExtension::GetJsEntrySp() {
+ CHECK_NE(NULL, CcTest::i_isolate()->thread_local_top());
+ return CcTest::i_isolate()->js_entry_sp();
+}
+
+
+void TraceExtension::JSEntrySP(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK_NE(0, GetJsEntrySp());
+}
+
+
+void TraceExtension::JSEntrySPLevel2(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::HandleScope scope(args.GetIsolate());
+ const Address js_entry_sp = GetJsEntrySp();
+ CHECK_NE(0, js_entry_sp);
+ CompileRun("js_entry_sp();");
+ CHECK_EQ(js_entry_sp, GetJsEntrySp());
+}
+
+
+} } // namespace v8::internal
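GetFP above reassembles a 64-bit frame pointer that was passed in as two smi arguments: on a 64-bit host a smi keeps its 32-bit payload in the upper half of the machine word, so shifting the first argument right by 32 recovers the pointer's low word, while the second argument's raw, unshifted word already carries the high word in place. A standalone sketch of that round trip follows, under the stated smi-layout assumption; the helper names and the sample pointer value are illustrative, not part of the patch.

// Illustrative sketch only; not part of the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

// Encode a 32-bit payload as a 64-bit smi word (payload << 32, low half zero).
uint64_t ToSmi(uint32_t payload) {
  return static_cast<uint64_t>(payload) << 32;
}

// Mirrors TraceExtension::GetFP on 64-bit hosts: arg0 carries the low word of
// the pointer, arg1 carries the high word.
uint64_t DecodeFP(uint64_t smi_arg0, uint64_t smi_arg1) {
  uint64_t low_bits = smi_arg0 >> 32;  // payload back into bits 0..31
  uint64_t high_bits = smi_arg1;       // payload is already in bits 32..63
  return high_bits | low_bits;
}

}  // namespace

int main() {
  uint64_t fp = 0x00007f1234abcd00ULL;                      // sample pointer
  uint64_t arg0 = ToSmi(static_cast<uint32_t>(fp));         // low word as smi
  uint64_t arg1 = ToSmi(static_cast<uint32_t>(fp >> 32));   // high word as smi
  assert(DecodeFP(arg0, arg1) == fp);
  std::printf("round-tripped fp: %llx\n",
              static_cast<unsigned long long>(DecodeFP(arg0, arg1)));
  return 0;
}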
diff --git a/deps/v8/test/cctest/trace-extension.h b/deps/v8/test/cctest/trace-extension.h
new file mode 100644
index 0000000000..b80b3d45dc
--- /dev/null
+++ b/deps/v8/test/cctest/trace-extension.h
@@ -0,0 +1,56 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TEST_CCTEST_TRACE_EXTENSION_H_
+#define V8_TEST_CCTEST_TRACE_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class TraceExtension : public v8::Extension {
+ public:
+ TraceExtension() : v8::Extension("v8/trace", kSource) { }
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> name);
+ static void Trace(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void JSTrace(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void JSEntrySP(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void JSEntrySPLevel2(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static Address GetJsEntrySp();
+ static void InitTraceEnv(TickSample* sample);
+ static void DoTrace(Address fp);
+ private:
+ static Address GetFP(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static const char* kSource;
+};
+
+} } // namespace v8::internal
+
+#endif
diff --git a/deps/v8/test/intl/collator/sr-sort.js b/deps/v8/test/intl/collator/sr-sort.js
index 53c784ce0d..b7e6f30d57 100644
--- a/deps/v8/test/intl/collator/sr-sort.js
+++ b/deps/v8/test/intl/collator/sr-sort.js
@@ -43,4 +43,3 @@ assertEquals('Стара Планина', result[6]);
assertEquals('ћук', result[7]);
assertEquals('чука', result[8]);
assertEquals('џак', result[9]);
-
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index fc3c66b9c1..4ecbf325ad 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -25,9 +25,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# The following tests use getDefaultTimeZone().
[
[ALWAYS, {
- # The following tests use getDefaultTimeZone().
'date-format/resolved-options': [FAIL],
'date-format/timezone': [FAIL],
'general/v8Intl-exists': [FAIL],
diff --git a/deps/v8/test/intl/overrides/caching.js b/deps/v8/test/intl/overrides/caching.js
index 5ff3c390e7..abcd64031f 100644
--- a/deps/v8/test/intl/overrides/caching.js
+++ b/deps/v8/test/intl/overrides/caching.js
@@ -57,4 +57,3 @@ collatorTime = endTime.getTime() - startTime.getTime();
assertTrue(collatorTime < cachedTime);
// Non-cached time is much slower, measured to 12.5 times.
assertTrue(cachedTime < nonCachedTime);
-
diff --git a/deps/v8/test/intl/string/normalization.js b/deps/v8/test/intl/string/normalization.js
new file mode 100644
index 0000000000..446d6277db
--- /dev/null
+++ b/deps/v8/test/intl/string/normalization.js
@@ -0,0 +1,145 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the new String.prototype.normalize method.
+
+
+// Common use case when searching for a 'not very exact' match.
+// These are examples of data one might encounter in real use.
+var testRealUseCases = function() {
+  // Vietnamese legacy text: old Windows 9x / non-Unicode applications use the
+  // windows-1258 code page, which is neither precomposed nor decomposed.
+ assertEquals('ti\u00ea\u0301ng Vi\u00ea\u0323t'.normalize('NFKD'),
+ 'ti\u1ebfng Vi\u1ec7t'.normalize('NFKD')); // all precomposed
+
+ // Various kinds of spaces
+ assertEquals('Google\u0020Maps'.normalize('NFKD'), // normal space
+ 'Google\u00a0Maps'.normalize('NFKD')); // non-breaking space
+ assertEquals('Google\u0020Maps'.normalize('NFKD'), // normal space
+ 'Google\u2002Maps'.normalize('NFKD')); // en-space
+ assertEquals('Google\u0020Maps'.normalize('NFKD'), // normal space
+ 'Google\u2003Maps'.normalize('NFKD')); // em-space
+ assertEquals('Google\u0020Maps'.normalize('NFKD'), // normal space
+ 'Google\u3000Maps'.normalize('NFKC')); // ideographic space
+
+ // Latin small ligature "fi"
+ assertEquals('fi'.normalize('NFKD'), '\ufb01'.normalize('NFKD'));
+
+ // ŀ, Latin small L with middle dot, used in Catalan and often represented
+ // as decomposed for non-Unicode environments ( l + ·)
+ assertEquals('l\u00b7'.normalize('NFKD'), '\u0140'.normalize('NFKD'));
+
+ // Legacy text, Japanese narrow Kana (MS-DOS & Win 3.x time)
+ assertEquals('\u30d1\u30bd\u30b3\u30f3'.normalize('NFKD'), // パソコン : wide
+ '\uff8a\uff9f\uff7f\uff7a\uff9d'.normalize('NFKD')); // パソコン : narrow
+ // Also for Japanese, Latin fullwidth forms vs. ASCII
+ assertEquals('ABCD'.normalize('NFKD'),
+ '\uff21\uff22\uff23\uff24'.normalize('NFKD')); // ABCD, fullwidth
+}();
+
+
+var testEdgeCases = function() {
+ // Make sure we throw RangeError, as the standard requires.
+ assertThrows('"".normalize(1234)', RangeError);
+ assertThrows('"".normalize("BAD")', RangeError);
+
+ // The standard does not say what kind of exceptions we should throw, so we
+ // will not be specific. But we still test that we throw errors.
+ assertThrows('s.normalize()'); // s is not defined
+ assertThrows('var s = null; s.normalize()');
+ assertThrows('var s = undefined; s.normalize()');
+ assertThrows('var s = 1234; s.normalize()'); // no normalize for non-strings
+}();
+
+
+// Several kinds of mappings. No need to be comprehensive: we don't test
+// the ICU functionality, only the C - JavaScript 'glue'.
+var testData = [
+  // org, NFC (also the default), NFD, NFKC, NFKD
+ ['\u00c7', // Ç : Combining sequence, Latin 1
+ '\u00c7', '\u0043\u0327',
+ '\u00c7', '\u0043\u0327'],
+ ['\u0218', // Ș : Combining sequence, non-Latin 1
+ '\u0218', '\u0053\u0326',
+ '\u0218', '\u0053\u0326'],
+ ['\uac00', // 가 : Hangul
+ '\uac00', '\u1100\u1161',
+ '\uac00', '\u1100\u1161'],
+ ['\uff76', // カ : Narrow Kana
+ '\uff76', '\uff76',
+ '\u30ab', '\u30ab'],
+ ['\u00bc', // ¼ : Fractions
+ '\u00bc', '\u00bc',
+ '\u0031\u2044\u0034', '\u0031\u2044\u0034'],
+ ['\u01c6', // dž : Latin ligature
+ '\u01c6', '\u01c6',
+ '\u0064\u017e', '\u0064\u007a\u030c'],
+ ['s\u0307\u0323', // s + dot above + dot below, ordering of combining marks
+ '\u1e69', 's\u0323\u0307',
+ '\u1e69', 's\u0323\u0307'],
+ ['\u3300', // ㌀ : Squared characters
+ '\u3300', '\u3300',
+ '\u30a2\u30d1\u30fc\u30c8', // アパート
+ '\u30a2\u30cf\u309a\u30fc\u30c8'], // アパート
+ ['\ufe37', // ︷ : Vertical forms
+ '\ufe37', '\ufe37',
+ '{' , '{'],
+ ['\u2079', // ⁹ : superscript 9
+ '\u2079', '\u2079',
+ '9', '9'],
+ ['\ufee5\ufee6\ufee7\ufee8', // Arabic forms
+ '\ufee5\ufee6\ufee7\ufee8', '\ufee5\ufee6\ufee7\ufee8',
+ '\u0646\u0646\u0646\u0646', '\u0646\u0646\u0646\u0646'],
+ ['\u2460', // ① : Circled
+ '\u2460', '\u2460',
+ '1', '1'],
+ ['\u210c', // ℌ : Font variants
+ '\u210c', '\u210c',
+ 'H', 'H'],
+ ['\u2126', // Ω : Singleton, OHM sign vs. Greek capital letter OMEGA
+ '\u03a9', '\u03a9',
+ '\u03a9', '\u03a9'],
+ ['\ufdfb', // Long ligature, ARABIC LIGATURE JALLAJALALOUHOU
+ '\ufdfb', '\ufdfb',
+ '\u062C\u0644\u0020\u062C\u0644\u0627\u0644\u0647',
+ '\u062C\u0644\u0020\u062C\u0644\u0627\u0644\u0647']
+];
+
+var testArray = function() {
+ var kNFC = 1, kNFD = 2, kNFKC = 3, kNFKD = 4;
+ for (var i = 0; i < testData.length; ++i) {
+ // the original, NFC and NFD should normalize to the same thing
+ for (var column = 0; column < 3; ++column) {
+ var str = testData[i][column];
+ assertEquals(str.normalize(), testData[i][kNFC]); // defaults to NFC
+ assertEquals(str.normalize('NFC'), testData[i][kNFC]);
+ assertEquals(str.normalize('NFD'), testData[i][kNFD]);
+ assertEquals(str.normalize('NFKC'), testData[i][kNFKC]);
+ assertEquals(str.normalize('NFKD'), testData[i][kNFKD]);
+ }
+ }
+}();
diff --git a/deps/v8/test/message/instanceof.js b/deps/v8/test/message/instanceof.js
new file mode 100644
index 0000000000..1d55e0f94e
--- /dev/null
+++ b/deps/v8/test/message/instanceof.js
@@ -0,0 +1,28 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+1 instanceof 2;
diff --git a/deps/v8/test/message/instanceof.out b/deps/v8/test/message/instanceof.out
new file mode 100644
index 0000000000..d279bc44e7
--- /dev/null
+++ b/deps/v8/test/message/instanceof.out
@@ -0,0 +1,5 @@
+*%(basename)s:28: TypeError: Expecting a function in instanceof check, but got 2
+1 instanceof 2;
+ ^
+TypeError: Expecting a function in instanceof check, but got 2
+ at *%(basename)s:28:3
diff --git a/deps/v8/test/message/isvar.js b/deps/v8/test/message/isvar.js
index 67793a00b9..fedf9d5f68 100644
--- a/deps/v8/test/message/isvar.js
+++ b/deps/v8/test/message/isvar.js
@@ -28,4 +28,4 @@
// Flags: --allow-natives-syntax
var x;
%IS_VAR(x);
-%IS_VAR(x+x); \ No newline at end of file
+%IS_VAR(x+x);
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 234bf0f35c..00f6e34720 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -25,9 +25,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# All tests in the bug directory are expected to fail.
[
[ALWAYS, {
- # All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
}], # ALWAYS
]
diff --git a/deps/v8/test/message/paren_in_arg_string.js b/deps/v8/test/message/paren_in_arg_string.js
index bab762a6a4..83ba7c0859 100644
--- a/deps/v8/test/message/paren_in_arg_string.js
+++ b/deps/v8/test/message/paren_in_arg_string.js
@@ -26,4 +26,4 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var paren_in_arg_string_good = new Function('x', 'return;');
-var paren_in_arg_string_bad = new Function(')', 'return;'); \ No newline at end of file
+var paren_in_arg_string_bad = new Function(')', 'return;');
diff --git a/deps/v8/test/message/replacement-marker-as-argument.js b/deps/v8/test/message/replacement-marker-as-argument.js
index 9036654d98..22a68ecbe5 100644
--- a/deps/v8/test/message/replacement-marker-as-argument.js
+++ b/deps/v8/test/message/replacement-marker-as-argument.js
@@ -28,4 +28,4 @@
"use strict";
var o = { get "%3" (){} };
-o["%3"] = 10; \ No newline at end of file
+o["%3"] = 10;
diff --git a/deps/v8/test/message/single-function-literal.js b/deps/v8/test/message/single-function-literal.js
index 372063737f..96d3bd663a 100644
--- a/deps/v8/test/message/single-function-literal.js
+++ b/deps/v8/test/message/single-function-literal.js
@@ -29,4 +29,4 @@
var single_function_good = "(function() { return 5; })";
%CompileString(single_function_good, true);
var single_function_bad = "(function() { return 5; })();";
-%CompileString(single_function_bad, true); \ No newline at end of file
+%CompileString(single_function_bad, true);
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index e4f3f5587a..b472f9cfb3 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -73,7 +73,7 @@ class MessageTestSuite(testsuite.TestSuite):
return f.read()
def _IgnoreLine(self, string):
- """Ignore empty lines, valgrind output and Android output."""
+ """Ignore empty lines, valgrind output, Android output."""
if not string: return True
return (string.startswith("==") or string.startswith("**") or
string.startswith("ANDROID") or
diff --git a/deps/v8/test/mjsunit/allocation-folding.js b/deps/v8/test/mjsunit/allocation-folding.js
index ec07392f2c..fcf909d6af 100644
--- a/deps/v8/test/mjsunit/allocation-folding.js
+++ b/deps/v8/test/mjsunit/allocation-folding.js
@@ -100,3 +100,16 @@ gc();
assertEquals(result[1], 4);
assertEquals(result2[1], 6);
+
+// Test to almost exceed the Page::MaxRegularHeapObjectSize limit.
+
+function boom() {
+ var a1 = new Array(84632);
+ var a2 = new Array(84632);
+ var a3 = new Array(84632);
+ return [ a1, a2, a3 ];
+}
+
+boom(); boom(); boom();
+%OptimizeFunctionOnNextCall(boom);
+boom();
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index f32344a405..cd086d3506 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --track-allocation-sites --noalways-opt
+// Flags: --noalways-opt
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
@@ -35,11 +35,6 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
-// Reset the GC stress mode to be off. Needed because AllocationMementos only
-// live for one gc, so a gc that happens in certain fragile areas of the test
-// can break assumptions.
-%SetFlags("--gc-interval=-1")
-
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
@@ -148,8 +143,12 @@ if (support_smi_only_arrays) {
assertKind(elements_kind.fast_double, obj);
obj = fastliteralcase([3, 6, 2], 1.5);
assertKind(elements_kind.fast_double, obj);
+
+ // Note: thanks to pessimistic transition store stubs, we'll attempt
+ // to transition to the most general elements kind seen at a particular
+ // store site. So, the elements kind will be double.
obj = fastliteralcase([2, 6, 3], 2);
- assertKind(elements_kind.fast_smi_only, obj);
+ assertKind(elements_kind.fast_double, obj);
}
// Verify that we will not pretransition the double->fast path.
diff --git a/deps/v8/test/mjsunit/arguments-apply-deopt.js b/deps/v8/test/mjsunit/arguments-apply-deopt.js
new file mode 100644
index 0000000000..b7251af5aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/arguments-apply-deopt.js
@@ -0,0 +1,77 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+(function ApplyArgumentsDeoptInReceiverMapCheck() {
+ function invoker(h, r) {
+ return function XXXXX() {
+ var res = h.apply({ fffffff : r(this) }, arguments);
+ return res;
+ };
+ }
+
+ var y = invoker(m, selfOf);
+
+ function selfOf(c) {
+ var sssss = c.self_;
+ return sssss;
+ }
+
+ function m() {
+ return this.fffffff;
+ }
+
+ y.apply({ self_ : 3 });
+ y.apply({ self_ : 3 });
+ y.apply({ self_ : 3 });
+
+ %OptimizeFunctionOnNextCall(y);
+
+ assertEquals(y.apply({ self_ : 3, uuu : 4 }), 3);
+})();
+
+(function ApplyArgumentsDeoptInReceiverExplicit() {
+ function f() { return this + 21; }
+
+ function deopt() {
+ %DeoptimizeFunction(XXXXX);
+ return 21;
+ }
+
+ function XXXXX() {
+ return f.apply(deopt(), arguments);
+ };
+
+ XXXXX();
+ XXXXX();
+ XXXXX();
+
+ %OptimizeFunctionOnNextCall(XXXXX);
+
+ assertEquals(42, XXXXX());
+})();
diff --git a/deps/v8/test/mjsunit/arguments-load-across-eval.js b/deps/v8/test/mjsunit/arguments-load-across-eval.js
index e97c11329e..295b62cd46 100644
--- a/deps/v8/test/mjsunit/arguments-load-across-eval.js
+++ b/deps/v8/test/mjsunit/arguments-load-across-eval.js
@@ -82,5 +82,3 @@ function testShadowing(x, h) {
}
testShadowing(1, function() { return 2; });
-
-
diff --git a/deps/v8/test/mjsunit/arguments-read-and-assignment.js b/deps/v8/test/mjsunit/arguments-read-and-assignment.js
index c5d34bfa9b..fba052bfb7 100644
--- a/deps/v8/test/mjsunit/arguments-read-and-assignment.js
+++ b/deps/v8/test/mjsunit/arguments-read-and-assignment.js
@@ -161,4 +161,4 @@ function weirdargs(a,b,c) { if (!a) return arguments;
var args1 = weirdargs(false, null, 40);
var res = weirdargs(true, args1, 15);
assertEquals(40, res[0], "return old args element");
-assertEquals(15, res[1], "return own args element"); \ No newline at end of file
+assertEquals(15, res[1], "return own args element");
diff --git a/deps/v8/test/mjsunit/arguments.js b/deps/v8/test/mjsunit/arguments.js
index 78b7722e6d..56c1d7224d 100644
--- a/deps/v8/test/mjsunit/arguments.js
+++ b/deps/v8/test/mjsunit/arguments.js
@@ -187,4 +187,4 @@ function arg_del(x) { return delete arguments[x]; }
function arg_set(x) { return (arguments[x] = 117); }
assertEquals(undefined, arg_get(0xFFFFFFFF));
assertEquals(true, arg_del(0xFFFFFFFF));
-assertEquals(117, arg_set(0xFFFFFFFF)); \ No newline at end of file
+assertEquals(117, arg_set(0xFFFFFFFF));
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index 72ff12c08f..7cd421bd1b 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --track-allocation-sites --noalways-opt
+// Flags: --noalways-opt
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
@@ -35,11 +35,6 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
-// Reset the GC stress mode to be off. Needed because AllocationMementos only
-// live for one gc, so a gc that happens in certain fragile areas of the test
-// can break assumptions.
-%SetFlags("--gc-interval=-1")
-
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
@@ -138,8 +133,8 @@ if (support_smi_only_arrays) {
})();
- // Test: Ensure that bailouts from the stub don't deopt a crankshafted
- // method with a call to that stub.
+ // Test: Ensure that inlined array calls in crankshaft learn from deopts
+ // based on the move to a dictionary for the array.
(function() {
function bar(len) {
return new Array(len);
@@ -152,10 +147,16 @@ if (support_smi_only_arrays) {
a = bar(10);
assertKind(elements_kind.fast, a);
assertOptimized(bar);
- // The stub bails out, but the method call should be fine.
+ // bar should deopt because the length is too large.
+ a = bar(100000);
+ assertUnoptimized(bar);
+ assertKind(elements_kind.dictionary, a);
+ // The allocation site now has feedback that means the array constructor
+ // will not be inlined.
+ %OptimizeFunctionOnNextCall(bar);
a = bar(100000);
- assertOptimized(bar);
assertKind(elements_kind.dictionary, a);
+ assertOptimized(bar);
// If the argument isn't a smi, it bails out as well
a = bar("oops");
@@ -172,8 +173,12 @@ if (support_smi_only_arrays) {
barn(1, 2, 3);
assertOptimized(barn);
a = barn(1, "oops", 3);
- // The stub should bail out but the method should remain optimized.
+ // The method should deopt, but learn from the failure to avoid inlining
+ // the array.
assertKind(elements_kind.fast, a);
+ assertUnoptimized(barn);
+ %OptimizeFunctionOnNextCall(barn);
+ a = barn(1, "oops", 3);
assertOptimized(barn);
})();
@@ -219,4 +224,29 @@ if (support_smi_only_arrays) {
assertFalse(Realm.eval(contextB, "bar2();") instanceof Array);
assertTrue(Realm.eval(contextB, "bar2() instanceof Array"));
})();
+
+ // Test: create array with packed feedback, then optimize/inline
+ // function. Verify that if we ask for a holey array then we deopt.
+ // Reoptimization will proceed with the correct feedback and we
+ // won't deopt anymore.
+ (function() {
+ function bar(len) { return new Array(len); }
+ bar(0);
+ bar(0);
+ %OptimizeFunctionOnNextCall(bar);
+ a = bar(0);
+ assertOptimized(bar);
+ assertFalse(isHoley(a));
+ a = bar(1); // ouch!
+ assertUnoptimized(bar);
+ assertTrue(isHoley(a));
+ // Try again
+ %OptimizeFunctionOnNextCall(bar);
+ a = bar(100);
+ assertOptimized(bar);
+ assertTrue(isHoley(a));
+ a = bar(0);
+ assertOptimized(bar);
+ assertTrue(isHoley(a));
+ })();
}
diff --git a/deps/v8/test/mjsunit/array-feedback.js b/deps/v8/test/mjsunit/array-feedback.js
index 6b1cbb3f5f..4129be1f88 100644
--- a/deps/v8/test/mjsunit/array-feedback.js
+++ b/deps/v8/test/mjsunit/array-feedback.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --track-allocation-sites --noalways-opt
+// Flags: --noalways-opt
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
@@ -35,11 +35,6 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
-// Reset the GC stress mode to be off. Needed because AllocationMementos only
-// live for one gc, so a gc that happens in certain fragile areas of the test
-// can break assumptions.
-%SetFlags("--gc-interval=-1")
-
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
@@ -90,66 +85,66 @@ if (support_smi_only_arrays) {
// Verify that basic elements kind feedback works for non-constructor
// array calls (as long as the call is made through an IC, and not
// a CallStub).
- (function (){
- function create0() {
- return Array();
- }
-
- // Calls through ICs need warm up through uninitialized, then
- // premonomorphic first.
- create0();
- create0();
- a = create0();
- assertKind(elements_kind.fast_smi_only, a);
- a[0] = 3.5;
- b = create0();
- assertKind(elements_kind.fast_double, b);
-
- function create1(arg) {
- return Array(arg);
- }
-
- create1(0);
- create1(0);
- a = create1(0);
- assertFalse(isHoley(a));
- assertKind(elements_kind.fast_smi_only, a);
- a[0] = "hello";
- b = create1(10);
- assertTrue(isHoley(b));
- assertKind(elements_kind.fast, b);
-
- a = create1(100000);
- assertKind(elements_kind.dictionary, a);
-
- function create3(arg1, arg2, arg3) {
- return Array(arg1, arg2, arg3);
- }
-
- create3();
- create3();
- a = create3(1,2,3);
- a[0] = 3.5;
- b = create3(1,2,3);
- assertKind(elements_kind.fast_double, b);
- assertFalse(isHoley(b));
- })();
+ // (function (){
+ // function create0() {
+ // return Array();
+ // }
+
+ // // Calls through ICs need warm up through uninitialized, then
+ // // premonomorphic first.
+ // create0();
+ // create0();
+ // a = create0();
+ // assertKind(elements_kind.fast_smi_only, a);
+ // a[0] = 3.5;
+ // b = create0();
+ // assertKind(elements_kind.fast_double, b);
+
+ // function create1(arg) {
+ // return Array(arg);
+ // }
+
+ // create1(0);
+ // create1(0);
+ // a = create1(0);
+ // assertFalse(isHoley(a));
+ // assertKind(elements_kind.fast_smi_only, a);
+ // a[0] = "hello";
+ // b = create1(10);
+ // assertTrue(isHoley(b));
+ // assertKind(elements_kind.fast, b);
+
+ // a = create1(100000);
+ // assertKind(elements_kind.dictionary, a);
+
+ // function create3(arg1, arg2, arg3) {
+ // return Array(arg1, arg2, arg3);
+ // }
+
+ // create3();
+ // create3();
+ // a = create3(1,2,3);
+ // a[0] = 3.5;
+ // b = create3(1,2,3);
+ // assertKind(elements_kind.fast_double, b);
+ // assertFalse(isHoley(b));
+ // })();
// Verify that keyed calls work
- (function (){
- function create0(name) {
- return this[name]();
- }
+ // (function (){
+ // function create0(name) {
+ // return this[name]();
+ // }
- name = "Array";
- create0(name);
- create0(name);
- a = create0(name);
- a[0] = 3.5;
- b = create0(name);
- assertKind(elements_kind.fast_double, b);
- })();
+ // name = "Array";
+ // create0(name);
+ // create0(name);
+ // a = create0(name);
+ // a[0] = 3.5;
+ // b = create0(name);
+ // assertKind(elements_kind.fast_double, b);
+ // })();
// Verify that the IC can't be spoofed by patching
@@ -173,28 +168,28 @@ if (support_smi_only_arrays) {
// though the type information is reset.
// TODO(mvstanton): instead, consume the type feedback gathered up
// until crankshaft time.
- (function (){
- function create0() {
- return Array();
- }
-
- create0();
- create0();
- a = create0();
- a[0] = 3.5;
- %OptimizeFunctionOnNextCall(create0);
- create0();
- // This test only makes sense if crankshaft is allowed
- if (4 != %GetOptimizationStatus(create0)) {
- create0();
- b = create0();
- assertKind(elements_kind.fast_smi_only, b);
- b[0] = 3.5;
- c = create0();
- assertKind(elements_kind.fast_double, c);
- assertOptimized(create0);
- }
- })();
+ // (function (){
+ // function create0() {
+ // return Array();
+ // }
+
+ // create0();
+ // create0();
+ // a = create0();
+ // a[0] = 3.5;
+ // %OptimizeFunctionOnNextCall(create0);
+ // create0();
+ // // This test only makes sense if crankshaft is allowed
+ // if (4 != %GetOptimizationStatus(create0)) {
+ // create0();
+ // b = create0();
+ // assertKind(elements_kind.fast_smi_only, b);
+ // b[0] = 3.5;
+ // c = create0();
+ // assertKind(elements_kind.fast_double, c);
+ // assertOptimized(create0);
+ // }
+ // })();
// Verify that cross context calls work
diff --git a/deps/v8/test/mjsunit/array-iteration.js b/deps/v8/test/mjsunit/array-iteration.js
index 033bb5443a..d11f984bee 100644
--- a/deps/v8/test/mjsunit/array-iteration.js
+++ b/deps/v8/test/mjsunit/array-iteration.js
@@ -225,4 +225,3 @@
assertEquals(2, count);
})();
-
diff --git a/deps/v8/test/mjsunit/array-literal-feedback.js b/deps/v8/test/mjsunit/array-literal-feedback.js
index d2245c62a2..cfda0f6d5f 100644
--- a/deps/v8/test/mjsunit/array-literal-feedback.js
+++ b/deps/v8/test/mjsunit/array-literal-feedback.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --track-allocation-sites --noalways-opt
+// Flags: --noalways-opt
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
@@ -88,11 +88,15 @@ if (support_smi_only_arrays) {
}
get_literal(3);
- get_literal(3);
- %OptimizeFunctionOnNextCall(get_literal);
+  // It's important to store a before we crankshaft get_literal, because
+  // mementos won't be created from crankshafted code at all.
a = get_literal(3);
+ %OptimizeFunctionOnNextCall(get_literal);
+ get_literal(3);
assertOptimized(get_literal);
assertTrue(%HasFastSmiElements(a));
+ // a has a memento so the transition caused by the store will affect the
+ // boilerplate.
a[0] = 3.5;
// We should have transitioned the boilerplate array to double, and
diff --git a/deps/v8/test/mjsunit/array-natives-elements.js b/deps/v8/test/mjsunit/array-natives-elements.js
index 04c2f73d7e..cf848bb4b9 100644
--- a/deps/v8/test/mjsunit/array-natives-elements.js
+++ b/deps/v8/test/mjsunit/array-natives-elements.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays
-// Flags: --notrack-allocation-sites
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile time
diff --git a/deps/v8/test/mjsunit/array-non-smi-length.js b/deps/v8/test/mjsunit/array-non-smi-length.js
index 23a25ee797..11277aff60 100644
--- a/deps/v8/test/mjsunit/array-non-smi-length.js
+++ b/deps/v8/test/mjsunit/array-non-smi-length.js
@@ -43,4 +43,3 @@ function TestNonSmiArrayLength() {
}
TestNonSmiArrayLength();
-
diff --git a/deps/v8/test/mjsunit/array-pop.js b/deps/v8/test/mjsunit/array-pop.js
index f193f09c2f..17771839ab 100644
--- a/deps/v8/test/mjsunit/array-pop.js
+++ b/deps/v8/test/mjsunit/array-pop.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+//
// Check pops with various number of arguments.
(function() {
var a = [];
@@ -121,3 +123,18 @@
assertEquals(99 - i, x.pop(), i + 'th iteration');
}
})();
+
+(function () {
+ function f(a, deopt) {
+ var v = a.pop() ? 1 : 2;
+ if (deopt) %DeoptimizeFunction(f);
+ return v;
+ }
+
+ var a = [true, true, true, true]
+ assertEquals(1, f(a, false));
+ assertEquals(1, f(a, false));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(1, f(a, false));
+ assertEquals(1, f(a, true));
+})();
diff --git a/deps/v8/test/mjsunit/array-push.js b/deps/v8/test/mjsunit/array-push.js
index 2a25a9cc9e..c87fd128e4 100644
--- a/deps/v8/test/mjsunit/array-push.js
+++ b/deps/v8/test/mjsunit/array-push.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
// Check pushes with various number of arguments.
(function() {
var a = [];
@@ -113,3 +115,34 @@
assertEquals(i + 1, x.length, i + 'th iteration');
}
})();
+
+(function() {
+ function f(a, i) {
+ a.push(i);
+ }
+
+ var a = [1,2,3];
+ a.f = function() { return 10; }
+ f(a, 4);
+ f(a, 5);
+ f(a, 6);
+ f(a, 7);
+ f(a, {});
+ assertEquals(10, a.f());
+})();
+
+(function() {
+ function f(a, i) {
+ a.push(i);
+ }
+
+ var a = [1,2,3];
+ a.f = function() { return 10; }
+ f(a, 4);
+ f(a, 5);
+ f(a, 6);
+ %OptimizeFunctionOnNextCall(f);
+ f(a, 7);
+ f(a, {});
+ assertEquals(10, a.f());
+})();
diff --git a/deps/v8/test/mjsunit/array-tostring.js b/deps/v8/test/mjsunit/array-tostring.js
index 6708657eef..5be3d5045c 100644
--- a/deps/v8/test/mjsunit/array-tostring.js
+++ b/deps/v8/test/mjsunit/array-tostring.js
@@ -156,4 +156,4 @@ for (var i = 0; i < 3; i++) {
}});
}
Number.prototype.arrayToLocaleString = Array.prototype.toLocaleString;
-assertEquals("42,42,42", (42).arrayToLocaleString()); \ No newline at end of file
+assertEquals("42,42,42", (42).arrayToLocaleString());
diff --git a/deps/v8/test/mjsunit/bool-concat.js b/deps/v8/test/mjsunit/bool-concat.js
new file mode 100644
index 0000000000..1306dcbbbc
--- /dev/null
+++ b/deps/v8/test/mjsunit/bool-concat.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function format(a) {
+ if (a) {
+ return "X"+true+"Y";
+ } else {
+ return "X"+false+"Y";
+ }
+}
+
+for (var i = 0; i < 1000; i++) {
+ assertEquals("XtrueY", format(true));
+ assertEquals("XfalseY", format(false));
+}
diff --git a/deps/v8/test/mjsunit/bugs/bug-1344252.js b/deps/v8/test/mjsunit/bugs/bug-1344252.js
index 1723834c09..747ce199ce 100644
--- a/deps/v8/test/mjsunit/bugs/bug-1344252.js
+++ b/deps/v8/test/mjsunit/bugs/bug-1344252.js
@@ -76,4 +76,3 @@ Object.prototype.__defineSetter__('z', function(value) { result_z = value; });
o2.z = 27;
assertEquals(27, result_z);
assertTrue(typeof o2.z == 'undefined');
-
diff --git a/deps/v8/test/mjsunit/bugs/bug-proto.js b/deps/v8/test/mjsunit/bugs/bug-proto.js
index 149088e9de..5638336c45 100644
--- a/deps/v8/test/mjsunit/bugs/bug-proto.js
+++ b/deps/v8/test/mjsunit/bugs/bug-proto.js
@@ -59,4 +59,3 @@ assertSame(undefined, Realm.eval(realmB, "Realm.shared.__proto__"));
Realm.eval(realmB, "Realm.shared.__proto__ = {c: 3}");
assertSame(1, o.a);
assertSame(undefined, o.c);
-
diff --git a/deps/v8/test/mjsunit/closures.js b/deps/v8/test/mjsunit/closures.js
index 7c1197107f..5ce26ca747 100644
--- a/deps/v8/test/mjsunit/closures.js
+++ b/deps/v8/test/mjsunit/closures.js
@@ -44,4 +44,3 @@ function test(n) {
test(1);
test(42);
test(239);
-
diff --git a/deps/v8/test/mjsunit/compare-character.js b/deps/v8/test/mjsunit/compare-character.js
index cabe0137bf..8c812eec0e 100644
--- a/deps/v8/test/mjsunit/compare-character.js
+++ b/deps/v8/test/mjsunit/compare-character.js
@@ -47,4 +47,3 @@ for (var i in a) {
assertEquals(f < x, 'f' < x, ">r" + x);
assertEquals(f <= x, 'f' <= x, ">=r" + x);
}
-
diff --git a/deps/v8/test/mjsunit/compare-nan.js b/deps/v8/test/mjsunit/compare-nan.js
index c4f7817ff8..a4224ff1b3 100644
--- a/deps/v8/test/mjsunit/compare-nan.js
+++ b/deps/v8/test/mjsunit/compare-nan.js
@@ -63,4 +63,3 @@ for (var i in b) {
"assertFalse(" + x + " >= NaN, '' + " + x + " + ' >= NaN');\n";
eval(program);
}
-
diff --git a/deps/v8/test/mjsunit/compiler/alloc-number-debug.js b/deps/v8/test/mjsunit/compiler/alloc-number-debug.js
new file mode 100644
index 0000000000..ccfcc0c185
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/alloc-number-debug.js
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Try to get a GC because of a heap number allocation while we
+// have live values (o) in a register.
+function f(o) {
+ var x = 1.5;
+ var y = 2.5;
+ for (var i = 1; i < 3; i += 1) {
+ %SetAllocationTimeout(1, 0, false);
+ o.val = x + y + i;
+ %SetAllocationTimeout(-1, -1, true);
+ }
+ return o;
+}
+
+var o = { val: 0 };
+f(o);
diff --git a/deps/v8/test/mjsunit/compiler/compare-map-elim.js b/deps/v8/test/mjsunit/compiler/compare-map-elim.js
new file mode 100644
index 0000000000..288d4811a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/compare-map-elim.js
@@ -0,0 +1,51 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --check-elimination
+
+a = {
+ f: function() { this.y = 3; }
+};
+b = {
+ f: function() { this.y = 4; }
+};
+
+function x(z) {
+ return z.f();
+}
+
+x(a);
+x(b);
+x(a);
+x(b);
+x(a);
+x(b);
+
+%OptimizeFunctionOnNextCall(x);
+
+x(a);
+x(b);
diff --git a/deps/v8/test/mjsunit/compiler/compare-map-elim2.js b/deps/v8/test/mjsunit/compiler/compare-map-elim2.js
new file mode 100644
index 0000000000..0c0540ccab
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/compare-map-elim2.js
@@ -0,0 +1,130 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --check-elimination
+
+
+function test_empty() {
+ function foo(o) {
+ return { value: o.value };
+ }
+
+ function Base() {
+ this.v_ = 5;
+ }
+ Base.prototype.__defineGetter__("value", function() { return 1; });
+
+ var a = new Base();
+ a.a = 1;
+ foo(a);
+
+ Base.prototype.__defineGetter__("value", function() { return this.v_; });
+
+ var b = new Base();
+ b.b = 1;
+ foo(b);
+
+ var d = new Base();
+ d.d = 1;
+ d.value;
+
+ %OptimizeFunctionOnNextCall(foo);
+
+ var o = foo(b);
+}
+
+
+function test_narrow1() {
+ function foo(o) {
+ return { value: o.value };
+ }
+
+ function Base() {
+ this.v_ = 5;
+ }
+ Base.prototype.__defineGetter__("value", function() { return 1; });
+
+ var a = new Base();
+ a.a = 1;
+ foo(a);
+
+ Base.prototype.__defineGetter__("value", function() { return this.v_; });
+
+ var b = new Base();
+ b.b = 1;
+ foo(b);
+
+ var c = new Base();
+ c.c = 1;
+ foo(c);
+
+ var d = new Base();
+ d.d = 1;
+ d.value;
+
+ %OptimizeFunctionOnNextCall(foo);
+
+ var o = foo(b);
+}
+
+
+function test_narrow2() {
+ function foo(o, flag) {
+ return { value: o.value(flag) };
+ }
+
+ function Base() {
+ this.v_ = 5;
+ }
+ Base.prototype.value = function(flag) { return flag ? this.v_ : this.v_; };
+
+
+ var a = new Base();
+ a.a = 1;
+ foo(a, false);
+ foo(a, false);
+
+ var b = new Base();
+ b.b = 1;
+ foo(b, true);
+
+ var c = new Base();
+ c.c = 1;
+ foo(c, true);
+
+ var d = new Base();
+ d.d = 1;
+ d.value(true);
+
+ %OptimizeFunctionOnNextCall(foo);
+
+ var o = foo(b);
+}
+
+test_empty();
+test_narrow1();
+test_narrow2();
diff --git a/deps/v8/test/mjsunit/compiler/compare-objeq-elim.js b/deps/v8/test/mjsunit/compiler/compare-objeq-elim.js
new file mode 100644
index 0000000000..4492df45c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/compare-objeq-elim.js
@@ -0,0 +1,85 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --check-elimination
+
+function A(x, y) {
+ this.x = x;
+ this.y = y;
+}
+
+function B(x, y) {
+ this.x = x;
+ this.y = y;
+}
+
+function F1(a, b) {
+ if (a == b) return a.x;
+ else return b.x;
+}
+
+function F2(a, b) {
+ if (a == b) return a.x;
+ else return b.x;
+}
+
+function F3(a, b) {
+ var f = a.y;
+ if (a == b) return a.x;
+ else return b.x;
+}
+
+function F4(a, b) {
+ var f = b.y;
+ if (a == b) return a.x;
+ else return b.x;
+}
+
+%NeverOptimizeFunction(test);
+
+function test(f, a, b) {
+ f(a, a);
+ f(a, b);
+ f(b, a);
+ f(b, c);
+ f(b, b);
+ f(c, c);
+
+ %OptimizeFunctionOnNextCall(f);
+
+ assertEquals(a.x, f(a, a));
+ assertEquals(b.x, f(b, b));
+}
+
+var a = new A(3, 5);
+var b = new B(2, 6);
+var c = new A(1, 7);
+
+test(F1, a, c);
+test(F2, a, b);
+test(F3, a, b);
+test(F4, a, b);
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
index 699534f665..2a20790eaa 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
@@ -59,3 +59,5 @@ assertUnoptimized(add_field, "no sync");
%UnblockConcurrentRecompilation();
// Sync with background thread to conclude optimization that bailed out.
assertUnoptimized(add_field, "sync");
+// Clear type info for stress runs.
+%ClearFunctionTypeFeedback(add_field);
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
index e126465a95..3abf292c92 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
@@ -54,3 +54,5 @@ assertUnoptimized(f, "no sync");
// Optimization eventually bails out due to map dependency.
assertUnoptimized(f, "sync");
assertEquals(2, f(o));
+// Clear type info for stress runs.
+%ClearFunctionTypeFeedback(f);
diff --git a/deps/v8/test/mjsunit/compiler/control-flow-1.js b/deps/v8/test/mjsunit/compiler/control-flow-1.js
index 973d9b6668..ca7ad87850 100644
--- a/deps/v8/test/mjsunit/compiler/control-flow-1.js
+++ b/deps/v8/test/mjsunit/compiler/control-flow-1.js
@@ -52,4 +52,4 @@ function g1(x, y) {
}
assertTrue(g1(0, 0));
-assertFalse(g1(0, 1));
\ No newline at end of file
+assertFalse(g1(0, 1));
diff --git a/deps/v8/test/mjsunit/compiler/dead-loops-neg.js b/deps/v8/test/mjsunit/compiler/dead-loops-neg.js
new file mode 100644
index 0000000000..dbf500b48e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dead-loops-neg.js
@@ -0,0 +1,100 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Presents negative opportunities for dead loop removal.
+
+function loop1() {
+ while (true) return;
+}
+
+function loop2() {
+ var i = 0;
+ while (i++ < 10) ;
+ return i; // value of {i} escapes.
+ // can only remove the loop with induction variable analysis.
+}
+
+function loop3() {
+ var i = 0;
+ for (; i < 10; i++) ;
+ return i; // value of {i} escapes.
+ // can only remove the loop with induction variable analysis.
+}
+
+function loop4() {
+ var a = 0;
+ for (var i = 0; i < 10; i++) a++;
+ return a; // value of {a} escapes.
+ // can only remove the loop with induction variable analysis.
+}
+
+function loop5() {
+ var a = new Int32Array(4), sum = 0;
+ for (var i = 0; i < a.length; i++) {
+ sum += a[i];
+ }
+ return sum; // {sum} escapes.
+ // can only remove the loop by figuring out that all elements of {a} are 0.
+}
+
+function loop6(a) {
+ for (var i = 0; i < a; i++) ; // implicit a.valueOf().
+ // can only remove the loop by guarding on the type of a.
+}
+
+function loop7(a) {
+ for (var i = 0; i < 10; i++) a.toString(); // unknown side-effect on a.
+ // can only remove the loop by guarding on the type of a.
+}
+
+function loop8(a) {
+ for (var i = 0; i < 10; i++) a.valueOf(); // unknown side-effect on a.
+ // can only remove the loop by guarding on the type of a.
+}
+
+var no_params_loops = [loop1, loop2, loop3, loop4, loop5, loop6];
+var params_loops = [loop6, loop7, loop8];
+
+for (var i = 0; i < no_params_loops.length; i++) {
+ var f = no_params_loops[i];
+ f();
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+}
+
+for (var i = 0; i < params_loops.length; i++) {
+ var f = params_loops[i];
+ f(3);
+ f(7);
+ f(11);
+ %OptimizeFunctionOnNextCall(f);
+ f(9);
+}
diff --git a/deps/v8/test/mjsunit/compiler/dead-loops.js b/deps/v8/test/mjsunit/compiler/dead-loops.js
new file mode 100644
index 0000000000..2301b129dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dead-loops.js
@@ -0,0 +1,87 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Presents opportunities for dead loop removal.
+
+function loop1() {
+ while (false) ; // doesn't even loop.
+}
+
+function loop2() {
+ var i = 0;
+ while (i++ < 10) ; // nothing in the body.
+}
+
+function loop3() {
+ for (var i = 0; i < 10; i++) ; // nothing in the body.
+}
+
+function loop4() {
+ var a = 0;
+ for (var i = 0; i < 10; i++) a++; // {a} is dead after the loop.
+}
+
+function loop5() {
+ var a = new Int32Array(4), sum = 0;
+ for (var i = 0; i < a.length; i++) {
+ // Involves only reads on typed arrays, and {i} doesn't overflow.
+ sum += a[i];
+ }
+}
+
+function loop6() {
+ var a = new Array(4), sum = 0;
+ for (var i = 0; i < a.length; i++) {
+ // Involves only in-bounds read on the array {a}.
+ // Have to prove that {a} doesn't have getters...?
+ sum += a[i];
+ }
+}
+
+function loop7() {
+ for (var i = 0; i < 10; i++) {
+ new Object(); // Have to prove the allocation doesn't escape.
+ }
+}
+
+function loop8() {
+ for (var i = 0; i < 10; i++) {
+ var x = {}; // Have to prove the allocation doesn't escape.
+ }
+}
+
+var loops = [loop1, loop2, loop3, loop4, loop5, loop6, loop7, loop8];
+
+for (var i = 0; i < loops.length; i++) {
+ var f = loops[i];
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+}
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js b/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js
new file mode 100644
index 0000000000..bdab182fed
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-arguments.js
@@ -0,0 +1,187 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --use-escape-analysis --expose-gc
+
+
+// Simple test of capture
+(function testCapturedArguments() {
+ function h() {
+ return g.arguments[0];
+ }
+
+ function g(x) {
+ return h();
+ }
+
+ function f() {
+ var l = { y : { z : 4 }, x : 2 }
+ var r = g(l);
+ assertEquals(2, r.x);
+ assertEquals(2, l.x);
+ l.x = 3;
+ l.y.z = 5;
+ // Test that the arguments object is properly
+ // aliased
+ assertEquals(3, r.x);
+ assertEquals(3, l.x);
+ assertEquals(5, r.y.z);
+ }
+
+ f(); f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
+
+
+// Get the arguments object twice, test aliasing
+(function testTwoCapturedArguments() {
+ function h() {
+ return g.arguments[0];
+ }
+
+ function i() {
+ return g.arguments[0];
+ }
+
+ function g(x) {
+ return {h : h() , i : i()};
+ }
+
+ function f() {
+ var l = { y : { z : 4 }, x : 2 }
+ var r = g(l);
+ assertEquals(2, r.h.x)
+ l.y.z = 3;
+ assertEquals(3, r.h.y.z);
+ assertEquals(3, r.i.y.z);
+ }
+
+ f(); f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
+
+
+// Nested arguments object test
+(function testTwoCapturedArgumentsNested() {
+ function i() {
+ return { gx : g.arguments[0], hx : h.arguments[0] };
+ }
+
+ function h(x) {
+ return i();
+ }
+
+ function g(x) {
+ return h(x.y);
+ }
+
+ function f() {
+ var l = { y : { z : 4 }, x : 2 }
+ var r = g(l);
+ assertEquals(2, r.gx.x)
+ assertEquals(4, r.gx.y.z)
+ assertEquals(4, r.hx.z)
+ l.y.z = 3;
+ assertEquals(3, r.gx.y.z)
+ assertEquals(3, r.hx.z)
+ assertEquals(3, l.y.z)
+ }
+
+ f(); f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f(); f();
+})();
+
+
+// Nested arguments object test with different inlining
+(function testTwoCapturedArgumentsNested2() {
+ function i() {
+ return { gx : g.arguments[0], hx : h.arguments[0] };
+ }
+
+ function h(x) {
+ return i();
+ }
+
+ function g(x) {
+ return h(x.y);
+ }
+
+ function f() {
+ var l = { y : { z : 4 }, x : 2 }
+ var r = g(l);
+ assertEquals(2, r.gx.x)
+ assertEquals(4, r.gx.y.z)
+ assertEquals(4, r.hx.z)
+ l.y.z = 3;
+ assertEquals(3, r.gx.y.z)
+ assertEquals(3, r.hx.z)
+ assertEquals(3, l.y.z)
+ }
+
+ %NeverOptimizeFunction(i);
+ f(); f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f(); f();
+})();
+
+
+// Multiple captured argument test
+(function testTwoArgumentsCapture() {
+ function h() {
+ return { a : g.arguments[1], b : g.arguments[0] };
+ }
+
+ function g(x, y) {
+ return h();
+ }
+
+ function f() {
+ var l = { y : { z : 4 }, x : 2 }
+ var k = { t : { u : 3 } };
+ var r = g(k, l);
+ assertEquals(2, r.a.x)
+ assertEquals(4, r.a.y.z)
+ assertEquals(3, r.b.t.u)
+ l.y.z = 6;
+ r.b.t.u = 7;
+ assertEquals(6, r.a.y.z)
+ assertEquals(7, k.t.u)
+ }
+
+ f(); f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f(); f();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis.js b/deps/v8/test/mjsunit/compiler/escape-analysis.js
index dccc476925..b12e7bfca0 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis.js
@@ -303,6 +303,101 @@
})();
+// Test non-shallow nested graph of captured objects with duplicates
+(function testDeepDuplicate() {
+ function constructor1() {
+ this.x = 23;
+ }
+ function constructor2(nested) {
+ this.a = 17;
+ this.b = nested;
+ this.c = 42;
+ }
+ function deep(shouldDeopt) {
+ var o1 = new constructor1();
+ var o2 = new constructor2(o1);
+ var o3 = new constructor2(o1);
+ assertEquals(17, o2.a);
+ assertEquals(23, o2.b.x);
+ assertEquals(42, o2.c);
+ o3.c = 54;
+ o1.x = 99;
+ if (shouldDeopt) %DeoptimizeFunction(deep);
+ assertEquals(99, o1.x);
+ assertEquals(99, o2.b.x);
+ assertEquals(99, o3.b.x);
+ assertEquals(54, o3.c);
+ assertEquals(17, o3.a);
+ assertEquals(42, o2.c);
+ assertEquals(17, o2.a);
+ o3.b.x = 1;
+ assertEquals(1, o1.x);
+ }
+ deep(false); deep(false);
+ %OptimizeFunctionOnNextCall(deep);
+ deep(false); deep(false);
+ deep(true); deep(true);
+})();
+
+
+// Test non-shallow nested graph of captured objects with inlining
+(function testDeepInline() {
+ function h() {
+ return { y : 3 };
+ }
+
+ function g(x) {
+ var u = { x : h() };
+ %DeoptimizeFunction(f);
+ return u;
+ }
+
+ function f() {
+ var l = { dummy : { } };
+ var r = g(l);
+ assertEquals(3, r.x.y);
+ }
+
+ f(); f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
+
+
+// Test two nested objects
+(function testTwoNestedObjects() {
+ function f() {
+ var l = { x : { y : 111 } };
+ var l2 = { x : { y : 111 } };
+ %DeoptimizeFunction(f);
+ assertEquals(111, l.x.y);
+ assertEquals(111, l2.x.y);
+ }
+
+ f(); f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
+
+
+// Test a nested object and a duplicate
+(function testTwoObjectsWithDuplicate() {
+ function f() {
+ var l = { x : { y : 111 } };
+ var dummy = { d : 0 };
+ var l2 = l.x;
+ %DeoptimizeFunction(f);
+ assertEquals(111, l.x.y);
+ assertEquals(111, l2.y);
+ assertEquals(0, dummy.d);
+ }
+
+ f(); f(); f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
+
+
// Test materialization of a field that requires a Smi value.
(function testSmiField() {
var deopt = { deopt:false };
diff --git a/deps/v8/test/mjsunit/compiler/inline-throw.js b/deps/v8/test/mjsunit/compiler/inline-throw.js
index e3aab39efa..099b9d3351 100644
--- a/deps/v8/test/mjsunit/compiler/inline-throw.js
+++ b/deps/v8/test/mjsunit/compiler/inline-throw.js
@@ -66,4 +66,3 @@ try {
} catch(e) {
assertEquals("wow", e);
}
-
diff --git a/deps/v8/test/mjsunit/compiler/lazy-const-lookup.js b/deps/v8/test/mjsunit/compiler/lazy-const-lookup.js
index b4f15a1c9f..ff4558e7ef 100644
--- a/deps/v8/test/mjsunit/compiler/lazy-const-lookup.js
+++ b/deps/v8/test/mjsunit/compiler/lazy-const-lookup.js
@@ -38,4 +38,3 @@ function outer() {
}
outer();
-
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-params.js b/deps/v8/test/mjsunit/compiler/load-elimination-params.js
new file mode 100644
index 0000000000..13a4a8596d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-params.js
@@ -0,0 +1,71 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --load-elimination
+
+// Test local load elimination of redundant loads and stores.
+
+function B(x, y) {
+ this.x = x;
+ this.y = y;
+ return this;
+}
+
+function test_params1(a, b) {
+ var i = a.x;
+ var j = a.x;
+ var k = b.x;
+ var l = b.x;
+ return i + j + k + l;
+}
+
+assertEquals(14, test_params1(new B(3, 4), new B(4, 5)));
+assertEquals(110, test_params1(new B(11, 7), new B(44, 8)));
+
+%OptimizeFunctionOnNextCall(test_params1);
+
+assertEquals(6, test_params1(new B(1, 7), new B(2, 8)));
+
+function test_params2(a, b) {
+ var o = new B(a + 1, b);
+ o.x = a;
+ var i = o.x;
+ o.x = a;
+ var j = o.x;
+ o.x = b;
+ var k = o.x;
+ o.x = b;
+ var l = o.x;
+ return i + j + k + l;
+}
+
+assertEquals(14, test_params2(3, 4));
+assertEquals(110, test_params2(11, 44));
+
+%OptimizeFunctionOnNextCall(test_params2);
+
+assertEquals(6, test_params2(1, 2));
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination.js b/deps/v8/test/mjsunit/compiler/load-elimination.js
index e019508c65..9bf8564308 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination.js
@@ -35,11 +35,34 @@ function B(x, y) {
return this;
}
+function C() {
+}
+
function test_load() {
var a = new B(1, 2);
return a.x + a.x + a.x + a.x;
}
+
+function test_load_from_different_contexts() {
+ var r = 1;
+ this.f = function() {
+ var fr = r;
+ this.g = function(flag) {
+ var gr;
+ if (flag) {
+ gr = r;
+ } else {
+ gr = r;
+ }
+ return gr + r + fr;
+ };
+ };
+ this.f();
+ return this.g(true);
+}
+
+
function test_store_load() {
var a = new B(1, 2);
a.x = 4;
@@ -64,6 +87,31 @@ function test_nonaliasing_store1() {
return f + g + h + a.x;
}
+function test_transitioning_store1() {
+ var a = new B(2, 3);
+ var f = a.x, g = a.y;
+ var b = new B(3, 4);
+ return a.x + a.y;
+}
+
+function test_transitioning_store2() {
+ var b = new C();
+ var a = new B(-1, 5);
+ var f = a.x, g = a.y;
+ b.x = 9;
+ b.y = 11;
+ return a.x + a.y;
+}
+
+var false_v = false;
+function test_transitioning_store3() {
+ var o = new C();
+ var v = o;
+ if (false_v) v = 0;
+ v.x = 20;
+ return o.x;
+}
+
function killall() {
try { } catch(e) { }
}
@@ -100,7 +148,11 @@ function test(x, f) {
}
test(4, test_load);
+test(3, new test_load_from_different_contexts().g);
test(22, test_store_load);
test(8, test_nonaliasing_store1);
+test(5, test_transitioning_store1);
+test(4, test_transitioning_store2);
+test(20, test_transitioning_store3);
test(22, test_store_load_kill);
test(7, test_store_store);
diff --git a/deps/v8/test/mjsunit/compiler/math-floor-global.js b/deps/v8/test/mjsunit/compiler/math-floor-global.js
index 9ec183fab1..3b9d125453 100644
--- a/deps/v8/test/mjsunit/compiler/math-floor-global.js
+++ b/deps/v8/test/mjsunit/compiler/math-floor-global.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --max-new-space-size=256 --allow-natives-syntax
+// Flags: --max-new-space-size=128 --allow-natives-syntax
// Test inlining of Math.floor when assigned to a global.
var flo = Math.floor;
@@ -140,8 +140,9 @@ function test() {
// Test in a loop to cover the custom IC and GC-related issues.
-for (var i = 0; i < 50; i++) {
+for (var i = 0; i < 10; i++) {
test();
+ new Array(i * 10000);
}
@@ -158,4 +159,4 @@ assertEquals(-0, floorsum(1, -0));
%OptimizeFunctionOnNextCall(floorsum);
// The optimized function will deopt. Run it with enough iterations to try
// to optimize via OSR (triggering the bug).
-assertEquals(-0, floorsum(100000, -0));
+assertEquals(-0, floorsum(50000, -0));
diff --git a/deps/v8/test/mjsunit/compiler/math-floor-local.js b/deps/v8/test/mjsunit/compiler/math-floor-local.js
index e44b15c734..fef3347e80 100644
--- a/deps/v8/test/mjsunit/compiler/math-floor-local.js
+++ b/deps/v8/test/mjsunit/compiler/math-floor-local.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --max-new-space-size=256 --allow-natives-syntax
+// Flags: --max-new-space-size=128 --allow-natives-syntax
// Test inlining of Math.floor when assigned to a local.
var test_id = 0;
@@ -140,8 +140,9 @@ function test() {
// Test in a loop to cover the custom IC and GC-related issues.
-for (var i = 0; i < 50; i++) {
+for (var i = 0; i < 10; i++) {
test();
+ new Array(i * 10000);
}
diff --git a/deps/v8/test/mjsunit/compiler/minus-zero.js b/deps/v8/test/mjsunit/compiler/minus-zero.js
index 6efceb54e3..c161257d77 100644
--- a/deps/v8/test/mjsunit/compiler/minus-zero.js
+++ b/deps/v8/test/mjsunit/compiler/minus-zero.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --no-fold-constants
function add(x, y) {
return x + y;
@@ -35,3 +35,59 @@ assertEquals(0, add(0, 0));
assertEquals(0, add(0, 0));
%OptimizeFunctionOnNextCall(add);
assertEquals(-0, add(-0, -0));
+
+
+function test(x, y) {
+ assertTrue(%_IsMinusZero(-0));
+ assertTrue(%_IsMinusZero(1/(-Infinity)));
+ assertTrue(%_IsMinusZero(x));
+
+ assertFalse(%_IsMinusZero(0));
+ assertFalse(%_IsMinusZero(1/Infinity));
+ assertFalse(%_IsMinusZero(0.1));
+ assertFalse(%_IsMinusZero(-0.2));
+ assertFalse(%_IsMinusZero({}));
+ assertFalse(%_IsMinusZero(""));
+ assertFalse(%_IsMinusZero("-0"));
+ assertFalse(%_IsMinusZero(function() {}));
+ assertFalse(%_IsMinusZero(y));
+}
+
+test(-0, 1.2);
+test(-0, 1.2);
+%OptimizeFunctionOnNextCall(test);
+test(-0, 1.2);
+assertOptimized(test);
+
+
+function testsin() {
+ assertTrue(%_IsMinusZero(Math.sin(-0)));
+}
+
+testsin();
+testsin();
+%OptimizeFunctionOnNextCall(testsin);
+testsin();
+
+
+function testfloor() {
+ assertTrue(%_IsMinusZero(Math.floor(-0)));
+ assertFalse(%_IsMinusZero(Math.floor(2)));
+}
+
+testfloor();
+testfloor();
+%OptimizeFunctionOnNextCall(testfloor);
+testfloor();
+
+
+var double_one = Math.cos(0);
+
+function add(a, b) {
+ return a + b;
+}
+
+assertEquals(1, 1/add(double_one, 0));
+assertEquals(1, 1/add(0, double_one));
+%OptimizeFunctionOnNextCall(add);
+assertEquals(1/(-0 + -0), 1/add(-0, -0));
diff --git a/deps/v8/test/mjsunit/compiler/optimized-closures.js b/deps/v8/test/mjsunit/compiler/optimized-closures.js
index eaf75f8d00..499e4d5e24 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-closures.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-closures.js
@@ -51,7 +51,3 @@ f();
assertEquals(42, a[0]);
assertEquals(49, a[7]);
assertEquals(-19, a[23]);
-
-
-
-
diff --git a/deps/v8/test/mjsunit/compiler/osr-with-args.js b/deps/v8/test/mjsunit/compiler/osr-with-args.js
index 44fa1cb2cf..4817ad7d44 100644
--- a/deps/v8/test/mjsunit/compiler/osr-with-args.js
+++ b/deps/v8/test/mjsunit/compiler/osr-with-args.js
@@ -29,7 +29,7 @@
function f() {
var sum = 0;
- for (var i = 0; i < 1000000; i++) {
+ for (var i = 0; i < 100000; i++) {
var t = arguments[0] + 2;
var x = arguments[1] + 2;
var y = t + x + 5;
@@ -39,6 +39,6 @@ function f() {
return sum;
}
-for (var i = 0; i < 4; i++) {
- assertEquals(17000000, f(2, 3));
+for (var i = 0; i < 3; i++) {
+ assertEquals(1700000, f(2, 3));
}
diff --git a/deps/v8/test/mjsunit/compiler/regress-1394.js b/deps/v8/test/mjsunit/compiler/regress-1394.js
index b1ce19267f..fbf435731f 100644
--- a/deps/v8/test/mjsunit/compiler/regress-1394.js
+++ b/deps/v8/test/mjsunit/compiler/regress-1394.js
@@ -56,4 +56,3 @@ for (var i = 0; i < 3; i++) assertEquals(i, f(i));
assertEquals(0, f(0));
assertEquals(1, f(1));
-
diff --git a/deps/v8/test/mjsunit/compiler/regress-3260426.js b/deps/v8/test/mjsunit/compiler/regress-3260426.js
index dfef424e7f..457064d2bb 100644
--- a/deps/v8/test/mjsunit/compiler/regress-3260426.js
+++ b/deps/v8/test/mjsunit/compiler/regress-3260426.js
@@ -33,4 +33,3 @@ function always_false() {}
function test() { return always_false() ? 0 : 1; }
assertEquals(1, test());
-
diff --git a/deps/v8/test/mjsunit/compiler/regress-4.js b/deps/v8/test/mjsunit/compiler/regress-4.js
index 0ec9a12b81..9a212baf2d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-4.js
+++ b/deps/v8/test/mjsunit/compiler/regress-4.js
@@ -34,7 +34,7 @@ function f(p) {
return y+x;
}
-for (var i=0; i<10000000; i++) f(42);
+for (var i=0; i<100000; i++) f(42);
var result = f("foo");
assertEquals("0foo6", result);
diff --git a/deps/v8/test/mjsunit/compiler/regress-arguments.js b/deps/v8/test/mjsunit/compiler/regress-arguments.js
index ebae5a0399..d32b435ff3 100644
--- a/deps/v8/test/mjsunit/compiler/regress-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/regress-arguments.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
// Test of arguments.
// Test passing null or undefined as receiver.
@@ -35,8 +37,15 @@ function h() { return f.apply(void 0, arguments); }
var foo = 42;
-for (var i=0; i<1000000; i++) assertEquals(42, g());
-for (var i=0; i<1000000; i++) assertEquals(42, h());
+for (var i = 0; i < 3; i++) assertEquals(42, g());
+%OptimizeFunctionOnNextCall(g);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(42, g());
+
+for (var i = 0; i < 3; i++) assertEquals(42, h());
+%OptimizeFunctionOnNextCall(h);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(42, h());
var G1 = 21;
var G2 = 22;
@@ -49,4 +58,7 @@ function u() {
Number.prototype.foo = 42;
delete Number.prototype.foo;
-for (var i=0; i<100000; i++) assertEquals(void 0, u());
+for (var i = 0; i < 3; i++) assertEquals(void 0, u());
+%OptimizeFunctionOnNextCall(u);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(void 0, u());
diff --git a/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js b/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js
index 57afb1643e..d95d128a9d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js
+++ b/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js
@@ -53,4 +53,4 @@ for (var n = 0; n < 5; n++) {
withEval(expr, function(a) { return a; });
}
%OptimizeFunctionOnNextCall(withEval);
-withEval(expr, function(a) { return a; });
\ No newline at end of file
+withEval(expr, function(a) { return a; });
diff --git a/deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js b/deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js
index a39d26df0e..33655346a8 100644
--- a/deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js
+++ b/deps/v8/test/mjsunit/compiler/regress-inline-callfunctionstub.js
@@ -43,4 +43,3 @@ main(o.g);
main(o.g);
%OptimizeFunctionOnNextCall(main);
main(o.g);
-
diff --git a/deps/v8/test/mjsunit/compiler/regress-rep-change.js b/deps/v8/test/mjsunit/compiler/regress-rep-change.js
index 937099937d..c8a0983c44 100644
--- a/deps/v8/test/mjsunit/compiler/regress-rep-change.js
+++ b/deps/v8/test/mjsunit/compiler/regress-rep-change.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
// Regression test for the case where a phi has two input operands with
// the same value.
@@ -35,8 +37,11 @@ function test(start) {
for (var i = start; i < 10; i++) { }
}
-var n = 5000000;
+var n = 3;
for (var i = 0; i < n; ++i) {
test(0);
}
+
+%OptimizeFunctionOnNextCall(test);
+test(0);
diff --git a/deps/v8/test/mjsunit/compiler/regress-toint32.js b/deps/v8/test/mjsunit/compiler/regress-toint32.js
index 54c2f76dd7..75892d4775 100644
--- a/deps/v8/test/mjsunit/compiler/regress-toint32.js
+++ b/deps/v8/test/mjsunit/compiler/regress-toint32.js
@@ -42,4 +42,3 @@ assertEquals(G, f(G));
assertEquals(G, f(G));
%OptimizeFunctionOnNextCall(f);
assertEquals(G, f(G));
-
diff --git a/deps/v8/test/mjsunit/compiler/rotate.js b/deps/v8/test/mjsunit/compiler/rotate.js
index 2f4bc5a967..1c81e496ea 100644
--- a/deps/v8/test/mjsunit/compiler/rotate.js
+++ b/deps/v8/test/mjsunit/compiler/rotate.js
@@ -306,5 +306,3 @@ assertEquals(ROR4(0xFFFFFFFF, 40), ROR4_sa40(0xFFFFFFFF));
assertEquals(ROR4(0xFFFFFFFF, 40), ROR4_sa40(0xFFFFFFFF));
%OptimizeFunctionOnNextCall(ROR4_sa40);
assertEquals(ROR4(0xFFFFFFFF, 40), ROR4_sa40(0xFFFFFFFF));
-
-
diff --git a/deps/v8/test/mjsunit/compiler/smi-stores-opt.js b/deps/v8/test/mjsunit/compiler/smi-stores-opt.js
new file mode 100644
index 0000000000..ca0923abc9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/smi-stores-opt.js
@@ -0,0 +1,49 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var o = {a:1.5};
+o.a = 0;
+var a = o.a;
+
+function g() {
+ return 1;
+}
+
+var o2 = {a:{}};
+
+function f() {
+ var result = {a: a};
+ var literal = {x:g()};
+ return [result, literal];
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1, f()[1].x);
diff --git a/deps/v8/test/mjsunit/compiler/to-fast-properties.js b/deps/v8/test/mjsunit/compiler/to-fast-properties.js
new file mode 100644
index 0000000000..26829d95e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/to-fast-properties.js
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This test requires OSR or --stress-runs=3 to optimize the top level script.
+
+for (var i = 0; i < 3; i++) {
+ // HToFastProperties is used for top-level object literals that have
+ // a function property.
+ var obj = {
+ index: function() { return i; },
+ x: 0
+ }
+ var n = 10000;
+ // Loop to hit OSR.
+ for (var j = 0; j < n; j++) {
+ obj.x += i;
+ }
+ assertEquals(obj.index() * n, obj.x);
+}
diff --git a/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js b/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js
index d5b1b99491..1b6f97b433 100644
--- a/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js
+++ b/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js
@@ -55,3 +55,5 @@ assertUnoptimized(f1, "no sync");
// Sync with background thread to conclude optimization, which bails out
// due to map dependency.
assertUnoptimized(f1, "sync");
+// Clear type info for stress runs.
+%ClearFunctionTypeFeedback(f1);
diff --git a/deps/v8/test/mjsunit/const-declaration.js b/deps/v8/test/mjsunit/const-declaration.js
index 48c0cf2717..e7bb678eb6 100644
--- a/deps/v8/test/mjsunit/const-declaration.js
+++ b/deps/v8/test/mjsunit/const-declaration.js
@@ -169,4 +169,3 @@
}
f("const x = 0;");
})();
-
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index 9e6b2c6306..f429c6ca10 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -128,30 +128,6 @@ test(function mathMax() {
assertEquals("Infinity", String(1 / Math.max(0.0, -0.0)));
});
-test(function mathSin() {
- assertEquals(0.0, Math.sin(0.0));
- assertTrue(0.8 < Math.sin(1) && Math.sin(1) < 0.9);
- assertEquals("NaN", String(Math.sin(Infinity)));
- assertEquals("NaN", String(Math.sin(-Infinity)));
- assertEquals("NaN", String(Math.sin(NaN)));
-});
-
-test(function mathCos() {
- assertEquals(1.0, Math.cos(0.0));
- assertTrue(0.5 < Math.cos(1) && Math.cos(1) < 0.6);
- assertEquals("NaN", String(Math.cos(Infinity)));
- assertEquals("NaN", String(Math.cos(-Infinity)));
- assertEquals("NaN", String(Math.cos(NaN)));
-});
-
-test(function mathTan() {
- assertEquals(0.0, Math.tan(0.0));
- assertTrue(1.5 < Math.tan(1) && Math.tan(1) < 1.6);
- assertEquals("NaN", String(Math.tan(Infinity)));
- assertEquals("NaN", String(Math.tan(-Infinity)));
- assertEquals("NaN", String(Math.tan(NaN)));
-});
-
test(function mathExp() {
assertEquals(1.0, Math.exp(0.0));
assertTrue(2.7 < Math.exp(1) && Math.exp(1) < 2.8);
diff --git a/deps/v8/test/mjsunit/context-calls-maintained.js b/deps/v8/test/mjsunit/context-calls-maintained.js
new file mode 100644
index 0000000000..95bf55240b
--- /dev/null
+++ b/deps/v8/test/mjsunit/context-calls-maintained.js
@@ -0,0 +1,116 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+function clear_all_ics() {
+ %NotifyContextDisposed();
+ gc();
+ gc();
+ gc();
+}
+
+
+// Test: verify that a monomorphic call retains the structural knowledge
+// of a global call, correctly throwing either ReferenceError or
+// TypeError on undefined depending on how the call is made.
+(function() {
+ foo = function(arg) { return arg + 1; }
+
+ function f() { foo(1); }
+
+ // Drive to monomorphic
+ f(); f(); f();
+
+ delete foo;
+ assertThrows(function() { f(); }, ReferenceError);
+ foo = function(arg) { return arg * 2; }
+ assertDoesNotThrow(function() { f(); });
+ f(); f(); f();
+ delete foo;
+ assertThrows(function() { f(); }, ReferenceError);
+ clear_all_ics();
+ foo = function(arg) { return arg * 3; }
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+ delete foo;
+ assertThrows(function() { f(); }, ReferenceError);
+
+ foo = function(arg) { return arg * 3; }
+ function g() { this.foo(1); }
+ g(); g(); g();
+ delete foo;
+ assertThrows(function() { g(); }, TypeError);
+ foo = function(arg) { return arg * 3; }
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ delete foo;
+ assertThrows(function() { g(); }, TypeError);
+})();
+
+
+// Test: verify that a load with IC does the right thing.
+(function() {
+ var foo = function() { return a; }
+ a = 3;
+ foo(); foo(); foo();
+ delete a;
+ assertThrows(function() { foo(); }, ReferenceError);
+ a = "hi";
+ foo();
+ clear_all_ics();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+ delete a;
+ assertThrows(function() { foo(); }, ReferenceError);
+ foo = function() { return this.a; }
+ assertDoesNotThrow(function() { foo(); });
+})();
+
+
+// Test: verify that a store with IC does the right thing.
+// If store is contextual and strict mode is set, throw a ReferenceError
+// if the variable isn't found.
+(function() {
+ var foo = function() { a = 3; }
+ var bar = function() { "use strict"; a = 3; }
+ foo(); foo(); foo();
+ delete a;
+ assertThrows(function() { bar(); }, ReferenceError);
+ a = 6;
+ foo(); foo(); foo();
+ bar(); bar();
+ clear_all_ics();
+ bar();
+ %OptimizeFunctionOnNextCall(bar);
+ bar();
+ delete a;
+ assertThrows(function() { bar(); }, ReferenceError);
+})();
diff --git a/deps/v8/test/mjsunit/contextual-calls.js b/deps/v8/test/mjsunit/contextual-calls.js
new file mode 100644
index 0000000000..10c3e8d82c
--- /dev/null
+++ b/deps/v8/test/mjsunit/contextual-calls.js
@@ -0,0 +1,103 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var realms = [Realm.current(), Realm.create()];
+globals = [Realm.global(0), Realm.global(1)];
+Realm.shared = {}
+
+function install(name, value) {
+ Realm.shared[name] = value;
+ for (i in realms) {
+ Realm.eval(realms[i], name + " = Realm.shared['" + name + "'];");
+ }
+}
+
+install('return_this', function() { return this; });
+install('return_this_strict', function () { 'use strict'; return this; });
+
+// test behaviour of 'with' scope
+for (i in realms) {
+ Realm.shared.results = [];
+ // in the second case, 'this' is found in the with scope,
+ // so the receiver is 'this'
+ Realm.eval(realms[i]," \
+ with('irrelevant') { \
+ Realm.shared.results.push(return_this()); \
+ Realm.shared.results.push(return_this_strict()); \
+ } \
+ with(this) { \
+ Realm.shared.results.push(return_this()); \
+ Realm.shared.results.push(return_this_strict()); \
+ } \
+ ");
+ assertSame(globals[0], Realm.shared.results[0]);
+ assertSame(undefined, Realm.shared.results[1]);
+ assertSame(globals[i], Realm.shared.results[2]);
+ assertSame(globals[i], Realm.shared.results[3]);
+}
+
+// test 'apply' and 'call'
+for (i in realms) {
+ // 'apply' without a receiver is a contextual call
+ assertSame(globals[0], Realm.eval(realms[i],'return_this.apply()')) ;
+ assertSame(undefined, Realm.eval(realms[i],'return_this_strict.apply()'));
+ assertSame(globals[0], Realm.eval(realms[i],'return_this.apply(null)')) ;
+ assertSame(null, Realm.eval(realms[i],'return_this_strict.apply(null)'));
+ // 'call' without a receiver is a contextual call
+ assertSame(globals[0], Realm.eval(realms[i],'return_this.call()')) ;
+ assertSame(undefined, Realm.eval(realms[i],'return_this_strict.call()'));
+ assertSame(globals[0], Realm.eval(realms[i],'return_this.call(null)')) ;
+ assertSame(null, Realm.eval(realms[i],'return_this_strict.call(null)'));
+}
+
+// test ics
+for (var i = 0; i < 4; i++) {
+ assertSame(globals[0], return_this());
+ assertSame(undefined, return_this_strict());
+}
+
+// BUG(1547)
+
+Realm.eval(realms[0], "var name = 'o'");
+Realm.eval(realms[1], "var name = 'i'");
+
+install('f', function() { return this.name; });
+install('g', function() { "use strict"; return this ? this.name : "u"; });
+
+for (i in realms) {
+ result = Realm.eval(realms[i], " \
+ (function(){return f();})() + \
+ (function(){return (1,f)();})() + \
+ (function(){'use strict'; return f();})() + \
+ (function(){'use strict'; return (1,f)();})() + \
+ (function(){return g();})() + \
+ (function(){return (1,g)();})() + \
+ (function(){'use strict'; return g();})() + \
+ (function(){'use strict'; return (1,g)();})(); \
+ ");
+ assertSame("oooouuuu", result);
+}
diff --git a/deps/v8/test/mjsunit/cyclic-array-to-string.js b/deps/v8/test/mjsunit/cyclic-array-to-string.js
index 0a2d6e379e..ad77743c07 100644
--- a/deps/v8/test/mjsunit/cyclic-array-to-string.js
+++ b/deps/v8/test/mjsunit/cyclic-array-to-string.js
@@ -62,4 +62,3 @@ a1.push(a2);
assertEquals("", a1.toString());
assertEquals("", a1.toLocaleString());
assertEquals("", a1.join());
-
diff --git a/deps/v8/test/mjsunit/d8-performance-now.js b/deps/v8/test/mjsunit/d8-performance-now.js
index 13eb1d3f00..3e5485e81d 100644
--- a/deps/v8/test/mjsunit/d8-performance-now.js
+++ b/deps/v8/test/mjsunit/d8-performance-now.js
@@ -30,11 +30,6 @@
// Test the performance.now() function of d8. This test only makes sense with
// d8.
-// Don't run this test in gc stress mode. Time differences may be long
-// due to garbage collections.
-%SetFlags("--gc-interval=-1");
-%SetFlags("--nostress-compaction");
-
if (this.performance && performance.now) {
(function run() {
var start_test = performance.now();
diff --git a/deps/v8/test/mjsunit/debug-breakpoints.js b/deps/v8/test/mjsunit/debug-breakpoints.js
index 148acfc9ef..a04fac5c73 100644
--- a/deps/v8/test/mjsunit/debug-breakpoints.js
+++ b/deps/v8/test/mjsunit/debug-breakpoints.js
@@ -222,4 +222,3 @@ for(var i = 0; i < scenario.length; i++) {
Debug.BreakPositionAlignment.BreakPosition).indexOf(scenario[i][1]) > 0);
Debug.clearBreakPoint(bp1);
}
-
diff --git a/deps/v8/test/mjsunit/debug-constructor.js b/deps/v8/test/mjsunit/debug-constructor.js
index 38028aa8c1..6d4e7f3a8f 100644
--- a/deps/v8/test/mjsunit/debug-constructor.js
+++ b/deps/v8/test/mjsunit/debug-constructor.js
@@ -75,4 +75,4 @@ Debug.clearStepping(); // Clear stepping as the listener leaves it on.
assertEquals("bbccdcb", call_graph);
// Get rid of the debug event listener.
-Debug.setListener(null);
\ No newline at end of file
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-evaluate-const.js b/deps/v8/test/mjsunit/debug-evaluate-const.js
index cb9695b6f2..7fad483cd5 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-const.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-const.js
@@ -118,4 +118,3 @@ Debug.setListener(null);
assertFalse(exception, "exception in listener")
assertTrue(listenerComplete);
-
diff --git a/deps/v8/test/mjsunit/debug-function-scopes.js b/deps/v8/test/mjsunit/debug-function-scopes.js
index 4262b950da..b51e8b4432 100644
--- a/deps/v8/test/mjsunit/debug-function-scopes.js
+++ b/deps/v8/test/mjsunit/debug-function-scopes.js
@@ -159,4 +159,3 @@ CheckNoScopeVisible(Function.toString);
// This getter is known to be implemented as closure.
CheckNoScopeVisible(new Error().__lookupGetter__("stack"));
-
diff --git a/deps/v8/test/mjsunit/debug-ignore-breakpoints.js b/deps/v8/test/mjsunit/debug-ignore-breakpoints.js
index 96c6044e7b..3cb283bc48 100644
--- a/deps/v8/test/mjsunit/debug-ignore-breakpoints.js
+++ b/deps/v8/test/mjsunit/debug-ignore-breakpoints.js
@@ -86,4 +86,3 @@ f();
assertEquals(5, break_point_hit_count);
f();
assertEquals(6, break_point_hit_count);
-
diff --git a/deps/v8/test/mjsunit/debug-liveedit-3.js b/deps/v8/test/mjsunit/debug-liveedit-3.js
index b2106579d8..023c94e8e2 100644
--- a/deps/v8/test/mjsunit/debug-liveedit-3.js
+++ b/deps/v8/test/mjsunit/debug-liveedit-3.js
@@ -66,5 +66,3 @@ assertEquals(8, z6());
var z100 = Factory(100)();
assertEquals(102, z100());
-
-
diff --git a/deps/v8/test/mjsunit/debug-liveedit-check-stack.js b/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
index df9e1cfab0..6948a70d6b 100644
--- a/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
+++ b/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
@@ -138,4 +138,3 @@ test = new TestBase("Test with C++ frame above ChooseAnimal frame");
exception_holder = {};
assertEquals("Cat", test.ChooseAnimal(WrapInNativeCall(WrapInDebuggerCall(WrapInCatcher(test.ScriptChanger, exception_holder)))));
assertTrue(!!exception_holder[0]);
-
diff --git a/deps/v8/test/mjsunit/debug-liveedit-compile-error.js b/deps/v8/test/mjsunit/debug-liveedit-compile-error.js
index 2fd6aedabf..99ac0314a8 100644
--- a/deps/v8/test/mjsunit/debug-liveedit-compile-error.js
+++ b/deps/v8/test/mjsunit/debug-liveedit-compile-error.js
@@ -56,5 +56,3 @@ assertEquals("Unexpected token )",
caught_exception.details.syntaxErrorMessage);
assertEquals(2, caught_exception.details.position.start.line);
-
-
diff --git a/deps/v8/test/mjsunit/debug-liveedit-diff.js b/deps/v8/test/mjsunit/debug-liveedit-diff.js
index 0d26a30b40..2fd2497295 100644
--- a/deps/v8/test/mjsunit/debug-liveedit-diff.js
+++ b/deps/v8/test/mjsunit/debug-liveedit-diff.js
@@ -110,4 +110,3 @@ CheckCompare(
"yesterday\nall\nmy\ntroubles\nseemed\nso\nfar\naway",
"yesterday\nall\nmy\ntroubles\nseemed\nso\n"
);
-
diff --git a/deps/v8/test/mjsunit/debug-liveedit-utils.js b/deps/v8/test/mjsunit/debug-liveedit-utils.js
index c892ec9ae6..8521a6dae2 100644
--- a/deps/v8/test/mjsunit/debug-liveedit-utils.js
+++ b/deps/v8/test/mjsunit/debug-liveedit-utils.js
@@ -93,5 +93,3 @@ assertEquals(69, MultiChunkTranslator.Translate(59));
assertEquals(2010, MultiChunkTranslator.Translate(60, Return2010));
assertEquals(70, MultiChunkTranslator.Translate(70));
assertEquals(75, MultiChunkTranslator.Translate(75));
-
-
diff --git a/deps/v8/test/mjsunit/debug-mirror-cache.js b/deps/v8/test/mjsunit/debug-mirror-cache.js
index 5b85306a18..07aaf880dc 100644
--- a/deps/v8/test/mjsunit/debug-mirror-cache.js
+++ b/deps/v8/test/mjsunit/debug-mirror-cache.js
@@ -82,4 +82,3 @@ debugger;
assertEquals([], listenerExceptions, "Exception in listener");
// Make sure that the debug event listener was invoked.
assertEquals(2, listenerCallCount, "Listener not called");
-
diff --git a/deps/v8/test/mjsunit/debug-setbreakpoint.js b/deps/v8/test/mjsunit/debug-setbreakpoint.js
index 8531c4e935..bc23021ec7 100644
--- a/deps/v8/test/mjsunit/debug-setbreakpoint.js
+++ b/deps/v8/test/mjsunit/debug-setbreakpoint.js
@@ -214,4 +214,3 @@ function SetBreakpointInI1Script() {
// moment. Since there's no way of simply getting the pointer to the function,
// we run this code while the script function is being activated on stack.
eval('SetBreakpointInI1Script()\nfunction i1(){}\n\n\n\nfunction i2(){}\n');
-
diff --git a/deps/v8/test/mjsunit/debug-step-4-in-frame.js b/deps/v8/test/mjsunit/debug-step-4-in-frame.js
index 65ac4902dd..93884303ca 100644
--- a/deps/v8/test/mjsunit/debug-step-4-in-frame.js
+++ b/deps/v8/test/mjsunit/debug-step-4-in-frame.js
@@ -37,18 +37,18 @@ var state;
function f() {
var a = 1978;
- for (state[2] = 0; state[2] < 5; state[2]++) {
+ for (state[2] = 0; state[2] < 3; state[2]++) {
void String(a);
}
}
function g() {
- for (state[1] = 0; state[1] < 5; state[1]++) {
+ for (state[1] = 0; state[1] < 3; state[1]++) {
f();
}
}
function h() {
state = [-1, -1, -1];
- for (state[0] = 0; state[0] < 5; state[0]++) {
+ for (state[0] = 0; state[0] < 3; state[0]++) {
g();
}
}
@@ -123,10 +123,10 @@ TestCase(0, 5, "0,0,1");
TestCase(0, 8, "0,0,3");
// Stepping in the frame #1.
-TestCase(1, 0, "0,0,5");
-TestCase(1, 3, "0,1,5");
-TestCase(1, 8, "0,4,5");
+TestCase(1, 0, "0,0,3");
+TestCase(1, 3, "0,1,3");
+TestCase(1, 7, "0,3,3");
// Stepping in the frame #2.
-TestCase(2, 3, "1,5,5");
-TestCase(2, 8, "4,5,5");
+TestCase(2, 3, "1,3,3");
+TestCase(2, 7, "3,3,3");
diff --git a/deps/v8/test/mjsunit/debug-stepin-positions.js b/deps/v8/test/mjsunit/debug-stepin-positions.js
index e6d8204611..722df53666 100644
--- a/deps/v8/test/mjsunit/debug-stepin-positions.js
+++ b/deps/v8/test/mjsunit/debug-stepin-positions.js
@@ -221,5 +221,3 @@ var fun = (function(p) {
};
})(Object);
TestCaseWithDebugger(fun);
-
-
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part1.js b/deps/v8/test/mjsunit/debug-stepout-scope-part1.js
index f2f9d91419..f49b1a07eb 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part1.js
+++ b/deps/v8/test/mjsunit/debug-stepout-scope-part1.js
@@ -187,4 +187,4 @@ function nop() {}
// With block as the last(!) statement in global code.
-with ({}) { debugger; } \ No newline at end of file
+with ({}) { debugger; }
diff --git a/deps/v8/test/mjsunit/deopt-with-fp-regs.js b/deps/v8/test/mjsunit/deopt-with-fp-regs.js
new file mode 100644
index 0000000000..10e3d9abb3
--- /dev/null
+++ b/deps/v8/test/mjsunit/deopt-with-fp-regs.js
@@ -0,0 +1,90 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+deopt_trigger = 0;
+side_effect = 0;
+
+function test(a, b, c, d, e, v) {
+ // This test expects some specific input values.
+ assertEquals(10.0, a);
+ assertEquals(20.0, b);
+ assertEquals(30.0, c);
+ assertEquals(40.0, d);
+ assertEquals(50.0, e);
+ assertEquals(1.5, v);
+
+ // Perform a few double calculations.
+ a = a * 0.1;
+ b = b * 0.2;
+ c = c * 0.3;
+ d = d * 0.4;
+ e = e * 0.5;
+
+ // Write to a field of a global object. As for any side effect, a HSimulate
+ // will be introduced after the instructions to support this. If we deopt
+ // later in this function, the execution will resume in full-codegen after
+ // this point.
+ side_effect++;
+ // The following field of the global object will be deleted to force a deopt.
+// If we use type feedback to deopt, then tests run with --stress-opt will
+// not deopt after a few iterations.
+ // If we use %DeoptimizeFunction, all values will be on the frame due to the
+ // call and we will not exercise the translation mechanism handling fp
+ // registers.
+ deopt_trigger = v;
+
+ // Do a few more calculations using the previous values after our deopt point
+ // so the floating point registers which hold those values are recorded in the
+ // environment and will be used during deoptimization.
+ a = a * v;
+ b = b * v;
+ c = c * v;
+ d = d * v;
+ e = e * v;
+
+ // Check that we got the expected results.
+ assertEquals(1.5, a);
+ assertEquals(6, b);
+ assertEquals(13.5, c);
+ assertEquals(24, d);
+ assertEquals(37.5, e);
+}
+
+
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+%OptimizeFunctionOnNextCall(test);
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+assertTrue(2 != %GetOptimizationStatus(test));
+
+// By deleting the field we are forcing the code to deopt when the field is
+// read on next execution.
+delete deopt_trigger;
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+assertTrue(1 != %GetOptimizationStatus(test));
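The new test above relies on deleting a plain global property to invalidate optimized code. A minimal sketch of that trigger pattern, separate from the patch and runnable under d8 with --allow-natives-syntax (names here are illustrative only):

trigger = 0;                        // implicit global, so the property is deletable

function probe(v) {
  var x = v * 0.5;                  // double value expected to live in an fp register
  trigger = v;                      // store site that depends on the global property
  return x * 2;                     // the fp value is still needed after the store
}

probe(1.5);
probe(1.5);
%OptimizeFunctionOnNextCall(probe);
probe(1.5);                         // runs the optimized code
delete trigger;                     // removing the property invalidates that code
probe(1.5);                         // the next call deopts and resumes unoptimized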
diff --git a/deps/v8/test/mjsunit/div-mod.js b/deps/v8/test/mjsunit/div-mod.js
index c3144955cb..08cee8cdd1 100644
--- a/deps/v8/test/mjsunit/div-mod.js
+++ b/deps/v8/test/mjsunit/div-mod.js
@@ -126,9 +126,15 @@ function compute_mod(dividend, divisor) {
var example_numbers = [
NaN,
0,
+
+ // Due to a bug in fmod(), modulos involving denormals
+ // return the wrong result for glibc <= 2.16.
+ // Details: http://sourceware.org/bugzilla/show_bug.cgi?id=14048
+
Number.MIN_VALUE,
3 * Number.MIN_VALUE,
max_denormal,
+
min_normal,
repeating_decimal,
finite_decimal,
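The comment added above points at a libc issue with denormal operands; a small sketch of the kind of value it guards, not part of the patch:

var min_denormal = Number.MIN_VALUE;            // 5e-324, the smallest denormal double
var remainder = (3 * min_denormal) % min_denormal;
// A correct fmod() gives an exact 0 remainder here; the glibc <= 2.16 bug
// referenced above can return a wrong result for such denormal operands.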
diff --git a/deps/v8/test/mjsunit/div-mul-minus-one.js b/deps/v8/test/mjsunit/div-mul-minus-one.js
index f05bf0f54c..5ade61492d 100644
--- a/deps/v8/test/mjsunit/div-mul-minus-one.js
+++ b/deps/v8/test/mjsunit/div-mul-minus-one.js
@@ -36,9 +36,7 @@ var expected_MinInt = div(kMinInt);
var expected_minus_zero = div(0);
%OptimizeFunctionOnNextCall(div);
assertEquals(expected_MinInt, div(kMinInt));
-assertOptimized(div);
assertEquals(expected_minus_zero , div(0));
-assertOptimized(div);
function mul(g) {
return (g * -1) ^ 1
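A short sketch, not part of the patch, of why kMinInt and 0 are the interesting inputs when dividing or multiplying by -1:

var kMinInt = -2147483648;          // -2^31
var negated = kMinInt * -1;         // 2147483648: no longer fits in int32
var minusZero = 0 * -1;             // -0: observable via 1 / minusZero === -Infinity
// Neither result can stay in a pure small-integer representation, so both are
// classic deoptimization triggers for this pattern.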
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index 442d756ae9..e2bbc31a41 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -25,13 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --notrack_allocation_sites
-
-// Limit the number of stress runs to reduce polymorphism it defeats some of the
-// assumptions made about how elements transitions work because transition stubs
-// end up going generic.
-// Flags: --stress-runs=2
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc --nostress-opt
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
@@ -71,31 +65,31 @@ function getKind(obj) {
if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
// Every external kind is also an external array.
assertTrue(%HasExternalArrayElements(obj));
- if (%HasExternalByteElements(obj)) {
+ if (%HasExternalInt8Elements(obj)) {
return elements_kind.external_byte;
}
- if (%HasExternalUnsignedByteElements(obj)) {
+ if (%HasExternalUint8Elements(obj)) {
return elements_kind.external_unsigned_byte;
}
- if (%HasExternalShortElements(obj)) {
+ if (%HasExternalInt16Elements(obj)) {
return elements_kind.external_short;
}
- if (%HasExternalUnsignedShortElements(obj)) {
+ if (%HasExternalUint16Elements(obj)) {
return elements_kind.external_unsigned_short;
}
- if (%HasExternalIntElements(obj)) {
+ if (%HasExternalInt32Elements(obj)) {
return elements_kind.external_int;
}
- if (%HasExternalUnsignedIntElements(obj)) {
+ if (%HasExternalUint32Elements(obj)) {
return elements_kind.external_unsigned_int;
}
- if (%HasExternalFloatElements(obj)) {
+ if (%HasExternalFloat32Elements(obj)) {
return elements_kind.external_float;
}
- if (%HasExternalDoubleElements(obj)) {
+ if (%HasExternalFloat64Elements(obj)) {
return elements_kind.external_double;
}
- if (%HasExternalPixelElements(obj)) {
+ if (%HasExternalUint8ClampedElements(obj)) {
return elements_kind.external_pixel;
}
}
@@ -123,56 +117,75 @@ if (support_smi_only_arrays) {
}
// Make sure the element kind transitions from smi when a non-smi is stored.
-var you = new Array();
-assertKind(elements_kind.fast_smi_only, you);
-for (var i = 0; i < 1337; i++) {
- var val = i;
- if (i == 1336) {
- assertKind(elements_kind.fast_smi_only, you);
- val = new Object();
+function test_wrapper() {
+ var you = new Array();
+ assertKind(elements_kind.fast_smi_only, you);
+ for (var i = 0; i < 1337; i++) {
+ var val = i;
+ if (i == 1336) {
+ assertKind(elements_kind.fast_smi_only, you);
+ val = new Object();
+ }
+ you[i] = val;
}
- you[i] = val;
-}
-assertKind(elements_kind.fast, you);
+ assertKind(elements_kind.fast, you);
-assertKind(elements_kind.dictionary, new Array(0xDECAF));
+ assertKind(elements_kind.dictionary, new Array(0xDECAF));
-var fast_double_array = new Array(0xDECAF);
-for (var i = 0; i < 0xDECAF; i++) fast_double_array[i] = i / 2;
-assertKind(elements_kind.fast_double, fast_double_array);
+ var fast_double_array = new Array(0xDECAF);
+ for (var i = 0; i < 0xDECAF; i++) fast_double_array[i] = i / 2;
+ assertKind(elements_kind.fast_double, fast_double_array);
-assertKind(elements_kind.external_byte, new Int8Array(9001));
-assertKind(elements_kind.external_unsigned_byte, new Uint8Array(007));
-assertKind(elements_kind.external_short, new Int16Array(666));
-assertKind(elements_kind.external_unsigned_short, new Uint16Array(42));
-assertKind(elements_kind.external_int, new Int32Array(0xF));
-assertKind(elements_kind.external_unsigned_int, new Uint32Array(23));
-assertKind(elements_kind.external_float, new Float32Array(7));
-assertKind(elements_kind.external_double, new Float64Array(0));
-assertKind(elements_kind.external_pixel, new Uint8ClampedArray(512));
+ assertKind(elements_kind.external_byte, new Int8Array(9001));
+ assertKind(elements_kind.external_unsigned_byte, new Uint8Array(007));
+ assertKind(elements_kind.external_short, new Int16Array(666));
+ assertKind(elements_kind.external_unsigned_short, new Uint16Array(42));
+ assertKind(elements_kind.external_int, new Int32Array(0xF));
+ assertKind(elements_kind.external_unsigned_int, new Uint32Array(23));
+ assertKind(elements_kind.external_float, new Float32Array(7));
+ assertKind(elements_kind.external_double, new Float64Array(0));
+ assertKind(elements_kind.external_pixel, new Uint8ClampedArray(512));
-// Crankshaft support for smi-only array elements.
-function monomorphic(array) {
- assertKind(elements_kind.fast_smi_only, array);
- for (var i = 0; i < 3; i++) {
- array[i] = i + 10;
- }
- assertKind(elements_kind.fast_smi_only, array);
- for (var i = 0; i < 3; i++) {
- var a = array[i];
- assertEquals(i + 10, a);
+ // Crankshaft support for smi-only array elements.
+ function monomorphic(array) {
+ assertKind(elements_kind.fast_smi_only, array);
+ for (var i = 0; i < 3; i++) {
+ array[i] = i + 10;
+ }
+ assertKind(elements_kind.fast_smi_only, array);
+ for (var i = 0; i < 3; i++) {
+ var a = array[i];
+ assertEquals(i + 10, a);
+ }
}
+ var smi_only = new Array(1, 2, 3);
+ assertKind(elements_kind.fast_smi_only, smi_only);
+ for (var i = 0; i < 3; i++) monomorphic(smi_only);
+ %OptimizeFunctionOnNextCall(monomorphic);
+ monomorphic(smi_only);
}
-var smi_only = new Array(1, 2, 3);
-assertKind(elements_kind.fast_smi_only, smi_only);
-for (var i = 0; i < 3; i++) monomorphic(smi_only);
-%OptimizeFunctionOnNextCall(monomorphic);
-monomorphic(smi_only);
+
+// The test is called in a wrapper function to eliminate the transition learning
+// feedback of AllocationSites.
+test_wrapper();
+%ClearFunctionTypeFeedback(test_wrapper);
if (support_smi_only_arrays) {
%NeverOptimizeFunction(construct_smis);
+
+ // This code exists to eliminate the learning influence of AllocationSites
+ // on the following tests.
+ var __sequence = 0;
+ function make_array_string() {
+ this.__sequence = this.__sequence + 1;
+ return "/* " + this.__sequence + " */ [0, 0, 0];"
+ }
+ function make_array() {
+ return eval(make_array_string());
+ }
+
function construct_smis() {
- var a = [0, 0, 0];
+ var a = make_array();
a[0] = 0; // Send the COW array map to the steak house.
assertKind(elements_kind.fast_smi_only, a);
return a;
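A compact sketch, separate from the patch, of the eval trick the wrapper above uses to defeat AllocationSite learning (names here are illustrative):

var sequence = 0;
function freshArray() {
  sequence++;
  // Each call evaluates a textually distinct source string, so every literal
  // is backed by its own AllocationSite and no transition feedback is shared.
  return eval("/* " + sequence + " */ [0, 0, 0]");
}
var first = freshArray();
var second = freshArray();   // allocated from a different site than 'first'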
diff --git a/deps/v8/test/mjsunit/elements-transition-and-store.js b/deps/v8/test/mjsunit/elements-transition-and-store.js
index 7a07b3eeca..0b4786b4b8 100644
--- a/deps/v8/test/mjsunit/elements-transition-and-store.js
+++ b/deps/v8/test/mjsunit/elements-transition-and-store.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --notrack-allocation-sites
-
function foo(a, v) {
a[0] = v;
return a;
diff --git a/deps/v8/test/mjsunit/elements-transition-hoisting.js b/deps/v8/test/mjsunit/elements-transition-hoisting.js
index 0295318f6a..76027b9ed1 100644
--- a/deps/v8/test/mjsunit/elements-transition-hoisting.js
+++ b/deps/v8/test/mjsunit/elements-transition-hoisting.js
@@ -25,10 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays --notrack-allocation-sites
-
-// No tracking of allocation sites because it interfers with the semantics
-// the test is trying to ensure.
+// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --nostress-opt
// Ensure that ElementsKind transitions in various situations are hoisted (or
// not hoisted) correctly, don't change the semantics programs and don't trigger
@@ -42,7 +40,7 @@ if (support_smi_only_arrays) {
print("Tests do NOT include smi-only arrays.");
}
-if (support_smi_only_arrays) {
+function test_wrapper() {
// Make sure that a simple elements array transitions inside a loop before
// stores to an array gets hoisted in a way that doesn't generate a deopt in
// simple cases.}
@@ -239,3 +237,10 @@ if (support_smi_only_arrays) {
assertOptimized(testStraightLineDupeElinination);
%ClearFunctionTypeFeedback(testStraightLineDupeElinination);
}
+
+if (support_smi_only_arrays) {
+ // The test is called in a test wrapper that has type feedback cleared to
+ // prevent the influence of allocation-sites, which learn from transitions.
+ test_wrapper();
+ %ClearFunctionTypeFeedback(test_wrapper);
+}
diff --git a/deps/v8/test/mjsunit/elements-transition.js b/deps/v8/test/mjsunit/elements-transition.js
index e28f3c3d64..7298e68a12 100644
--- a/deps/v8/test/mjsunit/elements-transition.js
+++ b/deps/v8/test/mjsunit/elements-transition.js
@@ -25,7 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays --notrack-allocation-sites
+// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --nostress-opt
support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
@@ -36,14 +37,26 @@ if (support_smi_only_arrays) {
}
if (support_smi_only_arrays) {
+ // This code exists to eliminate the learning influence of AllocationSites
+ // on the following tests.
+ var __sequence = 0;
+ function make_array_string(length) {
+ this.__sequence = this.__sequence + 1;
+ return "/* " + this.__sequence + " */ new Array(" + length + ");";
+ }
+ function make_array(length) {
+ return eval(make_array_string(length));
+ }
+
function test(test_double, test_object, set, length) {
// We apply the same operations to two identical arrays. The first array
// triggers an IC miss, upon which the conversion stub is generated, but the
// actual conversion is done in runtime. The second array, arriving at
// the previously patched IC, is then converted using the conversion stub.
- var array_1 = new Array(length);
- var array_2 = new Array(length);
+ var array_1 = make_array(length);
+ var array_2 = make_array(length);
+ // false, true, nice setter function, 20
assertTrue(%HasFastSmiElements(array_1));
assertTrue(%HasFastSmiElements(array_2));
for (var i = 0; i < length; i++) {
@@ -86,15 +99,20 @@ if (support_smi_only_arrays) {
assertEquals(length, array_2.length);
}
- test(false, false, function(a,i,v){ a[i] = v; }, 20);
- test(true, false, function(a,i,v){ a[i] = v; }, 20);
- test(false, true, function(a,i,v){ a[i] = v; }, 20);
- test(true, true, function(a,i,v){ a[i] = v; }, 20);
+ function run_test(test_double, test_object, set, length) {
+ test(test_double, test_object, set, length);
+ %ClearFunctionTypeFeedback(test);
+ }
+
+ run_test(false, false, function(a,i,v){ a[i] = v; }, 20);
+ run_test(true, false, function(a,i,v){ a[i] = v; }, 20);
+ run_test(false, true, function(a,i,v){ a[i] = v; }, 20);
+ run_test(true, true, function(a,i,v){ a[i] = v; }, 20);
- test(false, false, function(a,i,v){ a[i] = v; }, 10000);
- test(true, false, function(a,i,v){ a[i] = v; }, 10000);
- test(false, true, function(a,i,v){ a[i] = v; }, 10000);
- test(true, true, function(a,i,v){ a[i] = v; }, 10000);
+ run_test(false, false, function(a,i,v){ a[i] = v; }, 10000);
+ run_test(true, false, function(a,i,v){ a[i] = v; }, 10000);
+ run_test(false, true, function(a,i,v){ a[i] = v; }, 10000);
+ run_test(true, true, function(a,i,v){ a[i] = v; }, 10000);
// Check COW arrays
function get_cow() { return [1, 2, 3]; }
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-9.js b/deps/v8/test/mjsunit/elide-double-hole-check-9.js
index 88bbc7eaaa..bbcbfb2be6 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-9.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-9.js
@@ -46,4 +46,3 @@ assertEquals(0.5, f(arr, 0));
assertEquals(0.5, f(arr, 0));
do_set = true;
assertEquals(2, f(arr, 1));
-
diff --git a/deps/v8/test/mjsunit/enumeration-order.js b/deps/v8/test/mjsunit/enumeration-order.js
index a328121d73..70942ee13c 100644
--- a/deps/v8/test/mjsunit/enumeration-order.js
+++ b/deps/v8/test/mjsunit/enumeration-order.js
@@ -103,7 +103,3 @@ var expected = ['23', '42', // indexed from 'o'
var actual = [];
for (var p in o) actual.push(p);
assertArrayEquals(expected, actual);
-
-
-
-
diff --git a/deps/v8/test/mjsunit/error-accessors.js b/deps/v8/test/mjsunit/error-accessors.js
index 9581050240..cdaf080a3c 100644
--- a/deps/v8/test/mjsunit/error-accessors.js
+++ b/deps/v8/test/mjsunit/error-accessors.js
@@ -51,4 +51,3 @@ assertEquals("x is not defined",
o.message = "another message";
assertEquals("another message", o.message);
assertEquals("x is not defined", error2.message);
-
diff --git a/deps/v8/test/mjsunit/error-tostring-omit.js b/deps/v8/test/mjsunit/error-tostring-omit.js
new file mode 100644
index 0000000000..111adfc212
--- /dev/null
+++ b/deps/v8/test/mjsunit/error-tostring-omit.js
@@ -0,0 +1,63 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function veryLongString() {
+ return "Lorem ipsum dolor sit amet, consectetur adipiscing elit." +
+ "Nam vulputate metus est. Maecenas quis pellentesque eros," +
+ "ac mattis augue. Nam porta purus vitae tincidunt blandit." +
+ "Aliquam lacus dui, blandit id consectetur id, hendrerit ut" +
+ "felis. Class aptent taciti sociosqu ad litora torquent per" +
+ "conubia nostra, per inceptos himenaeos. Ut posuere eros et" +
+ "tempus luctus. Nullam condimentum aliquam odio, at dignissim" +
+ "augue tincidunt in. Nam mattis vitae mauris eget dictum." +
+ "Nam accumsan dignissim turpis a turpis duis.";
+}
+
+
+var re = /omitted/;
+
+try {
+ veryLongString.nonexistentMethod();
+} catch (e) {
+ assertTrue(e.message.length < 350);
+ // TODO(verwaest): Proper error message.
+ // assertTrue(re.test(e.message));
+}
+
+try {
+ veryLongString().nonexistentMethod();
+} catch (e) {
+ assertTrue(e.message.length < 350);
+ // TODO(verwaest): Proper error message.
+ // assertTrue(re.test(e.message));
+}
+
+try {
+ throw Error(veryLongString());
+} catch (e) {
+ assertEquals(veryLongString(), e.message);
+}
diff --git a/deps/v8/test/mjsunit/eval-stack-trace.js b/deps/v8/test/mjsunit/eval-stack-trace.js
index d83b84c16f..f95e50fa38 100644
--- a/deps/v8/test/mjsunit/eval-stack-trace.js
+++ b/deps/v8/test/mjsunit/eval-stack-trace.js
@@ -201,4 +201,3 @@ try {
[true, false].verifyUndefined(frames, "getFileName");
["eval at <anonymous> ("].verifyContains(frames, "getEvalOrigin");
}
-
diff --git a/deps/v8/test/mjsunit/extra-arguments.js b/deps/v8/test/mjsunit/extra-arguments.js
index 186277a006..29063672ee 100644
--- a/deps/v8/test/mjsunit/extra-arguments.js
+++ b/deps/v8/test/mjsunit/extra-arguments.js
@@ -50,5 +50,3 @@ for (var i = 0; i < 25; i++) {
}
assertEquals(expected, f.apply(null, array), String(i));
}
-
-
diff --git a/deps/v8/test/mjsunit/fast-array-length.js b/deps/v8/test/mjsunit/fast-array-length.js
index 42f2c38f49..3917d97f2c 100644
--- a/deps/v8/test/mjsunit/fast-array-length.js
+++ b/deps/v8/test/mjsunit/fast-array-length.js
@@ -34,4 +34,3 @@ var a = [0, 1, 2, 3, 4, 5];
assertTrue(%HasFastSmiElements(a));
a.length = (1 << 30);
assertFalse(%HasFastSmiElements(a));
-
diff --git a/deps/v8/test/mjsunit/fast-literal.js b/deps/v8/test/mjsunit/fast-literal.js
index 822d90656b..4fd92c4d3a 100644
--- a/deps/v8/test/mjsunit/fast-literal.js
+++ b/deps/v8/test/mjsunit/fast-literal.js
@@ -27,16 +27,13 @@
// Flags: --allow-natives-syntax --no-inline-new --nouse-allocation-folding
-%SetAllocationTimeout(10, 0);
+%SetAllocationTimeout(20, 0);
function f() {
return [[1, 2, 3], [1.1, 1.2, 1.3], [[], [], []]];
}
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
-for (var i=0; i<1000; i++) {
+for (var i=0; i<50; i++) {
f();
}
-
-
-
diff --git a/deps/v8/test/mjsunit/fast-prototype.js b/deps/v8/test/mjsunit/fast-prototype.js
index d700c3c3cc..cdcc1a9ed6 100644
--- a/deps/v8/test/mjsunit/fast-prototype.js
+++ b/deps/v8/test/mjsunit/fast-prototype.js
@@ -26,9 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
-
-// TODO(mstarzinger): This test does not succeed when GCs happen in
-// between prototype transitions, we disable GC stress for now.
// Flags: --noincremental-marking
// Check that objects that are used for prototypes are in the fast mode.
diff --git a/deps/v8/test/mjsunit/fun-name.js b/deps/v8/test/mjsunit/fun-name.js
index 676daaa133..1688438ac8 100644
--- a/deps/v8/test/mjsunit/fun-name.js
+++ b/deps/v8/test/mjsunit/fun-name.js
@@ -31,4 +31,3 @@ function strip(s) {
assertEquals('function(){}', strip((function () { }).toString()));
assertEquals('functionanonymous(){}', strip(new Function().toString()));
-
diff --git a/deps/v8/test/mjsunit/function-arguments-duplicate.js b/deps/v8/test/mjsunit/function-arguments-duplicate.js
new file mode 100644
index 0000000000..80f03a106b
--- /dev/null
+++ b/deps/v8/test/mjsunit/function-arguments-duplicate.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Exercises ArgumentsAccessStub::GenerateNewNonStrictSlow.
+
+function f(a, a) {
+ assertEquals(2, a);
+ assertEquals(1, arguments[0]);
+ assertEquals(2, arguments[1]);
+}
+
+f(1, 2);
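A small sketch, not part of the patch, of the sloppy-mode rule the new test exercises: with duplicate parameter names the last binding wins for the named parameter, while the arguments object still records every argument by position (in strict mode the same parameter list is a SyntaxError):

function dup(a, a) {
  return [a, arguments[0], arguments[1]];   // dup(1, 2) returns [2, 1, 2]
}
dup(1, 2);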
diff --git a/deps/v8/test/mjsunit/function.js b/deps/v8/test/mjsunit/function.js
index b5e83dba6e..5c33762b3e 100644
--- a/deps/v8/test/mjsunit/function.js
+++ b/deps/v8/test/mjsunit/function.js
@@ -80,4 +80,3 @@ assertEquals(42, f(2, 21));
f = new Function(x, y, z);
assertEquals(25, f(5, 5));
assertEquals(42, f(2, 21));
-
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part1.js b/deps/v8/test/mjsunit/fuzz-natives-part1.js
index e76b9be6d4..e22ac49472 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part1.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part1.js
@@ -116,6 +116,7 @@ function testArgumentTypes(name, argc) {
var knownProblems = {
"Abort": true,
+ "ThrowMessage": true,
// Avoid calling the concat operation, because weird lengths
// may lead to out-of-memory. Ditto for StringBuilderJoin.
@@ -148,9 +149,9 @@ var knownProblems = {
"PushCatchContext": true,
"PushBlockContext": true,
"PushModuleContext": true,
- "LazyCompile": true,
- "LazyRecompile": true,
- "ConcurrentRecompile": true,
+ "CompileUnoptimized": true,
+ "CompileOptimized": true,
+ "CompileOptimizedConcurrent": true,
"NotifyDeoptimized": true,
"NotifyStubFailure": true,
"NotifyOSR": true,
@@ -170,6 +171,7 @@ var knownProblems = {
// Vararg with minimum number > 0.
"Call": true,
+ "SetAllocationTimeout": true,
// Requires integer arguments to be non-negative.
"Apply": true,
@@ -203,11 +205,15 @@ var knownProblems = {
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
+ // Only applicable to TypedArrays.
+ "TypedArrayInitialize": true,
+
// Only applicable to generators.
"_GeneratorNext": true,
"_GeneratorThrow": true,
// Only applicable to DataViews.
+ "DataViewInitialize": true,
"DataViewGetBuffer": true,
"DataViewGetByteLength": true,
"DataViewGetByteOffset": true
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part2.js b/deps/v8/test/mjsunit/fuzz-natives-part2.js
index 0797deb18d..293ad7e524 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part2.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part2.js
@@ -116,6 +116,7 @@ function testArgumentTypes(name, argc) {
var knownProblems = {
"Abort": true,
+ "ThrowMessage": true,
// Avoid calling the concat operation, because weird lengths
// may lead to out-of-memory. Ditto for StringBuilderJoin.
@@ -148,9 +149,9 @@ var knownProblems = {
"PushCatchContext": true,
"PushBlockContext": true,
"PushModuleContext": true,
- "LazyCompile": true,
- "LazyRecompile": true,
- "ConcurrentRecompile": true,
+ "CompileUnoptimized": true,
+ "CompileOptimized": true,
+ "CompileOptimizedConcurrent": true,
"NotifyDeoptimized": true,
"NotifyStubFailure": true,
"NotifyOSR": true,
@@ -171,6 +172,7 @@ var knownProblems = {
// Vararg with minimum number > 0.
"Call": true,
+ "SetAllocationTimeout": true,
// Requires integer arguments to be non-negative.
"Apply": true,
@@ -204,11 +206,15 @@ var knownProblems = {
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
+ // Only applicable to TypedArrays.
+ "TypedArrayInitialize": true,
+
// Only applicable to generators.
"_GeneratorNext": true,
"_GeneratorThrow": true,
// Only applicable to DataViews.
+ "DataViewInitialize": true,
"DataViewGetBuffer": true,
"DataViewGetByteLength": true,
"DataViewGetByteOffset": true
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part3.js b/deps/v8/test/mjsunit/fuzz-natives-part3.js
index 9a3a883fe4..b3a1fb610c 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part3.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part3.js
@@ -116,6 +116,7 @@ function testArgumentTypes(name, argc) {
var knownProblems = {
"Abort": true,
+ "ThrowMessage": true,
// Avoid calling the concat operation, because weird lengths
// may lead to out-of-memory. Ditto for StringBuilderJoin.
@@ -148,9 +149,9 @@ var knownProblems = {
"PushCatchContext": true,
"PushBlockContext": true,
"PushModuleContext": true,
- "LazyCompile": true,
- "LazyRecompile": true,
- "ConcurrentRecompile": true,
+ "CompileUnoptimized": true,
+ "CompileOptimized": true,
+ "CompileOptimizedConcurrent": true,
"NotifyDeoptimized": true,
"NotifyStubFailure": true,
"NotifyOSR": true,
@@ -170,6 +171,7 @@ var knownProblems = {
// Vararg with minimum number > 0.
"Call": true,
+ "SetAllocationTimeout": true,
// Requires integer arguments to be non-negative.
"Apply": true,
@@ -203,14 +205,21 @@ var knownProblems = {
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
+ // Only applicable to TypedArrays.
+ "TypedArrayInitialize": true,
+
// Only applicable to generators.
"_GeneratorNext": true,
"_GeneratorThrow": true,
// Only applicable to DataViews.
+  "DataViewInitialize": true,
"DataViewGetBuffer": true,
"DataViewGetByteLength": true,
- "DataViewGetByteOffset": true
+ "DataViewGetByteOffset": true,
+
+ // Only ever called internally.
+ "RunMicrotasks": true
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part4.js b/deps/v8/test/mjsunit/fuzz-natives-part4.js
index 83e00d2b66..5f1f912063 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part4.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part4.js
@@ -116,6 +116,7 @@ function testArgumentTypes(name, argc) {
var knownProblems = {
"Abort": true,
+ "ThrowMessage": true,
// Avoid calling the concat operation, because weird lengths
// may lead to out-of-memory. Ditto for StringBuilderJoin.
@@ -148,9 +149,9 @@ var knownProblems = {
"PushCatchContext": true,
"PushBlockContext": true,
"PushModuleContext": true,
- "LazyCompile": true,
- "LazyRecompile": true,
- "ConcurrentRecompile": true,
+ "CompileUnoptimized": true,
+ "CompileOptimized": true,
+ "CompileOptimizedConcurrent": true,
"NotifyDeoptimized": true,
"NotifyStubFailure": true,
"NotifyOSR": true,
@@ -170,6 +171,7 @@ var knownProblems = {
// Vararg with minimum number > 0.
"Call": true,
+ "SetAllocationTimeout": true,
// Requires integer arguments to be non-negative.
"Apply": true,
@@ -203,11 +205,15 @@ var knownProblems = {
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
+ // Only applicable to TypedArrays.
+ "TypedArrayInitialize": true,
+
// Only applicable to generators.
"_GeneratorNext": true,
"_GeneratorThrow": true,
// Only applicable to DataViews.
+ "DataViewInitialize": true,
"DataViewGetBuffer": true,
"DataViewGetByteLength": true,
"DataViewGetByteOffset": true
diff --git a/deps/v8/test/mjsunit/get-prototype-of.js b/deps/v8/test/mjsunit/get-prototype-of.js
index 6475bde651..c2a492a3cf 100644
--- a/deps/v8/test/mjsunit/get-prototype-of.js
+++ b/deps/v8/test/mjsunit/get-prototype-of.js
@@ -65,4 +65,3 @@ GetPrototypeOfObject(y);
GetPrototypeOfObject({x:5});
GetPrototypeOfObject(F);
GetPrototypeOfObject(RegExp);
-
diff --git a/deps/v8/test/mjsunit/getter-in-value-prototype.js b/deps/v8/test/mjsunit/getter-in-value-prototype.js
index abe2cb1934..835710604b 100644
--- a/deps/v8/test/mjsunit/getter-in-value-prototype.js
+++ b/deps/v8/test/mjsunit/getter-in-value-prototype.js
@@ -32,4 +32,3 @@
String.prototype.__defineGetter__('x', function() { return this; });
assertEquals(Object('asdf'), 'asdf'.x);
-
diff --git a/deps/v8/test/mjsunit/getters-on-elements.js b/deps/v8/test/mjsunit/getters-on-elements.js
new file mode 100644
index 0000000000..3bc360f143
--- /dev/null
+++ b/deps/v8/test/mjsunit/getters-on-elements.js
@@ -0,0 +1,221 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --max-opt-count=100 --noalways-opt
+// Flags: --nocollect-maps
+
+// We specify max-opt-count because we opt/deopt the same function many
+// times.
+
+// We specify nocollect-maps because under GC stress a GC in the stack guard
+// at the start of the (optimized) function can trigger leftover map-clearing
+// work, which deoptimizes code that depends on those maps. The alternative
+// would be to insert strategic gc() calls instead of passing this flag.
+
+// It's nice to run this in other browsers too.
+var standalone = false;
+if (standalone) {
+ assertTrue = function(val) {
+ if (val != true) {
+ print("FAILURE");
+ }
+ }
+
+ assertFalse = function(val) {
+ if (val != false) {
+ print("FAILURE");
+ }
+ }
+
+ assertEquals = function(expected, val) {
+ if (expected !== val) {
+ print("FAILURE");
+ }
+ }
+
+ empty_func = function(name) { }
+ assertUnoptimized = empty_func;
+ assertOptimized = empty_func;
+
+ optimize = empty_func;
+ clearFunctionTypeFeedback = empty_func;
+ deoptimizeFunction = empty_func;
+} else {
+ optimize = function(name) {
+ %OptimizeFunctionOnNextCall(name);
+ }
+ clearFunctionTypeFeedback = function(name) {
+ %ClearFunctionTypeFeedback(name);
+ }
+ deoptimizeFunction = function(name) {
+ %DeoptimizeFunction(name);
+ }
+}
+
+function base_getter_test(create_func) {
+ var calls = 0;
+
+  // Testcase: getter in prototype chain
+ foo = function(a) { var x = a[0]; return x + 3; }
+ var a = create_func();
+ var ap = [];
+ ap.__defineGetter__(0, function() { calls++; return 0; });
+
+ foo(a);
+ foo(a);
+ foo(a);
+ delete a[0];
+
+ assertEquals(0, calls);
+ a.__proto__ = ap;
+ foo(a);
+ assertEquals(1, calls);
+ optimize(foo);
+ foo(a);
+ assertEquals(2, calls);
+ assertOptimized(foo);
+
+ // Testcase: getter "deep" in prototype chain.
+ clearFunctionTypeFeedback(foo);
+ deoptimizeFunction(foo);
+ clearFunctionTypeFeedback(foo);
+ calls = 0;
+
+ a = create_func();
+ var ap2 = [];
+ a.__proto__ = ap2;
+ foo(a);
+ foo(a);
+ foo(a);
+ delete a[0];
+
+ assertEquals(0, calls);
+
+ ap2.__proto__ = ap; // "sneak" in a callback.
+ // The sneak case should be caught by unoptimized code too.
+ assertUnoptimized(foo);
+ foo(a);
+ foo(a);
+ foo(a);
+ assertEquals(3, calls);
+
+ // Testcase: getter added after optimization (feedback is monomorphic)
+ clearFunctionTypeFeedback(foo);
+ deoptimizeFunction(foo);
+ clearFunctionTypeFeedback(foo);
+ calls = 0;
+
+ a = create_func();
+ ap2 = [];
+ a.__proto__ = ap2;
+ foo(a);
+ foo(a);
+ foo(a);
+ optimize(foo);
+ foo(a);
+ assertOptimized(foo);
+ delete a[0];
+ ap2.__proto__ = ap;
+ foo(a);
+ assertOptimized(foo); // getters don't require deopt on shape change.
+ assertEquals(1, calls);
+
+ // Testcase: adding additional getters to a prototype chain that already has
+ // one shouldn't deopt anything.
+ clearFunctionTypeFeedback(foo);
+ calls = 0;
+
+ a = create_func();
+ a.__proto__ = ap2;
+ bar = function(a) { return a[3] + 600; }
+ bar(a);
+ bar(a);
+ bar(a);
+ optimize(bar);
+ bar(a);
+ assertOptimized(bar);
+ assertEquals(0, calls);
+ delete a[3];
+ ap2.__defineGetter__(3, function() { calls++; return 0; });
+ bar(a);
+ assertOptimized(bar);
+ assertEquals(1, calls);
+}
+
+// Verify that map transitions don't confuse us.
+create_func_smi = function() { return [,,,,,,5]; }
+create_func_double = function() { return [,,,,,,5.5]; }
+create_func_fast = function() { return [,,,,,,true]; }
+
+var cf = [create_func_smi,
+ create_func_double,
+ create_func_fast];
+
+for(var c = 0; c < 3; c++) {
+ base_getter_test(cf[c]);
+}
+
+// A special test for LoadKeyedHoleMode. Ensure that optimized code is
+// generated which sets ALLOW_RETURN_HOLE, then add a getter on the prototype
+// that should cause the function to deoptimize.
+
+var a = [3.5,,,3.5];
+fun = function(a) { return a[0] + 5.5; }
+fun(a);
+fun(a);
+fun(a); // should have a monomorphic KeyedLoadIC.
+optimize(fun);
+fun(a);
+assertOptimized(fun);
+
+// returning undefined shouldn't faze us.
+delete a[0];
+fun(a);
+assertOptimized(fun);
+
+// but messing up the prototype chain will.
+a.__proto__ = [];
+fun(a);
+assertUnoptimized(fun);
+
+// Construct a non-trivial prototype chain.
+var a = [3.5,,,,3.5];
+var ap = [,,3.5];
+ap.__proto__ = a.__proto__;
+a.__proto__ = ap;
+fun(a);
+optimize(fun);
+fun(a);
+assertOptimized(fun);
+
+var calls = 0;
+delete a[0];
+ap.__defineGetter__(0, function() { calls++; return 0; });
+fun(a);
+assertEquals(1, calls);
+assertUnoptimized(fun);
diff --git a/deps/v8/test/mjsunit/global-load-from-eval-in-with.js b/deps/v8/test/mjsunit/global-load-from-eval-in-with.js
index d733f6c8d4..a41ad7ee00 100644
--- a/deps/v8/test/mjsunit/global-load-from-eval-in-with.js
+++ b/deps/v8/test/mjsunit/global-load-from-eval-in-with.js
@@ -56,4 +56,3 @@ test({ y: 42 }, "function f() { eval('1'); assertEquals(27, x) }; f();");
// in the eval scopes. Deeper nesting this time.
test({ x: 42 }, "function f() { function g() { eval('1'); assertEquals(42, x) }; g() }; f();");
test({ y: 42 }, "function f() { function g() { eval('1'); assertEquals(27, x) }; g() }; f();");
-
diff --git a/deps/v8/test/mjsunit/global-load-from-nested-eval.js b/deps/v8/test/mjsunit/global-load-from-nested-eval.js
index 3c7ff75691..9c9b015aea 100644
--- a/deps/v8/test/mjsunit/global-load-from-nested-eval.js
+++ b/deps/v8/test/mjsunit/global-load-from-nested-eval.js
@@ -59,8 +59,3 @@ function testEvalDontShadow(source) {
eval('eval(' + source +')');
}
testEvalDontShadow('assertEquals(42, x)');
-
-
-
-
-
diff --git a/deps/v8/test/mjsunit/harmony/array-find.js b/deps/v8/test/mjsunit/harmony/array-find.js
index 906c9cde7b..9f5750eca0 100644
--- a/deps/v8/test/mjsunit/harmony/array-find.js
+++ b/deps/v8/test/mjsunit/harmony/array-find.js
@@ -277,4 +277,4 @@ assertThrows('Array.prototype.find.apply({}, false, [])', TypeError);
assertThrows('Array.prototype.find.apply({}, "", [])', TypeError);
assertThrows('Array.prototype.find.apply({}, {}, [])', TypeError);
assertThrows('Array.prototype.find.apply({}, [], [])', TypeError);
-assertThrows('Array.prototype.find.apply({}, /\d+/, [])', TypeError); \ No newline at end of file
+assertThrows('Array.prototype.find.apply({}, /\d+/, [])', TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/array-findindex.js b/deps/v8/test/mjsunit/harmony/array-findindex.js
index 928cad79e4..a33849dab3 100644
--- a/deps/v8/test/mjsunit/harmony/array-findindex.js
+++ b/deps/v8/test/mjsunit/harmony/array-findindex.js
@@ -277,4 +277,4 @@ assertThrows('Array.prototype.findIndex.apply({}, false, [])', TypeError);
assertThrows('Array.prototype.findIndex.apply({}, "", [])', TypeError);
assertThrows('Array.prototype.findIndex.apply({}, {}, [])', TypeError);
assertThrows('Array.prototype.findIndex.apply({}, [], [])', TypeError);
-assertThrows('Array.prototype.findIndex.apply({}, /\d+/, [])', TypeError); \ No newline at end of file
+assertThrows('Array.prototype.findIndex.apply({}, /\d+/, [])', TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/collections.js b/deps/v8/test/mjsunit/harmony/collections.js
index 174d3d1dc7..7e95b9e110 100644
--- a/deps/v8/test/mjsunit/harmony/collections.js
+++ b/deps/v8/test/mjsunit/harmony/collections.js
@@ -483,4 +483,26 @@ for (var i = 9; i >= 0; i--) {
assertTrue(w.has(k));
w.clear();
assertFalse(w.has(k));
-})(); \ No newline at end of file
+})();
+
+
+(function TestMinusZeroSet() {
+ var m = new Set();
+ m.add(0);
+ m.add(-0);
+ assertEquals(1, m.size);
+ assertTrue(m.has(0));
+ assertTrue(m.has(-0));
+})();
+
+
+(function TestMinusZeroMap() {
+ var m = new Map();
+ m.set(0, 'plus');
+ m.set(-0, 'minus');
+ assertEquals(1, m.size);
+ assertTrue(m.has(0));
+ assertTrue(m.has(-0));
+ assertEquals('minus', m.get(0));
+ assertEquals('minus', m.get(-0));
+})();
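A brief sketch, not part of the patch, of the key equality the new cases assert: Map and Set treat +0 and -0 as the same key, so the second insert overwrites the first and the size stays at one.

var zeros = new Map();
zeros.set(0, 'plus');
zeros.set(-0, 'minus');
// zeros.size === 1; zeros.get(0) === 'minus'; zeros.get(-0) === 'minus'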
diff --git a/deps/v8/test/mjsunit/harmony/dataview-accessors.js b/deps/v8/test/mjsunit/harmony/dataview-accessors.js
index 7b03da7089..c54f8cc20d 100644
--- a/deps/v8/test/mjsunit/harmony/dataview-accessors.js
+++ b/deps/v8/test/mjsunit/harmony/dataview-accessors.js
@@ -114,11 +114,13 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
test(isTestingGet, "Int8", undefined, 0);
test(isTestingGet, "Int8", 8, -128);
test(isTestingGet, "Int8", 15, -1);
+ test(isTestingGet, "Int8", 1e12, undefined);
test(isTestingGet, "Uint8", 0, 0);
test(isTestingGet, "Uint8", undefined, 0);
test(isTestingGet, "Uint8", 8, 128);
test(isTestingGet, "Uint8", 15, 255);
+ test(isTestingGet, "Uint8", 1e12, undefined);
// Little endian.
test(isTestingGet, "Int16", 0, 256, true);
@@ -126,6 +128,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
test(isTestingGet, "Int16", 5, 26213, true);
test(isTestingGet, "Int16", 9, -32127, true);
test(isTestingGet, "Int16", 14, -2, true);
+ test(isTestingGet, "Int16", 1e12, undefined, true);
// Big endian.
test(isTestingGet, "Int16", 0, 1);
@@ -133,6 +136,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
test(isTestingGet, "Int16", 5, 25958);
test(isTestingGet, "Int16", 9, -32382);
test(isTestingGet, "Int16", 14, -257);
+ test(isTestingGet, "Int16", 1e12, undefined);
// Little endian.
test(isTestingGet, "Uint16", 0, 256, true);
@@ -140,6 +144,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
test(isTestingGet, "Uint16", 5, 26213, true);
test(isTestingGet, "Uint16", 9, 33409, true);
test(isTestingGet, "Uint16", 14, 65534, true);
+ test(isTestingGet, "Uint16", 1e12, undefined, true);
// Big endian.
test(isTestingGet, "Uint16", 0, 1);
@@ -147,6 +152,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
test(isTestingGet, "Uint16", 5, 25958);
test(isTestingGet, "Uint16", 9, 33154);
test(isTestingGet, "Uint16", 14, 65279);
+ test(isTestingGet, "Uint16", 1e12, undefined);
// Little endian.
test(isTestingGet, "Int32", 0, 50462976, true);
@@ -155,6 +161,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
test(isTestingGet, "Int32", 6, -2122291354, true);
test(isTestingGet, "Int32", 9, -58490239, true);
test(isTestingGet, "Int32", 12,-66052, true);
+ test(isTestingGet, "Int32", 1e12, undefined, true);
// Big endian.
test(isTestingGet, "Int32", 0, 66051);
@@ -163,6 +170,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
test(isTestingGet, "Int32", 6, 1718059137);
test(isTestingGet, "Int32", 9, -2122152964);
test(isTestingGet, "Int32", 12, -50462977);
+ test(isTestingGet, "Int32", 1e12, undefined);
// Little endian.
test(isTestingGet, "Uint32", 0, 50462976, true);
@@ -171,6 +179,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
test(isTestingGet, "Uint32", 6, 2172675942, true);
test(isTestingGet, "Uint32", 9, 4236477057, true);
test(isTestingGet, "Uint32", 12,4294901244, true);
+ test(isTestingGet, "Uint32", 1e12, undefined, true);
// Big endian.
test(isTestingGet, "Uint32", 0, 66051);
@@ -179,6 +188,7 @@ function runIntegerTestCases(isTestingGet, array, start, length) {
test(isTestingGet, "Uint32", 6, 1718059137);
test(isTestingGet, "Uint32", 9, 2172814332);
test(isTestingGet, "Uint32", 12, 4244504319);
+ test(isTestingGet, "Uint32", 1e12, undefined);
}
function testFloat(isTestingGet, func, array, start, expected) {
@@ -192,6 +202,7 @@ function testFloat(isTestingGet, func, array, start, expected) {
test(isTestingGet, func, 7, expected, true);
createDataView(array, 10, true, start);
test(isTestingGet, func, 10, expected, true);
+ test(isTestingGet, func, 1e12, undefined, true);
// Big endian.
createDataView(array, 0, false);
@@ -203,6 +214,7 @@ function testFloat(isTestingGet, func, array, start, expected) {
test(isTestingGet, func, 7, expected, false);
createDataView(array, 10, false);
test(isTestingGet, func, 10, expected, false);
+ test(isTestingGet, func, 1e12, undefined, false);
}
function runFloatTestCases(isTestingGet, start) {
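A brief sketch, not part of the patch, of what the added 1e12 cases assert for this harmony DataView implementation (later editions of the spec throw a RangeError for the same call):

var view = new DataView(new ArrayBuffer(16));
var outOfRange = view.getUint8(1e12);   // expected to yield undefined in these tests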
diff --git a/deps/v8/test/mjsunit/harmony/generators-iteration.js b/deps/v8/test/mjsunit/harmony/generators-iteration.js
index 7fad97e944..d86a20f9e7 100644
--- a/deps/v8/test/mjsunit/harmony/generators-iteration.js
+++ b/deps/v8/test/mjsunit/harmony/generators-iteration.js
@@ -35,6 +35,18 @@ function assertIteratorResult(value, done, result) {
assertEquals({ value: value, done: done}, result);
}
+function assertIteratorIsClosed(iter) {
+ assertIteratorResult(undefined, true, iter.next());
+ assertDoesNotThrow(function() { iter.next(); });
+}
+
+function assertThrownIteratorIsClosed(iter) {
+  // TODO(yusukesuzuki): Since the status of a thrown generator is
+  // "executing", the following tests fail.
+ // https://code.google.com/p/v8/issues/detail?id=3096
+ // assertIteratorIsClosed(iter);
+}
+
function TestGeneratorResultPrototype() {
function* g() { yield 1; }
var iter = g();
@@ -53,11 +65,12 @@ function TestGenerator(g, expected_values_for_next,
function testNext(thunk) {
var iter = thunk();
for (var i = 0; i < expected_values_for_next.length; i++) {
- assertIteratorResult(expected_values_for_next[i],
- i == expected_values_for_next.length - 1,
- iter.next());
+ var v1 = expected_values_for_next[i];
+ var v2 = i == expected_values_for_next.length - 1;
+ // var v3 = iter.next();
+ assertIteratorResult(v1, v2, iter.next());
}
- assertThrows(function() { iter.next(); }, Error);
+ assertIteratorIsClosed(iter);
}
function testSend(thunk) {
var iter = thunk();
@@ -66,7 +79,7 @@ function TestGenerator(g, expected_values_for_next,
i == expected_values_for_send.length - 1,
iter.next(send_val));
}
- assertThrows(function() { iter.next(send_val); }, Error);
+ assertIteratorIsClosed(iter);
}
function testThrow(thunk) {
for (var i = 0; i < expected_values_for_next.length; i++) {
@@ -78,7 +91,7 @@ function TestGenerator(g, expected_values_for_next,
}
function Sentinel() {}
assertThrows(function () { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function () { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
}
@@ -393,21 +406,20 @@ function TestTryCatch(instantiate) {
assertIteratorResult(1, false, iter.next());
assertIteratorResult(2, false, iter.next());
assertIteratorResult(3, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
+ assertIteratorIsClosed(iter);
}
Test1(instantiate(g));
function Test2(iter) {
assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test2(instantiate(g));
function Test3(iter) {
assertIteratorResult(1, false, iter.next());
assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test3(instantiate(g));
@@ -417,8 +429,7 @@ function TestTryCatch(instantiate) {
var exn = new Sentinel;
assertIteratorResult(exn, false, iter.throw(exn));
assertIteratorResult(3, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
+ assertIteratorIsClosed(iter);
}
Test4(instantiate(g));
@@ -429,8 +440,7 @@ function TestTryCatch(instantiate) {
assertIteratorResult(exn, false, iter.throw(exn));
assertIteratorResult(3, false, iter.next());
assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
+ assertThrownIteratorIsClosed(iter);
}
Test5(instantiate(g));
@@ -440,7 +450,7 @@ function TestTryCatch(instantiate) {
var exn = new Sentinel;
assertIteratorResult(exn, false, iter.throw(exn));
assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test6(instantiate(g));
@@ -448,8 +458,7 @@ function TestTryCatch(instantiate) {
assertIteratorResult(1, false, iter.next());
assertIteratorResult(2, false, iter.next());
assertIteratorResult(3, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
+ assertIteratorIsClosed(iter);
}
Test7(instantiate(g));
}
@@ -466,21 +475,20 @@ function TestTryFinally(instantiate) {
assertIteratorResult(2, false, iter.next());
assertIteratorResult(3, false, iter.next());
assertIteratorResult(4, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
+ assertIteratorIsClosed(iter);
}
Test1(instantiate(g));
function Test2(iter) {
assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test2(instantiate(g));
function Test3(iter) {
assertIteratorResult(1, false, iter.next());
assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test3(instantiate(g));
@@ -489,8 +497,7 @@ function TestTryFinally(instantiate) {
assertIteratorResult(2, false, iter.next());
assertIteratorResult(3, false, iter.throw(new Sentinel));
assertThrows(function() { iter.next(); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
-
+ assertThrownIteratorIsClosed(iter);
}
Test4(instantiate(g));
@@ -499,7 +506,7 @@ function TestTryFinally(instantiate) {
assertIteratorResult(2, false, iter.next());
assertIteratorResult(3, false, iter.throw(new Sentinel));
assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test5(instantiate(g));
@@ -508,7 +515,7 @@ function TestTryFinally(instantiate) {
assertIteratorResult(2, false, iter.next());
assertIteratorResult(3, false, iter.next());
assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test6(instantiate(g));
@@ -518,7 +525,7 @@ function TestTryFinally(instantiate) {
assertIteratorResult(3, false, iter.next());
assertIteratorResult(4, false, iter.next());
assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test7(instantiate(g));
@@ -527,9 +534,7 @@ function TestTryFinally(instantiate) {
assertIteratorResult(2, false, iter.next());
assertIteratorResult(3, false, iter.next());
assertIteratorResult(4, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
-
+ assertIteratorIsClosed(iter);
}
Test8(instantiate(g));
}
@@ -556,14 +561,13 @@ function TestNestedTry(instantiate) {
assertIteratorResult(3, false, iter.next());
assertIteratorResult(4, false, iter.next());
assertIteratorResult(5, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
+ assertIteratorIsClosed(iter);
}
Test1(instantiate(g));
function Test2(iter) {
assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test2(instantiate(g));
@@ -571,7 +575,7 @@ function TestNestedTry(instantiate) {
assertIteratorResult(1, false, iter.next());
assertIteratorResult(4, false, iter.throw(new Sentinel));
assertThrows(function() { iter.next(); }, Sentinel);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test3(instantiate(g));
@@ -579,7 +583,7 @@ function TestNestedTry(instantiate) {
assertIteratorResult(1, false, iter.next());
assertIteratorResult(4, false, iter.throw(new Sentinel));
assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test4(instantiate(g));
@@ -591,9 +595,7 @@ function TestNestedTry(instantiate) {
assertIteratorResult(3, false, iter.next());
assertIteratorResult(4, false, iter.next());
assertIteratorResult(5, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
- assertThrows(function() { iter.next(); }, Error);
-
+ assertIteratorIsClosed(iter);
}
Test5(instantiate(g));
@@ -604,7 +606,7 @@ function TestNestedTry(instantiate) {
assertIteratorResult(exn, false, iter.throw(exn));
assertIteratorResult(4, false, iter.throw(new Sentinel2));
assertThrows(function() { iter.next(); }, Sentinel2);
- assertThrows(function() { iter.next(); }, Error);
+ assertThrownIteratorIsClosed(iter);
}
Test6(instantiate(g));
@@ -616,8 +618,7 @@ function TestNestedTry(instantiate) {
assertIteratorResult(3, false, iter.next());
assertIteratorResult(4, false, iter.throw(new Sentinel2));
assertThrows(function() { iter.next(); }, Sentinel2);
- assertThrows(function() { iter.next(); }, Error);
-
+ assertThrownIteratorIsClosed(iter);
}
Test7(instantiate(g));
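
The two helpers introduced here, assertIteratorIsClosed and assertThrownIteratorIsClosed, are defined earlier in generators.js and are not shown in this hunk. One plausible shape, assuming the revised semantics these tests exercise (a finished generator keeps reporting completion from next() instead of throwing):

  // Hypothetical sketch; the real helpers live near the top of generators.js.
  function assertIteratorIsClosed(iter) {
    // A closed generator reports { value: undefined, done: true } on every call.
    assertIteratorResult(undefined, true, iter.next());
    assertIteratorResult(undefined, true, iter.next());
  }

  function assertThrownIteratorIsClosed(iter) {
    // A generator closed by an unhandled throw should behave the same afterwards.
    assertIteratorIsClosed(iter);
  }
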
diff --git a/deps/v8/test/mjsunit/harmony/iteration-semantics.js b/deps/v8/test/mjsunit/harmony/iteration-semantics.js
index 96b6d1452c..2449115dd4 100644
--- a/deps/v8/test/mjsunit/harmony/iteration-semantics.js
+++ b/deps/v8/test/mjsunit/harmony/iteration-semantics.js
@@ -25,7 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony --harmony-generators
+// Flags: --harmony-iteration
+// Flags: --harmony-generators --harmony-scoping --harmony-proxies
// Test for-of semantics.
diff --git a/deps/v8/test/mjsunit/harmony/math-clz32.js b/deps/v8/test/mjsunit/harmony/math-clz32.js
new file mode 100644
index 0000000000..bc15ad2569
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/math-clz32.js
@@ -0,0 +1,28 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-maths
+
+[NaN, Infinity, -Infinity, 0, -0, "abc", "Infinity", "-Infinity", {}].forEach(
+ function(x) {
+ assertEquals(32, Math.clz32(x));
+ }
+);
+
+function testclz(x) {
+ for (var i = 0; i < 33; i++) {
+ if (x & 0x80000000) return i;
+ x <<= 1;
+ }
+ return 32;
+}
+
+var max = Math.pow(2, 40);
+for (var x = 0; x < max; x = x * 1.01 + 1) {
+ assertEquals(testclz(x), Math.clz32(x));
+ assertEquals(testclz(-x), Math.clz32(-x));
+ assertEquals(testclz(x), Math.clz32({ valueOf: function() { return x; } }));
+ assertEquals(testclz(-x),
+ Math.clz32({ toString: function() { return -x; } }));
+}
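
For orientation, Math.clz32 counts the leading zero bits of its argument after ToUint32 conversion, which is exactly what the testclz reference loop above computes. A few worked values in the same mjsunit style (not part of the committed file; assumes the same --harmony-maths flag):

  assertEquals(31, Math.clz32(1));            // 0x00000001: 31 leading zero bits
  assertEquals(0, Math.clz32(0x80000000));    // top bit set: no leading zeros
  assertEquals(0, Math.clz32(-1));            // ToUint32(-1) is 0xFFFFFFFF
  assertEquals(32, Math.clz32("abc"));        // non-numeric input converts to 0
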
diff --git a/deps/v8/test/mjsunit/harmony/math-fround.js b/deps/v8/test/mjsunit/harmony/math-fround.js
new file mode 100644
index 0000000000..ea432ea2de
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/math-fround.js
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-maths
+
+// Monkey-patch Float32Array.
+Float32Array = function(x) { this[0] = 0; };
+
+assertTrue(isNaN(Math.fround(NaN)));
+assertTrue(isNaN(Math.fround(function() {})));
+assertTrue(isNaN(Math.fround({ toString: function() { return NaN; } })));
+assertTrue(isNaN(Math.fround({ valueOf: function() { return "abc"; } })));
+assertEquals("Infinity", String(1/Math.fround(0)));
+assertEquals("-Infinity", String(1/Math.fround(-0)));
+assertEquals("Infinity", String(Math.fround(Infinity)));
+assertEquals("-Infinity", String(Math.fround(-Infinity)));
+
+assertEquals("Infinity", String(Math.fround(1E200)));
+assertEquals("-Infinity", String(Math.fround(-1E200)));
+assertEquals("Infinity", String(1/Math.fround(1E-300)));
+assertEquals("-Infinity", String(1/Math.fround(-1E-300)));
+
+mantissa_23_shift = Math.pow(2, -23);
+mantissa_29_shift = Math.pow(2, -23-29);
+
+// JavaScript implementation of IEEE 754 used to test double-to-single conversion.
+function ieee754float(sign_bit,
+ exponent_bits,
+ mantissa_23_bits,
+ mantissa_29_bits) {
+ this.sign_bit = sign_bit & 1;
+ this.exponent_bits = exponent_bits & ((1 << 11) - 1);
+ this.mantissa_23_bits = mantissa_23_bits & ((1 << 23) - 1);
+ this.mantissa_29_bits = mantissa_29_bits & ((1 << 29) - 1);
+}
+
+ieee754float.prototype.returnSpecial = function() {
+ var sign = this.sign_bit ? -1 : 1;
+ if (this.mantissa_23_bits == 0 && this.mantissa_29_bits == 0) return sign * Infinity;
+ return NaN;
+}
+
+ieee754float.prototype.toDouble = function() {
+ var sign = this.sign_bit ? -1 : 1;
+ var exponent = this.exponent_bits - 1023;
+ if (exponent == -1023) return this.returnSpecial();
+ var mantissa = 1 + this.mantissa_23_bits * mantissa_23_shift +
+ this.mantissa_29_bits * mantissa_29_shift;
+ return sign * Math.pow(2, exponent) * mantissa;
+}
+
+ieee754float.prototype.toSingle = function() {
+ var sign = this.sign_bit ? -1 : 1;
+ var exponent = this.exponent_bits - 1023;
+ if (exponent == -1023) return this.returnSpecial();
+ if (exponent > 127) return sign * Infinity;
+ if (exponent < -126) return this.toSingleSubnormal(sign, exponent);
+ var round = this.mantissa_29_bits >> 28;
+ var mantissa = 1 + (this.mantissa_23_bits + round) * mantissa_23_shift;
+ return sign * Math.pow(2, exponent) * mantissa;
+}
+
+ieee754float.prototype.toSingleSubnormal = function(sign, exponent) {
+ var shift = -126 - exponent;
+ if (shift > 24) return sign * 0;
+ var round_mask = 1 << (shift - 1);
+ var mantissa_23_bits = this.mantissa_23_bits + (1 << 23);
+ var round = ((mantissa_23_bits & round_mask) != 0) | 0;
+ if (round) { // Round to even if tied.
+ var tied_mask = round_mask - 1;
+ var result_last_bit_mask = 1 << shift;
+ var tied = this.mantissa_29_bits == 0 &&
+ (mantissa_23_bits & tied_mask ) == 0;
+ var result_already_even = (mantissa_23_bits & result_last_bit_mask) == 0;
+ if (tied && result_already_even) round = 0;
+ }
+ mantissa_23_bits >>= shift;
+ var mantissa = (mantissa_23_bits + round) * mantissa_23_shift;
+ return sign * Math.pow(2, -126) * mantissa;
+}
+
+
+var pi = new ieee754float(0, 0x400, 0x490fda, 0x14442d18);
+assertEquals(pi.toSingle(), Math.fround(pi.toDouble()));
+
+function fuzz_mantissa(sign, exp, m1inc, m2inc) {
+ for (var m1 = 0; m1 < (1 << 23); m1 += m1inc) {
+ for (var m2 = 0; m2 < (1 << 29); m2 += m2inc) {
+ var float = new ieee754float(sign, exp, m1, m2);
+ assertEquals(float.toSingle(), Math.fround(float.toDouble()));
+ }
+ }
+}
+
+for (var sign = 0; sign < 2; sign++) {
+ for (var exp = 1024 - 170; exp < 1024 + 170; exp++) {
+ fuzz_mantissa(sign, exp, 1337 * exp - sign, 127913 * exp - sign);
+ }
+}
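
The test monkey-patches Float32Array at the top precisely so that Math.fround cannot lean on it and the manual IEEE-754 model above is exercised instead. Outside the test, the usual way to get the same double-to-single rounding is a typed-array round trip; a minimal sketch, assuming an unpatched Float32Array (froundViaTypedArray is an illustrative name, not from the file):

  var buf = new Float32Array(1);
  function froundViaTypedArray(x) {
    buf[0] = x;      // stores x rounded to the nearest float32
    return buf[0];   // reads it back, widened to a double
  }
  // With Float32Array intact, froundViaTypedArray(Math.PI) === Math.fround(Math.PI).
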
diff --git a/deps/v8/test/mjsunit/harmony/math-hyperbolic.js b/deps/v8/test/mjsunit/harmony/math-hyperbolic.js
new file mode 100644
index 0000000000..c45a19c526
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/math-hyperbolic.js
@@ -0,0 +1,138 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-maths
+
+[Math.sinh, Math.cosh, Math.tanh, Math.asinh, Math.acosh, Math.atanh].
+ forEach(function(fun) {
+ assertTrue(isNaN(fun(NaN)));
+ assertTrue(isNaN(fun("abc")));
+ assertTrue(isNaN(fun({})));
+ assertEquals(fun(0), fun([]));
+ assertTrue(isNaN(fun([1, 1])));
+ assertEquals(fun(1.11), fun({ toString: function() { return "1.11"; } }));
+ assertEquals(fun(-3.1), fun({ toString: function() { return -3.1; } }));
+ assertEquals(fun(-1.1), fun({ valueOf: function() { return "-1.1"; } }));
+ assertEquals(fun(3.11), fun({ valueOf: function() { return 3.11; } }));
+});
+
+
+function test_id(fun, rev, value) {
+ assertEqualsDelta(1, rev(fun(value))/value, 1E-7);
+}
+
+[Math.PI, 2, 5, 1E-5, 0.3].forEach(function(x) {
+ test_id(Math.sinh, Math.asinh, x);
+ test_id(Math.sinh, Math.asinh, -x);
+ test_id(Math.cosh, Math.acosh, x);
+ test_id(Math.tanh, Math.atanh, x);
+ test_id(Math.tanh, Math.atanh, -x);
+});
+
+
+[Math.sinh, Math.asinh, Math.tanh, Math.atanh].forEach(function(fun) {
+ assertEquals("-Infinity", String(1/fun(-0)));
+ assertEquals("Infinity", String(1/fun(0)));
+});
+
+
+[Math.sinh, Math.asinh].forEach(function(fun) {
+ assertEquals("-Infinity", String(fun(-Infinity)));
+ assertEquals("Infinity", String(fun(Infinity)));
+ assertEquals("-Infinity", String(fun("-Infinity")));
+ assertEquals("Infinity", String(fun("Infinity")));
+});
+
+
+assertEquals("Infinity", String(Math.cosh(-Infinity)));
+assertEquals("Infinity", String(Math.cosh(Infinity)));
+assertEquals("Infinity", String(Math.cosh("-Infinity")));
+assertEquals("Infinity", String(Math.cosh("Infinity")));
+
+
+assertEquals("-Infinity", String(Math.atanh(-1)));
+assertEquals("Infinity", String(Math.atanh(1)));
+
+// Math.atanh(x) is NaN for |x| > 1 and for NaN input.
+[1.000000000001, Math.PI, 10000000, 2, Infinity, NaN].forEach(function(x) {
+ assertTrue(isNaN(Math.atanh(-x)));
+ assertTrue(isNaN(Math.atanh(x)));
+});
+
+
+assertEquals(1, Math.tanh(Infinity));
+assertEquals(-1, Math.tanh(-Infinity));
+assertEquals(1, Math.cosh(0));
+assertEquals(1, Math.cosh(-0));
+
+assertEquals(0, Math.acosh(1));
+assertEquals("Infinity", String(Math.acosh(Infinity)));
+
+// Math.acosh(x) is NaN for x < 1
+[0.99999999999, 0.2, -1000, 0, -0].forEach(function(x) {
+ assertTrue(isNaN(Math.acosh(x)));
+});
+
+
+// Some random samples.
+assertEqualsDelta(0.5210953054937, Math.sinh(0.5), 1E-12);
+assertEqualsDelta(74.203210577788, Math.sinh(5), 1E-12);
+assertEqualsDelta(-0.5210953054937, Math.sinh(-0.5), 1E-12);
+assertEqualsDelta(-74.203210577788, Math.sinh(-5), 1E-12);
+
+assertEqualsDelta(1.1276259652063, Math.cosh(0.5), 1E-12);
+assertEqualsDelta(74.209948524787, Math.cosh(5), 1E-12);
+assertEqualsDelta(1.1276259652063, Math.cosh(-0.5), 1E-12);
+assertEqualsDelta(74.209948524787, Math.cosh(-5), 1E-12);
+
+assertEqualsDelta(0.4621171572600, Math.tanh(0.5), 1E-12);
+assertEqualsDelta(0.9999092042625, Math.tanh(5), 1E-12);
+assertEqualsDelta(-0.4621171572600, Math.tanh(-0.5), 1E-12);
+assertEqualsDelta(-0.9999092042625, Math.tanh(-5), 1E-12);
+
+assertEqualsDelta(0.4812118250596, Math.asinh(0.5), 1E-12);
+assertEqualsDelta(2.3124383412727, Math.asinh(5), 1E-12);
+assertEqualsDelta(-0.4812118250596, Math.asinh(-0.5), 1E-12);
+assertEqualsDelta(-2.3124383412727, Math.asinh(-5), 1E-12);
+
+assertEqualsDelta(0.9624236501192, Math.acosh(1.5), 1E-12);
+assertEqualsDelta(2.2924316695612, Math.acosh(5), 1E-12);
+assertEqualsDelta(0.4435682543851, Math.acosh(1.1), 1E-12);
+assertEqualsDelta(1.3169578969248, Math.acosh(2), 1E-12);
+
+assertEqualsDelta(0.5493061443341, Math.atanh(0.5), 1E-12);
+assertEqualsDelta(0.1003353477311, Math.atanh(0.1), 1E-12);
+assertEqualsDelta(-0.5493061443341, Math.atanh(-0.5), 1E-12);
+assertEqualsDelta(-0.1003353477311, Math.atanh(-0.1), 1E-12);
+
+[0, 1E-50, 1E-10, 1E10, 1E50, 1E100, 1E150].forEach(function(x) {
+ assertEqualsDelta(Math.asinh(x), -Math.asinh(-x), 1E-12);
+});
+
+[1-(1E-16), 0, 1E-10, 1E-50].forEach(function(x) {
+ assertEqualsDelta(Math.atanh(x), -Math.atanh(-x), 1E-12);
+});
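
The identity checks above work because each hyperbolic function and its inverse cancel. As a reminder of the definitions being exercised, a short sketch in the same style (sinh and asinh here are local reference functions, not the builtins):

  function sinh(x) { return (Math.exp(x) - Math.exp(-x)) / 2; }
  function asinh(x) { return Math.log(x + Math.sqrt(x * x + 1)); }
  // Round trip in the spirit of test_id above: asinh(sinh(x)) recovers x.
  assertEqualsDelta(1, asinh(sinh(0.3)) / 0.3, 1E-7);
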
diff --git a/deps/v8/test/mjsunit/harmony/math-hypot.js b/deps/v8/test/mjsunit/harmony/math-hypot.js
new file mode 100644
index 0000000000..1052627213
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/math-hypot.js
@@ -0,0 +1,94 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-maths
+
+assertTrue(isNaN(Math.hypot({})));
+assertTrue(isNaN(Math.hypot(undefined, 1)));
+assertTrue(isNaN(Math.hypot(1, undefined)));
+assertTrue(isNaN(Math.hypot(Math.hypot, 1)));
+assertEquals(1, Math.hypot(1));
+assertEquals(Math.PI, Math.hypot(Math.PI));
+assertEquals(5, Math.hypot(3, 4));
+assertEquals(13, Math.hypot(3, 4, 12));
+assertEquals(15, Math.hypot(" 2 ",
+ "0x5",
+ { valueOf: function() { return "0xe"; } }));
+assertEquals(17, Math.hypot({ valueOf: function() { return 1; } },
+ { toString: function() { return 12; } },
+ { toString: function() { return "12"; } }));
+
+// Check function length.
+assertEquals(2, Math.hypot.length);
+
+// Check that 0 is returned for no arguments.
+assertEquals(0, Math.hypot());
+
+// Check that Infinity is returned if any of the arguments is +/-Infinity.
+assertEquals("Infinity", String(Math.hypot(NaN, Infinity)));
+assertEquals("Infinity", String(Math.hypot(1, -Infinity, 2)));
+
+// Check that NaN is returned if any argument is NaN and none is +/-Infinity.
+assertTrue(isNaN(Math.hypot(1, 2, NaN)));
+assertTrue(isNaN(Math.hypot(NaN, NaN, 4)));
+
+// Check that +0 is returned if all arguments are +/-0.
+assertEquals("Infinity", String(1/Math.hypot(-0)));
+assertEquals("Infinity", String(1/Math.hypot(0)));
+assertEquals("Infinity", String(1/Math.hypot(-0, -0)));
+assertEquals("Infinity", String(1/Math.hypot(-0, 0)));
+
+// Check that we avoid overflows and underflows.
+assertEqualsDelta(5E300, Math.hypot(3E300, 4E300), 1E285);
+assertEqualsDelta(17E-300, Math.hypot(8E-300, 15E-300), 1E-315);
+assertEqualsDelta(19E300, Math.hypot(6E300, 6E300, 17E300), 1E285);
+
+// Check that we sufficiently account for rounding errors when summing up.
+// For this, we calculate a simple fractal square that recurses in the
+// fourth quarter.
+var fractals = [];
+var edge_length = Math.E * 1E20;
+
+var fractal_length = edge_length;
+while(fractal_length >= 1) {
+ fractal_length *= 0.5;
+ fractals.push(fractal_length);
+ fractals.push(fractal_length);
+ fractals.push(fractal_length);
+}
+
+fractals.push(fractal_length);
+assertEqualsDelta(edge_length, Math.hypot.apply(Math, fractals), 1E-15);
+fractals.reverse();
+assertEqualsDelta(edge_length, Math.hypot.apply(Math, fractals), 1E-15);
+// Also shuffle the array.
+var c = 0;
+function random_sort(a, b) { c++; return (c & 3) - 1.5; }
+fractals.sort(random_sort);
+assertEqualsDelta(edge_length, Math.hypot.apply(Math, fractals), 1E-15);
+fractals.sort(random_sort);
+assertEqualsDelta(edge_length, Math.hypot.apply(Math, fractals), 1E-15);
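
The overflow/underflow assertions hold because a careful hypot rescales by the largest magnitude before squaring; the fractal-square case then stresses how the partial sums are accumulated. A minimal sketch of the rescaling idea only (scaledHypot is an illustrative name, special cases such as Infinity and NaN are omitted, and this is not V8's implementation):

  function scaledHypot() {
    var args = Array.prototype.map.call(arguments, Number);
    var max = args.reduce(function(m, v) { return Math.max(m, Math.abs(v)); }, 0);
    if (max === 0) return 0;
    // Dividing by the largest magnitude keeps every square in [0, 1], so
    // 3E300 and 4E300 no longer overflow to Infinity when squared.
    var sum = args.reduce(function(s, v) { return s + (v / max) * (v / max); }, 0);
    return max * Math.sqrt(sum);
  }
  assertEqualsDelta(5E300, scaledHypot(3E300, 4E300), 1E285);
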
diff --git a/deps/v8/test/mjsunit/harmony/math-log2-log10.js b/deps/v8/test/mjsunit/harmony/math-log2-log10.js
new file mode 100644
index 0000000000..2ab496012c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/math-log2-log10.js
@@ -0,0 +1,47 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-maths
+
+[Math.log10, Math.log2].forEach( function(fun) {
+ assertTrue(isNaN(fun(NaN)));
+ assertTrue(isNaN(fun(fun)));
+ assertTrue(isNaN(fun({ toString: function() { return NaN; } })));
+ assertTrue(isNaN(fun({ valueOf: function() { return -1; } })));
+ assertTrue(isNaN(fun({ valueOf: function() { return "abc"; } })));
+ assertTrue(isNaN(fun(-0.1)));
+ assertTrue(isNaN(fun(-1)));
+ assertEquals("-Infinity", String(fun(0)));
+ assertEquals("-Infinity", String(fun(-0)));
+ assertEquals(0, fun(1));
+ assertEquals("Infinity", String(fun(Infinity)));
+});
+
+for (var i = -300; i < 300; i += 0.7) {
+ assertEqualsDelta(i, Math.log10(Math.pow(10, i)), 1E-13);
+ assertEqualsDelta(i, Math.log2(Math.pow(2, i)), 1E-13);
+}
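
The loop above checks the identities log10(10^i) ≈ i and log2(2^i) ≈ i, with a small delta to absorb the error of computing them via natural logarithms. The underlying change-of-base relationship, as a sketch (log10 and log2 here are local reference functions, not the builtins):

  function log10(x) { return Math.log(x) / Math.LN10; }
  function log2(x)  { return Math.log(x) / Math.LN2; }
  assertEqualsDelta(10, log2(Math.pow(2, 10)), 1E-13);
  assertEqualsDelta(3, log10(1000), 1E-13);
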
diff --git a/deps/v8/test/mjsunit/harmony/microtask-delivery.js b/deps/v8/test/mjsunit/harmony/microtask-delivery.js
new file mode 100644
index 0000000000..566a39d03e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/microtask-delivery.js
@@ -0,0 +1,168 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-observation --harmony-promises --allow-natives-syntax
+
+var ordering = [];
+function reset() {
+ ordering = [];
+}
+
+function assertArrayValues(expected, actual) {
+ assertEquals(expected.length, actual.length);
+ for (var i = 0; i < expected.length; i++) {
+ assertEquals(expected[i], actual[i]);
+ }
+}
+
+function assertOrdering(expected) {
+ %RunMicrotasks();
+ assertArrayValues(expected, ordering);
+}
+
+function newPromise(id, fn) {
+ var r;
+ var t = 1;
+ var promise = new Promise(function(resolve) {
+ r = resolve;
+ if (fn) fn();
+ });
+
+ var next = promise.then(function(value) {
+ ordering.push('p' + id);
+ return value;
+ });
+
+ return {
+ resolve: r,
+ then: function(fn) {
+ next = next.then(function(value) {
+ ordering.push('p' + id + ':' + t++);
+ return fn ? fn(value) : value;
+ });
+
+ return this;
+ }
+ };
+}
+
+function newObserver(id, fn, obj) {
+ var observer = {
+ value: 1,
+ recordCounts: []
+ };
+
+ Object.observe(observer, function(records) {
+ ordering.push('o' + id);
+ observer.recordCounts.push(records.length);
+ if (fn) fn();
+ });
+
+ return observer;
+}
+
+
+(function PromiseThens() {
+ reset();
+
+ var p1 = newPromise(1).then();
+ var p2 = newPromise(2).then();
+
+ p1.resolve();
+ p2.resolve();
+
+ assertOrdering(['p1', 'p2', 'p1:1', 'p2:1']);
+})();
+
+
+(function ObserversBatch() {
+ reset();
+
+ var p1 = newPromise(1);
+ var p2 = newPromise(2);
+ var p3 = newPromise(3);
+
+ var ob1 = newObserver(1);
+ var ob2 = newObserver(2, function() {
+ ob3.value++;
+ p3.resolve();
+ ob1.value++;
+ });
+ var ob3 = newObserver(3);
+
+ p1.resolve();
+ ob1.value++;
+ p2.resolve();
+ ob2.value++;
+
+ assertOrdering(['p1', 'o1', 'o2', 'p2', 'o1', 'o3', 'p3']);
+ assertArrayValues([1, 1], ob1.recordCounts);
+ assertArrayValues([1], ob2.recordCounts);
+ assertArrayValues([1], ob3.recordCounts);
+})();
+
+
+(function ObserversGetAllRecords() {
+ reset();
+
+ var p1 = newPromise(1);
+ var p2 = newPromise(2);
+ var ob1 = newObserver(1, function() {
+ ob2.value++;
+ });
+ var ob2 = newObserver(2);
+
+ p1.resolve();
+ ob1.value++;
+ p2.resolve();
+ ob2.value++;
+
+ assertOrdering(['p1', 'o1', 'o2', 'p2']);
+ assertArrayValues([1], ob1.recordCounts);
+ assertArrayValues([2], ob2.recordCounts);
+})();
+
+
+(function NewObserverDeliveryGetsNewMicrotask() {
+ reset();
+
+ var p1 = newPromise(1);
+ var p2 = newPromise(2);
+ var ob1 = newObserver(1);
+ var ob2 = newObserver(2, function() {
+ ob1.value++;
+ });
+
+ p1.resolve();
+ ob1.value++;
+ p2.resolve();
+ ob2.value++;
+
+ assertOrdering(['p1', 'o1', 'o2', 'p2', 'o1']);
+ assertArrayValues([1, 1], ob1.recordCounts);
+ assertArrayValues([1], ob2.recordCounts);
+})();
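
The assertOrdering helper flushes the microtask queue with %RunMicrotasks() (hence --allow-natives-syntax) before comparing the recorded ordering. The PromiseThens case boils down to: first-level then callbacks for both promises run before either chained callback, because each then handler is queued as its own microtask in resolution order. A condensed illustration, assuming the same flags this file declares:

  var log = [];
  var p = Promise.resolve();
  p.then(function() { log.push('a1'); }).then(function() { log.push('a2'); });
  p.then(function() { log.push('b1'); });
  %RunMicrotasks();
  // The first round of callbacks runs before the chained one: ['a1', 'b1', 'a2'].
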
diff --git a/deps/v8/test/mjsunit/harmony/object-observe.js b/deps/v8/test/mjsunit/harmony/object-observe.js
index f94ab75e9a..fb15a1fa83 100644
--- a/deps/v8/test/mjsunit/harmony/object-observe.js
+++ b/deps/v8/test/mjsunit/harmony/object-observe.js
@@ -147,8 +147,9 @@ assertThrows(function() { notifier.performChange(1, function(){}); }, TypeError)
assertThrows(function() { notifier.performChange(undefined, function(){}); }, TypeError);
assertThrows(function() { notifier.performChange('foo', undefined); }, TypeError);
assertThrows(function() { notifier.performChange('foo', 'bar'); }, TypeError);
+var global = this;
notifier.performChange('foo', function() {
- assertEquals(undefined, this);
+ assertEquals(global, this);
});
var notify = notifier.notify;
@@ -187,21 +188,21 @@ Object.deliverChangeRecords(observer.callback);
// Multiple records are delivered.
reset();
notifier.notify({
- type: 'updated',
+ type: 'update',
name: 'foo',
expando: 1
});
notifier.notify({
object: notifier, // object property is ignored
- type: 'deleted',
+ type: 'delete',
name: 'bar',
expando2: 'str'
});
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, name: 'foo', type: 'updated', expando: 1 },
- { object: obj, name: 'bar', type: 'deleted', expando2: 'str' }
+ { object: obj, name: 'foo', type: 'update', expando: 1 },
+ { object: obj, name: 'bar', type: 'delete', expando2: 'str' }
]);
// Non-string accept values are coerced to strings
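
From this point on, the hunks in this file are dominated by one mechanical rename: the observation change-record types move to their final spec names. The mapping applied throughout, shown as a plain lookup for orientation only (renamedRecordTypes is not part of the test):

  var renamedRecordTypes = {
    'new':          'add',
    'updated':      'update',
    'deleted':      'delete',
    'reconfigured': 'reconfigure',
    'prototype':    'setPrototype'
  };
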
@@ -235,7 +236,7 @@ reset();
Object.observe(obj, observer.callback);
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
});
Object.deliverChangeRecords(observer.callback);
observer.assertCalled();
@@ -245,7 +246,7 @@ observer.assertCalled();
reset();
Object.unobserve(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
});
Object.deliverChangeRecords(observer.callback);
observer.assertNotCalled();
@@ -256,7 +257,7 @@ reset();
Object.unobserve(obj, observer.callback);
Object.unobserve(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
});
Object.deliverChangeRecords(observer.callback);
observer.assertNotCalled();
@@ -265,11 +266,11 @@ observer.assertNotCalled();
// Re-observation works and only includes changeRecords after the call.
reset();
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
});
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
});
records = undefined;
Object.deliverChangeRecords(observer.callback);
@@ -283,7 +284,7 @@ Object.observe(obj, observer.callback);
obj.id = 1;
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, type: 'new', name: 'id' },
+ { object: obj, type: 'add', name: 'id' },
]);
// The empty-string property is observable
@@ -295,110 +296,205 @@ obj[''] = ' ';
delete obj[''];
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, type: 'new', name: '' },
- { object: obj, type: 'updated', name: '', oldValue: '' },
- { object: obj, type: 'deleted', name: '', oldValue: ' ' },
+ { object: obj, type: 'add', name: '' },
+ { object: obj, type: 'update', name: '', oldValue: '' },
+ { object: obj, type: 'delete', name: '', oldValue: ' ' },
]);
+// Object.preventExtensions
+reset();
+var obj = { foo: 'bar'};
+Object.observe(obj, observer.callback);
+obj.baz = 'bat';
+Object.preventExtensions(obj);
+
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: obj, type: 'add', name: 'baz' },
+ { object: obj, type: 'preventExtensions' },
+]);
+
+reset();
+var obj = { foo: 'bar'};
+Object.preventExtensions(obj);
+Object.observe(obj, observer.callback);
+Object.preventExtensions(obj);
+Object.deliverChangeRecords(observer.callback);
+observer.assertNotCalled();
+
+// Object.freeze
+reset();
+var obj = { a: 'a' };
+Object.defineProperty(obj, 'b', {
+ writable: false,
+ configurable: true,
+ value: 'b'
+});
+Object.defineProperty(obj, 'c', {
+ writable: true,
+ configurable: false,
+ value: 'c'
+});
+Object.defineProperty(obj, 'd', {
+ writable: false,
+ configurable: false,
+ value: 'd'
+});
+Object.observe(obj, observer.callback);
+Object.freeze(obj);
+
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: obj, type: 'reconfigure', name: 'a' },
+ { object: obj, type: 'reconfigure', name: 'b' },
+ { object: obj, type: 'reconfigure', name: 'c' },
+ { object: obj, type: 'preventExtensions' },
+]);
+
+reset();
+var obj = { foo: 'bar'};
+Object.freeze(obj);
+Object.observe(obj, observer.callback);
+Object.freeze(obj);
+Object.deliverChangeRecords(observer.callback);
+observer.assertNotCalled();
+
+// Object.seal
+reset();
+var obj = { a: 'a' };
+Object.defineProperty(obj, 'b', {
+ writable: false,
+ configurable: true,
+ value: 'b'
+});
+Object.defineProperty(obj, 'c', {
+ writable: true,
+ configurable: false,
+ value: 'c'
+});
+Object.defineProperty(obj, 'd', {
+ writable: false,
+ configurable: false,
+ value: 'd'
+});
+Object.observe(obj, observer.callback);
+Object.seal(obj);
+
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: obj, type: 'reconfigure', name: 'a' },
+ { object: obj, type: 'reconfigure', name: 'b' },
+ { object: obj, type: 'preventExtensions' },
+]);
+
+reset();
+var obj = { foo: 'bar'};
+Object.seal(obj);
+Object.observe(obj, observer.callback);
+Object.seal(obj);
+Object.deliverChangeRecords(observer.callback);
+observer.assertNotCalled();
+
// Observing a continuous stream of changes, while intermittently unobserving.
reset();
+var obj = {};
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
val: 1
});
Object.unobserve(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
val: 2
});
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
val: 3
});
Object.unobserve(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
val: 4
});
Object.observe(obj, observer.callback);
Object.getNotifier(obj).notify({
- type: 'updated',
+ type: 'update',
val: 5
});
Object.unobserve(obj, observer.callback);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, type: 'updated', val: 1 },
- { object: obj, type: 'updated', val: 3 },
- { object: obj, type: 'updated', val: 5 }
+ { object: obj, type: 'update', val: 1 },
+ { object: obj, type: 'update', val: 3 },
+ { object: obj, type: 'update', val: 5 }
]);
// Accept
reset();
Object.observe(obj, observer.callback, ['somethingElse']);
Object.getNotifier(obj).notify({
- type: 'new'
+ type: 'add'
});
Object.getNotifier(obj).notify({
- type: 'updated'
+ type: 'update'
});
Object.getNotifier(obj).notify({
- type: 'deleted'
+ type: 'delete'
});
Object.getNotifier(obj).notify({
- type: 'reconfigured'
+ type: 'reconfigure'
});
Object.getNotifier(obj).notify({
- type: 'prototype'
+ type: 'setPrototype'
});
Object.deliverChangeRecords(observer.callback);
observer.assertNotCalled();
reset();
-Object.observe(obj, observer.callback, ['new', 'deleted', 'prototype']);
+Object.observe(obj, observer.callback, ['add', 'delete', 'setPrototype']);
Object.getNotifier(obj).notify({
- type: 'new'
+ type: 'add'
});
Object.getNotifier(obj).notify({
- type: 'updated'
+ type: 'update'
});
Object.getNotifier(obj).notify({
- type: 'deleted'
+ type: 'delete'
});
Object.getNotifier(obj).notify({
- type: 'deleted'
+ type: 'delete'
});
Object.getNotifier(obj).notify({
- type: 'reconfigured'
+ type: 'reconfigure'
});
Object.getNotifier(obj).notify({
- type: 'prototype'
+ type: 'setPrototype'
});
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, type: 'new' },
- { object: obj, type: 'deleted' },
- { object: obj, type: 'deleted' },
- { object: obj, type: 'prototype' }
+ { object: obj, type: 'add' },
+ { object: obj, type: 'delete' },
+ { object: obj, type: 'delete' },
+ { object: obj, type: 'setPrototype' }
]);
reset();
-Object.observe(obj, observer.callback, ['updated', 'foo']);
+Object.observe(obj, observer.callback, ['update', 'foo']);
Object.getNotifier(obj).notify({
- type: 'new'
+ type: 'add'
});
Object.getNotifier(obj).notify({
- type: 'updated'
+ type: 'update'
});
Object.getNotifier(obj).notify({
- type: 'deleted'
+ type: 'delete'
});
Object.getNotifier(obj).notify({
type: 'foo'
@@ -411,7 +507,7 @@ Object.getNotifier(obj).notify({
});
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, type: 'updated' },
+ { object: obj, type: 'update' },
{ object: obj, type: 'foo' },
{ object: obj, type: 'foo' }
]);
@@ -434,12 +530,10 @@ Thingy.prototype = {
notifier.performChange(Thingy.INCREMENT, function() {
self.a += amount;
self.b += amount;
- });
- notifier.notify({
- object: this,
- type: Thingy.INCREMENT,
- incremented: amount
+ return {
+ incremented: amount
+ }; // implicit notify
});
},
@@ -450,12 +544,10 @@ Thingy.prototype = {
notifier.performChange(Thingy.MULTIPLY, function() {
self.a *= amount;
self.b *= amount;
- });
- notifier.notify({
- object: this,
- type: Thingy.MULTIPLY,
- multiplied: amount
+ return {
+ multiplied: amount
+ }; // implicit notify
});
},
@@ -466,13 +558,11 @@ Thingy.prototype = {
notifier.performChange(Thingy.INCREMENT_AND_MULTIPLY, function() {
self.increment(incAmount);
self.multiply(multAmount);
- });
- notifier.notify({
- object: this,
- type: Thingy.INCREMENT_AND_MULTIPLY,
- incremented: incAmount,
- multiplied: multAmount
+ return {
+ incremented: incAmount,
+ multiplied: multAmount
+ }; // implicit notify
});
}
}
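
These hunks switch the synthetic-change helpers to the newer performChange contract: instead of a separate notifier.notify call, the performChange callback returns a plain object and the notifier emits one record of the given type with those properties merged in (the "implicit notify"). A minimal sketch of the pattern, assuming the --harmony-observation flag this file uses (Counter and onChange are illustrative names):

  function Counter() { this.value = 0; }
  Counter.prototype.increment = function(by) {
    var self = this;
    Object.getNotifier(this).performChange('increment', function() {
      self.value += by;
      // Returning an object makes the notifier emit a single record of type
      // 'increment' carrying these extra properties (the implicit notify).
      return { incremented: by };
    });
  };

  var counter = new Counter();
  function onChange(records) {
    // records[0] is { object: counter, type: 'increment', incremented: 2 }
  }
  Object.observe(counter, onChange, ['increment']);
  counter.increment(2);
  Object.deliverChangeRecords(onChange);
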
@@ -481,7 +571,7 @@ Thingy.observe = function(thingy, callback) {
Object.observe(thingy, callback, [Thingy.INCREMENT,
Thingy.MULTIPLY,
Thingy.INCREMENT_AND_MULTIPLY,
- 'updated']);
+ 'update']);
}
Thingy.unobserve = function(thingy, callback) {
@@ -501,22 +591,22 @@ thingy.incrementAndMultiply(2, 2); // { a: 26, b: 36 }
Object.deliverChangeRecords(observer.callback);
Object.deliverChangeRecords(observer2.callback);
observer.assertCallbackRecords([
- { object: thingy, type: 'updated', name: 'a', oldValue: 2 },
- { object: thingy, type: 'updated', name: 'b', oldValue: 4 },
- { object: thingy, type: 'updated', name: 'b', oldValue: 7 },
- { object: thingy, type: 'updated', name: 'a', oldValue: 5 },
- { object: thingy, type: 'updated', name: 'b', oldValue: 8 },
- { object: thingy, type: 'updated', name: 'a', oldValue: 10 },
- { object: thingy, type: 'updated', name: 'a', oldValue: 11 },
- { object: thingy, type: 'updated', name: 'b', oldValue: 16 },
- { object: thingy, type: 'updated', name: 'a', oldValue: 13 },
- { object: thingy, type: 'updated', name: 'b', oldValue: 18 },
+ { object: thingy, type: 'update', name: 'a', oldValue: 2 },
+ { object: thingy, type: 'update', name: 'b', oldValue: 4 },
+ { object: thingy, type: 'update', name: 'b', oldValue: 7 },
+ { object: thingy, type: 'update', name: 'a', oldValue: 5 },
+ { object: thingy, type: 'update', name: 'b', oldValue: 8 },
+ { object: thingy, type: 'update', name: 'a', oldValue: 10 },
+ { object: thingy, type: 'update', name: 'a', oldValue: 11 },
+ { object: thingy, type: 'update', name: 'b', oldValue: 16 },
+ { object: thingy, type: 'update', name: 'a', oldValue: 13 },
+ { object: thingy, type: 'update', name: 'b', oldValue: 18 },
]);
observer2.assertCallbackRecords([
{ object: thingy, type: Thingy.INCREMENT, incremented: 3 },
- { object: thingy, type: 'updated', name: 'b', oldValue: 7 },
+ { object: thingy, type: 'update', name: 'b', oldValue: 7 },
{ object: thingy, type: Thingy.MULTIPLY, multiplied: 2 },
- { object: thingy, type: 'updated', name: 'a', oldValue: 10 },
+ { object: thingy, type: 'update', name: 'a', oldValue: 10 },
{
object: thingy,
type: Thingy.INCREMENT_AND_MULTIPLY,
@@ -525,6 +615,69 @@ observer2.assertCallbackRecords([
}
]);
+// ArrayPush cached stub
+reset();
+
+function pushMultiple(arr) {
+ arr.push('a');
+ arr.push('b');
+ arr.push('c');
+}
+
+for (var i = 0; i < 5; i++) {
+ var arr = [];
+ pushMultiple(arr);
+}
+
+for (var i = 0; i < 5; i++) {
+ reset();
+ var arr = [];
+ Object.observe(arr, observer.callback);
+ pushMultiple(arr);
+ Object.unobserve(arr, observer.callback);
+ Object.deliverChangeRecords(observer.callback);
+ observer.assertCallbackRecords([
+ { object: arr, type: 'add', name: '0' },
+ { object: arr, type: 'update', name: 'length', oldValue: 0 },
+ { object: arr, type: 'add', name: '1' },
+ { object: arr, type: 'update', name: 'length', oldValue: 1 },
+ { object: arr, type: 'add', name: '2' },
+ { object: arr, type: 'update', name: 'length', oldValue: 2 },
+ ]);
+}
+
+
+// ArrayPop cached stub
+reset();
+
+function popMultiple(arr) {
+ arr.pop();
+ arr.pop();
+ arr.pop();
+}
+
+for (var i = 0; i < 5; i++) {
+ var arr = ['a', 'b', 'c'];
+ popMultiple(arr);
+}
+
+for (var i = 0; i < 5; i++) {
+ reset();
+ var arr = ['a', 'b', 'c'];
+ Object.observe(arr, observer.callback);
+ popMultiple(arr);
+ Object.unobserve(arr, observer.callback);
+ Object.deliverChangeRecords(observer.callback);
+ observer.assertCallbackRecords([
+ { object: arr, type: 'delete', name: '2', oldValue: 'c' },
+ { object: arr, type: 'update', name: 'length', oldValue: 3 },
+ { object: arr, type: 'delete', name: '1', oldValue: 'b' },
+ { object: arr, type: 'update', name: 'length', oldValue: 2 },
+ { object: arr, type: 'delete', name: '0', oldValue: 'a' },
+ { object: arr, type: 'update', name: 'length', oldValue: 1 },
+ ]);
+}
+
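
The cached-stub tests above warm up Array.prototype.push and pop on unobserved arrays first, then check that the optimized stubs still emit the full record stream once the array is observed: each push yields an index 'add' followed by a length 'update', and each pop the mirror-image 'delete' plus 'update'. A condensed illustration of the per-operation pattern (logRecords is an illustrative name, not part of the test):

  var a = [];
  var seen = [];
  function logRecords(records) {
    records.forEach(function(r) { seen.push(r.type + ':' + r.name); });
  }
  Object.observe(a, logRecords);
  a.push('x');   // index '0' appears, then length grows from 0 to 1
  a.pop();       // index '0' disappears, then length shrinks back to 0
  Object.deliverChangeRecords(logRecords);
  // seen is now ['add:0', 'update:length', 'delete:0', 'update:length'].
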
reset();
function RecursiveThingy() {}
@@ -545,7 +698,6 @@ RecursiveThingy.prototype = {
});
notifier.notify({
- object: this,
type: RecursiveThingy.MULTIPLY_FIRST_N,
multiplied: amount,
n: n
@@ -571,9 +723,9 @@ thingy.multiplyFirstN(2, 3); // [2, 4, 6, 4]
Object.deliverChangeRecords(observer.callback);
Object.deliverChangeRecords(observer2.callback);
observer.assertCallbackRecords([
- { object: thingy, type: 'updated', name: '2', oldValue: 3 },
- { object: thingy, type: 'updated', name: '1', oldValue: 2 },
- { object: thingy, type: 'updated', name: '0', oldValue: 1 }
+ { object: thingy, type: 'update', name: '2', oldValue: 3 },
+ { object: thingy, type: 'update', name: '1', oldValue: 2 },
+ { object: thingy, type: 'update', name: '0', oldValue: 1 }
]);
observer2.assertCallbackRecords([
{ object: thingy, type: RecursiveThingy.MULTIPLY_FIRST_N, multiplied: 2, n: 3 }
@@ -606,7 +758,6 @@ DeckSuit.prototype = {
});
notifier.notify({
- object: this,
type: DeckSuit.SHUFFLE
});
},
@@ -638,20 +789,20 @@ Object.observe(obj, observer.callback);
Object.observe(obj3, observer.callback);
Object.observe(obj2, observer.callback);
Object.getNotifier(obj).notify({
- type: 'new',
+ type: 'add',
});
Object.getNotifier(obj2).notify({
- type: 'updated',
+ type: 'update',
});
Object.getNotifier(obj3).notify({
- type: 'deleted',
+ type: 'delete',
});
Object.observe(obj3, observer.callback);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, type: 'new' },
- { object: obj2, type: 'updated' },
- { object: obj3, type: 'deleted' }
+ { object: obj, type: 'add' },
+ { object: obj2, type: 'update' },
+ { object: obj3, type: 'delete' }
]);
@@ -718,28 +869,28 @@ delete obj.a;
Object.defineProperty(obj, "a", {value: 11, configurable: true});
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, name: "a", type: "updated", oldValue: 1 },
- { object: obj, name: "a", type: "updated", oldValue: 2 },
- { object: obj, name: "a", type: "deleted", oldValue: 3 },
- { object: obj, name: "a", type: "new" },
- { object: obj, name: "a", type: "updated", oldValue: 4 },
- { object: obj, name: "a", type: "updated", oldValue: 5 },
- { object: obj, name: "a", type: "reconfigured" },
- { object: obj, name: "a", type: "updated", oldValue: 6 },
- { object: obj, name: "a", type: "reconfigured", oldValue: 8 },
- { object: obj, name: "a", type: "reconfigured", oldValue: 7 },
- { object: obj, name: "a", type: "reconfigured" },
- { object: obj, name: "a", type: "reconfigured" },
- { object: obj, name: "a", type: "reconfigured" },
- { object: obj, name: "a", type: "deleted" },
- { object: obj, name: "a", type: "new" },
- { object: obj, name: "a", type: "reconfigured" },
- { object: obj, name: "a", type: "updated", oldValue: 9 },
- { object: obj, name: "a", type: "updated", oldValue: 10 },
- { object: obj, name: "a", type: "updated", oldValue: 11 },
- { object: obj, name: "a", type: "updated", oldValue: 12 },
- { object: obj, name: "a", type: "deleted", oldValue: 36 },
- { object: obj, name: "a", type: "new" },
+ { object: obj, name: "a", type: "update", oldValue: 1 },
+ { object: obj, name: "a", type: "update", oldValue: 2 },
+ { object: obj, name: "a", type: "delete", oldValue: 3 },
+ { object: obj, name: "a", type: "add" },
+ { object: obj, name: "a", type: "update", oldValue: 4 },
+ { object: obj, name: "a", type: "update", oldValue: 5 },
+ { object: obj, name: "a", type: "reconfigure" },
+ { object: obj, name: "a", type: "update", oldValue: 6 },
+ { object: obj, name: "a", type: "reconfigure", oldValue: 8 },
+ { object: obj, name: "a", type: "reconfigure", oldValue: 7 },
+ { object: obj, name: "a", type: "reconfigure" },
+ { object: obj, name: "a", type: "reconfigure" },
+ { object: obj, name: "a", type: "reconfigure" },
+ { object: obj, name: "a", type: "delete" },
+ { object: obj, name: "a", type: "add" },
+ { object: obj, name: "a", type: "reconfigure" },
+ { object: obj, name: "a", type: "update", oldValue: 9 },
+ { object: obj, name: "a", type: "update", oldValue: 10 },
+ { object: obj, name: "a", type: "update", oldValue: 11 },
+ { object: obj, name: "a", type: "update", oldValue: 12 },
+ { object: obj, name: "a", type: "delete", oldValue: 36 },
+ { object: obj, name: "a", type: "add" },
]);
@@ -776,28 +927,28 @@ delete obj[1];
Object.defineProperty(obj, "1", {value: 11, configurable: true});
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, name: "1", type: "updated", oldValue: 1 },
- { object: obj, name: "1", type: "updated", oldValue: 2 },
- { object: obj, name: "1", type: "deleted", oldValue: 3 },
- { object: obj, name: "1", type: "new" },
- { object: obj, name: "1", type: "updated", oldValue: 4 },
- { object: obj, name: "1", type: "updated", oldValue: 5 },
- { object: obj, name: "1", type: "reconfigured" },
- { object: obj, name: "1", type: "updated", oldValue: 6 },
- { object: obj, name: "1", type: "reconfigured", oldValue: 8 },
- { object: obj, name: "1", type: "reconfigured", oldValue: 7 },
- { object: obj, name: "1", type: "reconfigured" },
- { object: obj, name: "1", type: "reconfigured" },
- { object: obj, name: "1", type: "reconfigured" },
- { object: obj, name: "1", type: "deleted" },
- { object: obj, name: "1", type: "new" },
- { object: obj, name: "1", type: "reconfigured" },
- { object: obj, name: "1", type: "updated", oldValue: 9 },
- { object: obj, name: "1", type: "updated", oldValue: 10 },
- { object: obj, name: "1", type: "updated", oldValue: 11 },
- { object: obj, name: "1", type: "updated", oldValue: 12 },
- { object: obj, name: "1", type: "deleted", oldValue: 36 },
- { object: obj, name: "1", type: "new" },
+ { object: obj, name: "1", type: "update", oldValue: 1 },
+ { object: obj, name: "1", type: "update", oldValue: 2 },
+ { object: obj, name: "1", type: "delete", oldValue: 3 },
+ { object: obj, name: "1", type: "add" },
+ { object: obj, name: "1", type: "update", oldValue: 4 },
+ { object: obj, name: "1", type: "update", oldValue: 5 },
+ { object: obj, name: "1", type: "reconfigure" },
+ { object: obj, name: "1", type: "update", oldValue: 6 },
+ { object: obj, name: "1", type: "reconfigure", oldValue: 8 },
+ { object: obj, name: "1", type: "reconfigure", oldValue: 7 },
+ { object: obj, name: "1", type: "reconfigure" },
+ { object: obj, name: "1", type: "reconfigure" },
+ { object: obj, name: "1", type: "reconfigure" },
+ { object: obj, name: "1", type: "delete" },
+ { object: obj, name: "1", type: "add" },
+ { object: obj, name: "1", type: "reconfigure" },
+ { object: obj, name: "1", type: "update", oldValue: 9 },
+ { object: obj, name: "1", type: "update", oldValue: 10 },
+ { object: obj, name: "1", type: "update", oldValue: 11 },
+ { object: obj, name: "1", type: "update", oldValue: 12 },
+ { object: obj, name: "1", type: "delete", oldValue: 36 },
+ { object: obj, name: "1", type: "add" },
]);
@@ -865,32 +1016,32 @@ function TestObserveConfigurable(obj, prop) {
Object.defineProperty(obj, prop, {value: 11, configurable: true});
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, name: prop, type: "updated", oldValue: 1 },
- { object: obj, name: prop, type: "updated", oldValue: 2 },
- { object: obj, name: prop, type: "deleted", oldValue: 3 },
- { object: obj, name: prop, type: "new" },
- { object: obj, name: prop, type: "updated", oldValue: 4 },
- { object: obj, name: prop, type: "updated", oldValue: 5 },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "updated", oldValue: 6 },
- { object: obj, name: prop, type: "reconfigured", oldValue: 8 },
- { object: obj, name: prop, type: "reconfigured", oldValue: 7 },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "deleted" },
- { object: obj, name: prop, type: "new" },
- { object: obj, name: prop, type: "deleted" },
- { object: obj, name: prop, type: "new" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "updated", oldValue: 9 },
- { object: obj, name: prop, type: "updated", oldValue: 10 },
- { object: obj, name: prop, type: "updated", oldValue: 11 },
- { object: obj, name: prop, type: "updated", oldValue: 12 },
- { object: obj, name: prop, type: "deleted", oldValue: 36 },
- { object: obj, name: prop, type: "new" },
+ { object: obj, name: prop, type: "update", oldValue: 1 },
+ { object: obj, name: prop, type: "update", oldValue: 2 },
+ { object: obj, name: prop, type: "delete", oldValue: 3 },
+ { object: obj, name: prop, type: "add" },
+ { object: obj, name: prop, type: "update", oldValue: 4 },
+ { object: obj, name: prop, type: "update", oldValue: 5 },
+ { object: obj, name: prop, type: "reconfigure" },
+ { object: obj, name: prop, type: "update", oldValue: 6 },
+ { object: obj, name: prop, type: "reconfigure", oldValue: 8 },
+ { object: obj, name: prop, type: "reconfigure", oldValue: 7 },
+ { object: obj, name: prop, type: "reconfigure" },
+ { object: obj, name: prop, type: "reconfigure" },
+ { object: obj, name: prop, type: "reconfigure" },
+ { object: obj, name: prop, type: "reconfigure" },
+ { object: obj, name: prop, type: "reconfigure" },
+ { object: obj, name: prop, type: "delete" },
+ { object: obj, name: prop, type: "add" },
+ { object: obj, name: prop, type: "delete" },
+ { object: obj, name: prop, type: "add" },
+ { object: obj, name: prop, type: "reconfigure" },
+ { object: obj, name: prop, type: "update", oldValue: 9 },
+ { object: obj, name: prop, type: "update", oldValue: 10 },
+ { object: obj, name: prop, type: "update", oldValue: 11 },
+ { object: obj, name: prop, type: "update", oldValue: 12 },
+ { object: obj, name: prop, type: "delete", oldValue: 36 },
+ { object: obj, name: prop, type: "add" },
]);
Object.unobserve(obj, observer.callback);
delete obj[prop];
@@ -913,11 +1064,11 @@ function TestObserveNonConfigurable(obj, prop, desc) {
obj[prop] = 7; // ignored
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, name: prop, type: "updated", oldValue: 1 },
- { object: obj, name: prop, type: "updated", oldValue: 4 },
- { object: obj, name: prop, type: "updated", oldValue: 5 },
- { object: obj, name: prop, type: "updated", oldValue: 6 },
- { object: obj, name: prop, type: "reconfigured" },
+ { object: obj, name: prop, type: "update", oldValue: 1 },
+ { object: obj, name: prop, type: "update", oldValue: 4 },
+ { object: obj, name: prop, type: "update", oldValue: 5 },
+ { object: obj, name: prop, type: "update", oldValue: 6 },
+ { object: obj, name: prop, type: "reconfigure" },
]);
Object.unobserve(obj, observer.callback);
}
@@ -977,7 +1128,7 @@ var objects = [
createProxy(Proxy.create, null),
createProxy(Proxy.createFunction, function(){}),
];
-var properties = ["a", "1", 1, "length", "prototype", "name", "caller"];
+var properties = ["a", "1", 1, "length", "setPrototype", "name", "caller"];
// Cases that yield non-standard results.
function blacklisted(obj, prop) {
@@ -1031,31 +1182,31 @@ arr3[4] = 5;
Object.defineProperty(arr3, 'length', {value: 1, writable: false});
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: arr, name: '3', type: 'deleted', oldValue: 'd' },
- { object: arr, name: '2', type: 'deleted' },
- { object: arr, name: 'length', type: 'updated', oldValue: 4 },
- { object: arr, name: '1', type: 'deleted', oldValue: 'b' },
- { object: arr, name: 'length', type: 'updated', oldValue: 2 },
- { object: arr, name: 'length', type: 'updated', oldValue: 1 },
- { object: arr, name: 'length', type: 'reconfigured' },
- { object: arr2, name: '1', type: 'deleted', oldValue: 'beta' },
- { object: arr2, name: 'length', type: 'updated', oldValue: 2 },
- { object: arr2, name: 'length', type: 'reconfigured' },
- { object: arr3, name: '2', type: 'deleted', oldValue: 'goodbye' },
- { object: arr3, name: '0', type: 'deleted', oldValue: 'hello' },
- { object: arr3, name: 'length', type: 'updated', oldValue: 6 },
- { object: arr3, name: 'length', type: 'updated', oldValue: 0 },
- { object: arr3, name: 'length', type: 'updated', oldValue: 1 },
- { object: arr3, name: 'length', type: 'updated', oldValue: 2 },
- { object: arr3, name: 'length', type: 'updated', oldValue: 1 },
- { object: arr3, name: '4', type: 'new' },
- { object: arr3, name: '4', type: 'deleted', oldValue: 5 },
+ { object: arr, name: '3', type: 'delete', oldValue: 'd' },
+ { object: arr, name: '2', type: 'delete' },
+ { object: arr, name: 'length', type: 'update', oldValue: 4 },
+ { object: arr, name: '1', type: 'delete', oldValue: 'b' },
+ { object: arr, name: 'length', type: 'update', oldValue: 2 },
+ { object: arr, name: 'length', type: 'update', oldValue: 1 },
+ { object: arr, name: 'length', type: 'reconfigure' },
+ { object: arr2, name: '1', type: 'delete', oldValue: 'beta' },
+ { object: arr2, name: 'length', type: 'update', oldValue: 2 },
+ { object: arr2, name: 'length', type: 'reconfigure' },
+ { object: arr3, name: '2', type: 'delete', oldValue: 'goodbye' },
+ { object: arr3, name: '0', type: 'delete', oldValue: 'hello' },
+ { object: arr3, name: 'length', type: 'update', oldValue: 6 },
+ { object: arr3, name: 'length', type: 'update', oldValue: 0 },
+ { object: arr3, name: 'length', type: 'update', oldValue: 1 },
+ { object: arr3, name: 'length', type: 'update', oldValue: 2 },
+ { object: arr3, name: 'length', type: 'update', oldValue: 1 },
+ { object: arr3, name: '4', type: 'add' },
+ { object: arr3, name: '4', type: 'delete', oldValue: 5 },
// TODO(rafaelw): It breaks spec compliance to get two records here.
// When the TODO in v8natives.js::DefineArrayProperty is addressed
// which prevents DefineProperty from over-writing the magic length
// property, these will collapse into a single record.
- { object: arr3, name: 'length', type: 'updated', oldValue: 5 },
- { object: arr3, name: 'length', type: 'reconfigured' }
+ { object: arr3, name: 'length', type: 'update', oldValue: 5 },
+ { object: arr3, name: 'length', type: 'reconfigure' }
]);
Object.deliverChangeRecords(observer2.callback);
observer2.assertCallbackRecords([
@@ -1068,7 +1219,7 @@ observer2.assertCallbackRecords([
{ object: arr3, type: 'splice', index: 1, removed: [], addedCount: 1 },
{ object: arr3, type: 'splice', index: 1, removed: [,], addedCount: 0 },
{ object: arr3, type: 'splice', index: 1, removed: [], addedCount: 4 },
- { object: arr3, name: '4', type: 'new' },
+ { object: arr3, name: '4', type: 'add' },
{ object: arr3, type: 'splice', index: 1, removed: [,,,5], addedCount: 0 }
]);
@@ -1086,8 +1237,8 @@ Array.observe(slow_arr, slowSpliceCallback);
slow_arr.length = 100;
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: slow_arr, name: '500000000', type: 'deleted', oldValue: 'hello' },
- { object: slow_arr, name: 'length', type: 'updated', oldValue: 1000000000 },
+ { object: slow_arr, name: '500000000', type: 'delete', oldValue: 'hello' },
+ { object: slow_arr, name: 'length', type: 'update', oldValue: 1000000000 },
]);
Object.deliverChangeRecords(slowSpliceCallback);
assertEquals(spliceRecords.length, 1);
@@ -1113,11 +1264,11 @@ for (var i = 0; i < 5; i++) {
}
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, name: "a0", type: "new" },
- { object: obj, name: "a1", type: "new" },
- { object: obj, name: "a2", type: "new" },
- { object: obj, name: "a3", type: "new" },
- { object: obj, name: "a4", type: "new" },
+ { object: obj, name: "a0", type: "add" },
+ { object: obj, name: "a1", type: "add" },
+ { object: obj, name: "a2", type: "add" },
+ { object: obj, name: "a3", type: "add" },
+ { object: obj, name: "a4", type: "add" },
]);
reset();
@@ -1128,11 +1279,11 @@ for (var i = 0; i < 5; i++) {
}
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, name: "0", type: "new" },
- { object: obj, name: "1", type: "new" },
- { object: obj, name: "2", type: "new" },
- { object: obj, name: "3", type: "new" },
- { object: obj, name: "4", type: "new" },
+ { object: obj, name: "0", type: "add" },
+ { object: obj, name: "1", type: "add" },
+ { object: obj, name: "2", type: "add" },
+ { object: obj, name: "3", type: "add" },
+ { object: obj, name: "4", type: "add" },
]);
@@ -1149,15 +1300,15 @@ Object.defineProperty(arr, '400', {get: function(){}});
arr[50] = 30; // no length change expected
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: arr, name: '3', type: 'new' },
- { object: arr, name: 'length', type: 'updated', oldValue: 3 },
- { object: arr, name: '100', type: 'new' },
- { object: arr, name: 'length', type: 'updated', oldValue: 4 },
- { object: arr, name: '200', type: 'new' },
- { object: arr, name: 'length', type: 'updated', oldValue: 101 },
- { object: arr, name: '400', type: 'new' },
- { object: arr, name: 'length', type: 'updated', oldValue: 201 },
- { object: arr, name: '50', type: 'new' },
+ { object: arr, name: '3', type: 'add' },
+ { object: arr, name: 'length', type: 'update', oldValue: 3 },
+ { object: arr, name: '100', type: 'add' },
+ { object: arr, name: 'length', type: 'update', oldValue: 4 },
+ { object: arr, name: '200', type: 'add' },
+ { object: arr, name: 'length', type: 'update', oldValue: 101 },
+ { object: arr, name: '400', type: 'add' },
+ { object: arr, name: 'length', type: 'update', oldValue: 201 },
+ { object: arr, name: '50', type: 'add' },
]);
Object.deliverChangeRecords(observer2.callback);
observer2.assertCallbackRecords([
@@ -1165,7 +1316,7 @@ observer2.assertCallbackRecords([
{ object: arr, type: 'splice', index: 4, removed: [], addedCount: 97 },
{ object: arr, type: 'splice', index: 101, removed: [], addedCount: 100 },
{ object: arr, type: 'splice', index: 201, removed: [], addedCount: 200 },
- { object: arr, type: 'new', name: '50' },
+ { object: arr, type: 'add', name: '50' },
]);
@@ -1182,12 +1333,12 @@ array.push(3, 4);
array.push(5);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '2', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '3', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 3 },
- { object: array, name: '4', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 4 },
+ { object: array, name: '2', type: 'add' },
+ { object: array, name: 'length', type: 'update', oldValue: 2 },
+ { object: array, name: '3', type: 'add' },
+ { object: array, name: 'length', type: 'update', oldValue: 3 },
+ { object: array, name: '4', type: 'add' },
+ { object: array, name: 'length', type: 'update', oldValue: 4 },
]);
Object.deliverChangeRecords(observer2.callback);
observer2.assertCallbackRecords([
@@ -1203,10 +1354,10 @@ array.pop();
array.pop();
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '1', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '0', type: 'deleted', oldValue: 1 },
- { object: array, name: 'length', type: 'updated', oldValue: 1 },
+ { object: array, name: '1', type: 'delete', oldValue: 2 },
+ { object: array, name: 'length', type: 'update', oldValue: 2 },
+ { object: array, name: '0', type: 'delete', oldValue: 1 },
+ { object: array, name: 'length', type: 'update', oldValue: 1 },
]);
// Shift
@@ -1217,11 +1368,11 @@ array.shift();
array.shift();
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '0', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '0', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 1 },
+ { object: array, name: '0', type: 'update', oldValue: 1 },
+ { object: array, name: '1', type: 'delete', oldValue: 2 },
+ { object: array, name: 'length', type: 'update', oldValue: 2 },
+ { object: array, name: '0', type: 'delete', oldValue: 2 },
+ { object: array, name: 'length', type: 'update', oldValue: 1 },
]);
// Unshift
@@ -1231,11 +1382,11 @@ Object.observe(array, observer.callback);
array.unshift(3, 4);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '3', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '2', type: 'new' },
- { object: array, name: '0', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'updated', oldValue: 2 },
+ { object: array, name: '3', type: 'add' },
+ { object: array, name: 'length', type: 'update', oldValue: 2 },
+ { object: array, name: '2', type: 'add' },
+ { object: array, name: '0', type: 'update', oldValue: 1 },
+ { object: array, name: '1', type: 'update', oldValue: 2 },
]);
// Splice
@@ -1245,10 +1396,10 @@ Object.observe(array, observer.callback);
array.splice(1, 1, 4, 5);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '3', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 3 },
- { object: array, name: '1', type: 'updated', oldValue: 2 },
- { object: array, name: '2', type: 'updated', oldValue: 3 },
+ { object: array, name: '3', type: 'add' },
+ { object: array, name: 'length', type: 'update', oldValue: 3 },
+ { object: array, name: '1', type: 'update', oldValue: 2 },
+ { object: array, name: '2', type: 'update', oldValue: 3 },
]);
// Sort
@@ -1261,11 +1412,11 @@ assertEquals(2, array[1]);
assertEquals(3, array[2]);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '1', type: 'updated', oldValue: 2 },
- { object: array, name: '0', type: 'updated', oldValue: 3 },
- { object: array, name: '2', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'updated', oldValue: 3 },
- { object: array, name: '0', type: 'updated', oldValue: 2 },
+ { object: array, name: '1', type: 'update', oldValue: 2 },
+ { object: array, name: '0', type: 'update', oldValue: 3 },
+ { object: array, name: '2', type: 'update', oldValue: 1 },
+ { object: array, name: '1', type: 'update', oldValue: 3 },
+ { object: array, name: '0', type: 'update', oldValue: 2 },
]);
// Splice emitted after Array mutation methods
@@ -1347,9 +1498,9 @@ Object.observe(array, observer.callback);
Array.prototype.push.call(array, 3, 4);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '2', type: 'new' },
- { object: array, name: '3', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
+ { object: array, name: '2', type: 'add' },
+ { object: array, name: '3', type: 'add' },
+ { object: array, name: 'length', type: 'update', oldValue: 2 },
]);
// Pop
@@ -1362,10 +1513,10 @@ array.pop();
array.pop();
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '1', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '0', type: 'deleted', oldValue: 1 },
- { object: array, name: 'length', type: 'updated', oldValue: 1 },
+ { object: array, name: '1', type: 'delete', oldValue: 2 },
+ { object: array, name: 'length', type: 'update', oldValue: 2 },
+ { object: array, name: '0', type: 'delete', oldValue: 1 },
+ { object: array, name: 'length', type: 'update', oldValue: 1 },
]);
Object.deliverChangeRecords(observer2.callback);
observer2.assertCallbackRecords([
@@ -1383,11 +1534,11 @@ array.shift();
array.shift();
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '0', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '0', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 1 },
+ { object: array, name: '0', type: 'update', oldValue: 1 },
+ { object: array, name: '1', type: 'delete', oldValue: 2 },
+ { object: array, name: 'length', type: 'update', oldValue: 2 },
+ { object: array, name: '0', type: 'delete', oldValue: 2 },
+ { object: array, name: 'length', type: 'update', oldValue: 1 },
]);
Object.deliverChangeRecords(observer2.callback);
observer2.assertCallbackRecords([
@@ -1404,17 +1555,17 @@ array.unshift(3, 4);
array.unshift(5);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '3', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '2', type: 'new' },
- { object: array, name: '0', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'updated', oldValue: 2 },
- { object: array, name: '4', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 4 },
- { object: array, name: '3', type: 'updated', oldValue: 2 },
- { object: array, name: '2', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'updated', oldValue: 4 },
- { object: array, name: '0', type: 'updated', oldValue: 3 },
+ { object: array, name: '3', type: 'add' },
+ { object: array, name: 'length', type: 'update', oldValue: 2 },
+ { object: array, name: '2', type: 'add' },
+ { object: array, name: '0', type: 'update', oldValue: 1 },
+ { object: array, name: '1', type: 'update', oldValue: 2 },
+ { object: array, name: '4', type: 'add' },
+ { object: array, name: 'length', type: 'update', oldValue: 4 },
+ { object: array, name: '3', type: 'update', oldValue: 2 },
+ { object: array, name: '2', type: 'update', oldValue: 1 },
+ { object: array, name: '1', type: 'update', oldValue: 4 },
+ { object: array, name: '0', type: 'update', oldValue: 3 },
]);
Object.deliverChangeRecords(observer2.callback);
observer2.assertCallbackRecords([
@@ -1433,21 +1584,21 @@ array.splice(1, 2, 6, 7); // 5 6 7
array.splice(2, 0);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '4', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 3 },
- { object: array, name: '3', type: 'new' },
- { object: array, name: '1', type: 'updated', oldValue: 2 },
- { object: array, name: '2', type: 'updated', oldValue: 3 },
-
- { object: array, name: '0', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'updated', oldValue: 4 },
- { object: array, name: '2', type: 'updated', oldValue: 5 },
- { object: array, name: '4', type: 'deleted', oldValue: 3 },
- { object: array, name: '3', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 5 },
-
- { object: array, name: '1', type: 'updated', oldValue: 2 },
- { object: array, name: '2', type: 'updated', oldValue: 3 },
+ { object: array, name: '4', type: 'add' },
+ { object: array, name: 'length', type: 'update', oldValue: 3 },
+ { object: array, name: '3', type: 'add' },
+ { object: array, name: '1', type: 'update', oldValue: 2 },
+ { object: array, name: '2', type: 'update', oldValue: 3 },
+
+ { object: array, name: '0', type: 'update', oldValue: 1 },
+ { object: array, name: '1', type: 'update', oldValue: 4 },
+ { object: array, name: '2', type: 'update', oldValue: 5 },
+ { object: array, name: '4', type: 'delete', oldValue: 3 },
+ { object: array, name: '3', type: 'delete', oldValue: 2 },
+ { object: array, name: 'length', type: 'update', oldValue: 5 },
+
+ { object: array, name: '1', type: 'update', oldValue: 2 },
+ { object: array, name: '2', type: 'update', oldValue: 3 },
]);
Object.deliverChangeRecords(observer2.callback);
observer2.assertCallbackRecords([
@@ -1466,8 +1617,8 @@ Object.observe(array, observer.callback);
array.splice(0, 1);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: array, name: '0', type: 'deleted', oldValue: 0 },
- { object: array, name: 'length', type: 'updated', oldValue: 1},
+ { object: array, name: '0', type: 'delete', oldValue: 0 },
+ { object: array, name: 'length', type: 'update', oldValue: 1},
]);
@@ -1485,10 +1636,10 @@ obj.__proto__ = q; // the __proto__ accessor is gone
// once we support observing the global object.
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: obj, name: '__proto__', type: 'prototype',
+ { object: obj, name: '__proto__', type: 'setPrototype',
oldValue: Object.prototype },
- { object: obj, name: '__proto__', type: 'prototype', oldValue: p },
- { object: obj, name: '__proto__', type: 'new' },
+ { object: obj, name: '__proto__', type: 'setPrototype', oldValue: p },
+ { object: obj, name: '__proto__', type: 'add' },
]);
@@ -1507,7 +1658,7 @@ observer.assertRecordCount(3);
// lazy creation of oldValue
assertSame(fun, observer.records[0].object);
assertEquals('prototype', observer.records[0].name);
-assertEquals('updated', observer.records[0].type);
+assertEquals('update', observer.records[0].type);
// The only existing reference to the oldValue object is in this
// record, so to test that lazy creation happened correctly
// we compare its constructor to our function (one of the invariants
@@ -1515,8 +1666,8 @@ assertEquals('updated', observer.records[0].type);
assertSame(fun, observer.records[0].oldValue.constructor);
observer.records.splice(0, 1);
observer.assertCallbackRecords([
- { object: fun, name: 'prototype', type: 'updated', oldValue: myproto },
- { object: fun, name: 'prototype', type: 'updated', oldValue: 7 },
+ { object: fun, name: 'prototype', type: 'update', oldValue: myproto },
+ { object: fun, name: 'prototype', type: 'update', oldValue: 7 },
]);
// Function.prototype should not be observable except on the object itself
@@ -1562,7 +1713,7 @@ function TestFastElements(prop, mutation, prepopulate, polymorphic, optimize) {
setElement(arr, prop, 989898);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
- { object: arr, name: "" + prop, type: 'updated', oldValue: 5 }
+ { object: arr, name: "" + prop, type: 'update', oldValue: 5 }
]);
}
@@ -1619,7 +1770,7 @@ function TestFastElementsLength(
var lengthRecord = observer.records[count];
assertSame(arr, lengthRecord.object);
assertEquals('length', lengthRecord.name);
- assertEquals('updated', lengthRecord.type);
+ assertEquals('update', lengthRecord.type);
assertSame(oldSize, lengthRecord.oldValue);
}
}
diff --git a/deps/v8/test/mjsunit/harmony/private.js b/deps/v8/test/mjsunit/harmony/private.js
new file mode 100644
index 0000000000..09cf7f7408
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private.js
@@ -0,0 +1,324 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-symbols --harmony-collections
+// Flags: --expose-gc --allow-natives-syntax
+
+var symbols = []
+
+// Test different forms of constructor calls, all equivalent.
+function TestNew() {
+ for (var i = 0; i < 2; ++i) {
+ for (var j = 0; j < 5; ++j) {
+ symbols.push(%CreatePrivateSymbol("66"))
+ symbols.push(Object(%CreatePrivateSymbol("66")).valueOf())
+ }
+ gc() // Promote existing symbols and then allocate some more.
+ }
+}
+TestNew()
+
+
+function TestType() {
+ for (var i in symbols) {
+ assertEquals("symbol", typeof symbols[i])
+ assertTrue(typeof symbols[i] === "symbol")
+ assertTrue(%SymbolIsPrivate(symbols[i]))
+ assertEquals(null, %_ClassOf(symbols[i]))
+ assertEquals("Symbol", %_ClassOf(new Symbol(symbols[i])))
+ assertEquals("Symbol", %_ClassOf(Object(symbols[i])))
+ }
+}
+TestType()
+
+
+function TestPrototype() {
+ for (var i in symbols) {
+ assertSame(Symbol.prototype, symbols[i].__proto__)
+ }
+}
+TestPrototype()
+
+
+function TestConstructor() {
+ for (var i in symbols) {
+ assertSame(Symbol, symbols[i].__proto__.constructor)
+ }
+}
+TestConstructor()
+
+
+function TestName() {
+ for (var i in symbols) {
+ var name = symbols[i].name
+ assertTrue(name === "66")
+ }
+}
+TestName()
+
+
+function TestToString() {
+ for (var i in symbols) {
+ assertThrows(function() { String(symbols[i]) }, TypeError)
+ assertThrows(function() { symbols[i] + "" }, TypeError)
+ assertThrows(function() { symbols[i].toString() }, TypeError)
+ assertThrows(function() { (new Symbol(symbols[i])).toString() }, TypeError)
+ assertThrows(function() { Object(symbols[i]).toString() }, TypeError)
+ assertEquals("[object Symbol]", Object.prototype.toString.call(symbols[i]))
+ }
+}
+TestToString()
+
+
+function TestToBoolean() {
+ for (var i in symbols) {
+ assertTrue(Boolean(symbols[i]).valueOf())
+ assertFalse(!symbols[i])
+ assertTrue(!!symbols[i])
+ assertTrue(symbols[i] && true)
+ assertFalse(!symbols[i] && false)
+ assertTrue(!symbols[i] || true)
+ assertEquals(1, symbols[i] ? 1 : 2)
+ assertEquals(2, !symbols[i] ? 1 : 2)
+ if (!symbols[i]) assertUnreachable();
+ if (symbols[i]) {} else assertUnreachable();
+ }
+}
+TestToBoolean()
+
+
+function TestToNumber() {
+ for (var i in symbols) {
+ assertSame(NaN, Number(symbols[i]).valueOf())
+ assertSame(NaN, symbols[i] + 0)
+ }
+}
+TestToNumber()
+
+
+function TestEquality() {
+ // Every symbol should equal itself, and non-strictly equal its wrapper.
+ for (var i in symbols) {
+ assertSame(symbols[i], symbols[i])
+ assertEquals(symbols[i], symbols[i])
+ assertTrue(Object.is(symbols[i], symbols[i]))
+ assertTrue(symbols[i] === symbols[i])
+ assertTrue(symbols[i] == symbols[i])
+ assertFalse(symbols[i] === new Symbol(symbols[i]))
+ assertFalse(new Symbol(symbols[i]) === symbols[i])
+ assertTrue(symbols[i] == new Symbol(symbols[i]))
+ assertTrue(new Symbol(symbols[i]) == symbols[i])
+ }
+
+ // All symbols should be distinct.
+ for (var i = 0; i < symbols.length; ++i) {
+ for (var j = i + 1; j < symbols.length; ++j) {
+ assertFalse(Object.is(symbols[i], symbols[j]))
+ assertFalse(symbols[i] === symbols[j])
+ assertFalse(symbols[i] == symbols[j])
+ }
+ }
+
+ // Symbols should not be equal to any other value (and the test terminates).
+ var values = [347, 1.275, NaN, "string", null, undefined, {}, function() {}]
+ for (var i in symbols) {
+ for (var j in values) {
+ assertFalse(symbols[i] === values[j])
+ assertFalse(values[j] === symbols[i])
+ assertFalse(symbols[i] == values[j])
+ assertFalse(values[j] == symbols[i])
+ }
+ }
+}
+TestEquality()
+
+
+function TestGet() {
+ for (var i in symbols) {
+ assertThrows(function() { symbols[i].toString() }, TypeError)
+ assertEquals(symbols[i], symbols[i].valueOf())
+ assertEquals(undefined, symbols[i].a)
+ assertEquals(undefined, symbols[i]["a" + "b"])
+ assertEquals(undefined, symbols[i]["" + "1"])
+ assertEquals(undefined, symbols[i][62])
+ }
+}
+TestGet()
+
+
+function TestSet() {
+ for (var i in symbols) {
+ symbols[i].toString = 0
+ assertThrows(function() { symbols[i].toString() }, TypeError)
+ symbols[i].valueOf = 0
+ assertEquals(symbols[i], symbols[i].valueOf())
+ symbols[i].a = 0
+ assertEquals(undefined, symbols[i].a)
+ symbols[i]["a" + "b"] = 0
+ assertEquals(undefined, symbols[i]["a" + "b"])
+ symbols[i][62] = 0
+ assertEquals(undefined, symbols[i][62])
+ }
+}
+TestSet()
+
+
+function TestCollections() {
+ var set = new Set
+ var map = new Map
+ var weakmap = new WeakMap
+ for (var i in symbols) {
+ set.add(symbols[i])
+ map.set(symbols[i], i)
+ weakmap.set(symbols[i], i)
+ }
+ assertEquals(symbols.length, set.size)
+ assertEquals(symbols.length, map.size)
+ for (var i in symbols) {
+ assertTrue(set.has(symbols[i]))
+ assertTrue(map.has(symbols[i]))
+ assertTrue(weakmap.has(symbols[i]))
+ assertEquals(i, map.get(symbols[i]))
+ assertEquals(i, weakmap.get(symbols[i]))
+ }
+ for (var i in symbols) {
+ assertTrue(set.delete(symbols[i]))
+ assertTrue(map.delete(symbols[i]))
+ assertTrue(weakmap.delete(symbols[i]))
+ }
+ assertEquals(0, set.size)
+ assertEquals(0, map.size)
+}
+TestCollections()
+
+
+
+function TestKeySet(obj) {
+ assertTrue(%HasFastProperties(obj))
+ // Set the even symbols via assignment.
+ for (var i = 0; i < symbols.length; i += 2) {
+ obj[symbols[i]] = i
+    // Object should remain in fast mode until too many properties have been added.
+ assertTrue(%HasFastProperties(obj) || i >= 30)
+ }
+}
+
+
+function TestKeyDefine(obj) {
+ // Set the odd symbols via defineProperty (as non-enumerable).
+ for (var i = 1; i < symbols.length; i += 2) {
+ Object.defineProperty(obj, symbols[i], {value: i, configurable: true})
+ }
+}
+
+
+function TestKeyGet(obj) {
+ var obj2 = Object.create(obj)
+ for (var i in symbols) {
+ assertEquals(i|0, obj[symbols[i]])
+ assertEquals(i|0, obj2[symbols[i]])
+ }
+}
+
+
+function TestKeyHas() {
+ for (var i in symbols) {
+ assertTrue(symbols[i] in obj)
+ assertTrue(Object.hasOwnProperty.call(obj, symbols[i]))
+ }
+}
+
+
+function TestKeyEnum(obj) {
+ for (var name in obj) {
+ assertEquals("string", typeof name)
+ }
+}
+
+
+function TestKeyNames(obj) {
+ assertEquals(0, Object.keys(obj).length)
+
+ var names = Object.getOwnPropertyNames(obj)
+ for (var i in names) {
+ assertEquals("string", typeof names[i])
+ }
+}
+
+
+function TestKeyDescriptor(obj) {
+ for (var i in symbols) {
+ var desc = Object.getOwnPropertyDescriptor(obj, symbols[i]);
+ assertEquals(i|0, desc.value)
+ assertTrue(desc.configurable)
+ assertEquals(i % 2 == 0, desc.writable)
+ assertEquals(i % 2 == 0, desc.enumerable)
+ assertEquals(i % 2 == 0,
+ Object.prototype.propertyIsEnumerable.call(obj, symbols[i]))
+ }
+}
+
+
+function TestKeyDelete(obj) {
+ for (var i in symbols) {
+ delete obj[symbols[i]]
+ }
+ for (var i in symbols) {
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(obj, symbols[i]))
+ }
+}
+
+
+var objs = [{}, [], Object.create(null), Object(1), new Map, function(){}]
+
+for (var i in objs) {
+ var obj = objs[i]
+ TestKeySet(obj)
+ TestKeyDefine(obj)
+ TestKeyGet(obj)
+ TestKeyHas(obj)
+ TestKeyEnum(obj)
+ TestKeyNames(obj)
+ TestKeyDescriptor(obj)
+ TestKeyDelete(obj)
+}
+
+
+function TestCachedKeyAfterScavenge() {
+ gc();
+  // Keyed property lookups are cached. Here we assume that the keys are
+  // tenured, so the cache only has to be cleared between mark-compacts,
+  // not between scavenges. This must also hold for symbol keys.
+ var key = Symbol("key");
+ var a = {};
+ a[key] = "abc";
+
+ for (var i = 0; i < 100000; i++) {
+ a[key] += "a"; // Allocations cause a scavenge.
+ }
+}
+TestCachedKeyAfterScavenge();
diff --git a/deps/v8/test/mjsunit/harmony/promises.js b/deps/v8/test/mjsunit/harmony/promises.js
new file mode 100644
index 0000000000..38ccd7fb2b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/promises.js
@@ -0,0 +1,791 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-promises --harmony-observation --allow-natives-syntax
+
+var asyncAssertsExpected = 0;
+
+function assertAsyncRan() { ++asyncAssertsExpected }
+
+function assertAsync(b, s) {
+ if (b) {
+ print(s, "succeeded")
+ } else {
+ %AbortJS(s + " FAILED!") // Simply throwing here will have no effect.
+ }
+ --asyncAssertsExpected
+}
+
+function assertAsyncDone(iteration) {
+ var iteration = iteration || 0
+ var dummy = {}
+ Object.observe(dummy,
+ function() {
+ if (asyncAssertsExpected === 0)
+ assertAsync(true, "all")
+ else if (iteration > 10) // Shouldn't take more.
+ assertAsync(false, "all")
+ else
+ assertAsyncDone(iteration + 1)
+ }
+ )
+ dummy.dummy = dummy
+}
+
+
+(function() {
+ assertThrows(function() { Promise(function() {}) }, TypeError)
+})();
+
+(function() {
+ assertTrue(new Promise(function() {}) instanceof Promise)
+})();
+
+(function() {
+ assertThrows(function() { new Promise(5) }, TypeError)
+})();
+
+(function() {
+ assertDoesNotThrow(function() { new Promise(function() { throw 5 }) })
+})();
+
+(function() {
+ (new Promise(function() { throw 5 })).chain(
+ assertUnreachable,
+ function(r) { assertAsync(r === 5, "new-throw") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ Promise.resolve(5);
+ Promise.resolve(5).chain(undefined, assertUnreachable).chain(
+ function(x) { assertAsync(x === 5, "resolved/chain-nohandler") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ Promise.reject(5).chain(assertUnreachable, undefined).chain(
+ assertUnreachable,
+ function(r) { assertAsync(r === 5, "rejected/chain-nohandler") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ Promise.resolve(5).then(undefined, assertUnreachable).chain(
+ function(x) { assertAsync(x === 5, "resolved/then-nohandler") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ Promise.reject(5).then(assertUnreachable, undefined).chain(
+ assertUnreachable,
+ function(r) { assertAsync(r === 5, "rejected/then-nohandler") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(
+ function(x) { assertAsync(x === p2, "resolved/chain") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ function(x) { assertAsync(x === 5, "resolved/then") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.reject(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(
+ function(x) { assertAsync(x === p2, "rejected/chain") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.reject(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "rejected/then") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(function(x) { return x }, assertUnreachable).chain(
+ function(x) { assertAsync(x === p1, "resolved/chain/chain") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(function(x) { return x }, assertUnreachable).then(
+ function(x) { assertAsync(x === 5, "resolved/chain/then") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(function(x) { return 6 }, assertUnreachable).chain(
+ function(x) { assertAsync(x === 6, "resolved/chain/chain2") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(function(x) { return 6 }, assertUnreachable).then(
+ function(x) { assertAsync(x === 6, "resolved/chain/then2") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x) { return x + 1 }, assertUnreachable).chain(
+ function(x) { assertAsync(x === 6, "resolved/then/chain") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x) { return x + 1 }, assertUnreachable).then(
+ function(x) { assertAsync(x === 6, "resolved/then/then") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x){ return Promise.resolve(x+1) }, assertUnreachable).chain(
+ function(x) { assertAsync(x === 6, "resolved/then/chain2") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x) { return Promise.resolve(x+1) }, assertUnreachable).then(
+ function(x) { assertAsync(x === 6, "resolved/then/then2") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(function(x) { throw 6 }, assertUnreachable).chain(
+ assertUnreachable,
+ function(x) { assertAsync(x === 6, "resolved/chain-throw/chain") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(function(x) { throw 6 }, assertUnreachable).then(
+ assertUnreachable,
+ function(x) { assertAsync(x === 6, "resolved/chain-throw/then") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x) { throw 6 }, assertUnreachable).chain(
+ assertUnreachable,
+ function(x) { assertAsync(x === 6, "resolved/then-throw/chain") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x) { throw 6 }, assertUnreachable).then(
+ assertUnreachable,
+ function(x) { assertAsync(x === 6, "resolved/then-throw/then") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var p3 = Promise.resolve(p2)
+ p3.chain(
+ function(x) { assertAsync(x === p2, "resolved/thenable/chain") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ function(x) { assertAsync(x === 5, "resolved/thenable/then") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.reject(5)
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var p3 = Promise.resolve(p2)
+ p3.chain(
+ function(x) { assertAsync(x === p2, "rejected/thenable/chain") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.reject(5)
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "rejected/thenable/then") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(
+ function(x) { assertAsync(x === p2, "chain/resolve") },
+ assertUnreachable
+ )
+ deferred.resolve(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ function(x) { assertAsync(x === 5, "then/resolve") },
+ assertUnreachable
+ )
+ deferred.resolve(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.chain(
+ function(x) { assertAsync(x === p2, "chain/reject") },
+ assertUnreachable
+ )
+ deferred.reject(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "then/reject") }
+ )
+ deferred.reject(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var p3 = Promise.resolve(p2)
+ p3.chain(
+ function(x) { assertAsync(x === p2, "chain/resolve/thenable") },
+ assertUnreachable
+ )
+ deferred.resolve(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ function(x) { assertAsync(x === 5, "then/resolve/thenable") },
+ assertUnreachable
+ )
+ deferred.resolve(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var p3 = Promise.resolve(p2)
+ p3.chain(
+ function(x) { assertAsync(x === p2, "chain/reject/thenable") },
+ assertUnreachable
+ )
+ deferred.reject(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "then/reject/thenable") }
+ )
+ deferred.reject(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var deferred = Promise.defer()
+ var p3 = deferred.promise
+ p3.chain(
+ function(x) { assertAsync(x === p2, "chain/resolve2") },
+ assertUnreachable
+ )
+ deferred.resolve(p2)
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var deferred = Promise.defer()
+ var p3 = deferred.promise
+ p3.then(
+ function(x) { assertAsync(x === 5, "then/resolve2") },
+ assertUnreachable
+ )
+ deferred.resolve(p2)
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var deferred = Promise.defer()
+ var p3 = deferred.promise
+ p3.chain(
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "chain/reject2") }
+ )
+ deferred.reject(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var deferred = Promise.defer()
+ var p3 = deferred.promise
+ p3.then(
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "then/reject2") }
+ )
+ deferred.reject(5)
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var deferred = Promise.defer()
+ var p3 = deferred.promise
+ p3.chain(
+ function(x) { assertAsync(x === p2, "chain/resolve/thenable2") },
+ assertUnreachable
+ )
+ deferred.resolve(p2)
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(5)
+ var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
+ var deferred = Promise.defer()
+ var p3 = deferred.promise
+ p3.then(
+ function(x) { assertAsync(x === 5, "then/resolve/thenable2") },
+ assertUnreachable
+ )
+ deferred.resolve(p2)
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(0)
+ var p2 = p1.chain(function(x) { return p2 }, assertUnreachable)
+ p2.chain(
+ assertUnreachable,
+ function(r) { assertAsync(r instanceof TypeError, "cyclic/chain") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(0)
+ var p2 = p1.then(function(x) { return p2 }, assertUnreachable)
+ p2.chain(
+ assertUnreachable,
+ function(r) { assertAsync(r instanceof TypeError, "cyclic/then") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p = deferred.promise
+ deferred.resolve(p)
+ p.chain(
+ function(x) { assertAsync(x === p, "cyclic/deferred/chain") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p = deferred.promise
+ deferred.resolve(p)
+ p.then(
+ assertUnreachable,
+ function(r) { assertAsync(r instanceof TypeError, "cyclic/deferred/then") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ Promise.all({get length() { throw 666 }}).chain(
+ assertUnreachable,
+ function(r) { assertAsync(r === 666, "all/no-array") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ Promise.all([]).chain(
+ function(x) { assertAsync(x.length === 0, "all/resolve/empty") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred1 = Promise.defer()
+ var p1 = deferred1.promise
+ var deferred2 = Promise.defer()
+ var p2 = deferred2.promise
+ var deferred3 = Promise.defer()
+ var p3 = deferred3.promise
+ Promise.all([p1, p2, p3]).chain(
+ function(x) {
+ assertAsync(x.length === 3, "all/resolve")
+ assertAsync(x[0] === 1, "all/resolve/0")
+ assertAsync(x[1] === 2, "all/resolve/1")
+ assertAsync(x[2] === 3, "all/resolve/2")
+ },
+ assertUnreachable
+ )
+ deferred1.resolve(1)
+ deferred3.resolve(3)
+ deferred2.resolve(2)
+ assertAsyncRan()
+ assertAsyncRan()
+ assertAsyncRan()
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = Promise.resolve(2)
+ var p3 = Promise.defer().promise
+ Promise.all([p1, p2, p3]).chain(
+ assertUnreachable,
+ assertUnreachable
+ )
+ deferred.resolve(1)
+})();
+
+(function() {
+ var deferred1 = Promise.defer()
+ var p1 = deferred1.promise
+ var deferred2 = Promise.defer()
+ var p2 = deferred2.promise
+ var deferred3 = Promise.defer()
+ var p3 = deferred3.promise
+ Promise.all([p1, p2, p3]).chain(
+ assertUnreachable,
+ function(x) { assertAsync(x === 2, "all/reject") }
+ )
+ deferred1.resolve(1)
+ deferred3.resolve(3)
+ deferred2.reject(2)
+ assertAsyncRan()
+})();
+
+(function() {
+ Promise.race([]).chain(
+ assertUnreachable,
+ assertUnreachable
+ )
+})();
+
+(function() {
+ var p1 = Promise.resolve(1)
+ var p2 = Promise.resolve(2)
+ var p3 = Promise.resolve(3)
+ Promise.race([p1, p2, p3]).chain(
+ function(x) { assertAsync(x === 1, "resolved/one") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var p1 = Promise.resolve(1)
+ var p2 = Promise.resolve(2)
+ var p3 = Promise.resolve(3)
+ Promise.race([0, p1, p2, p3]).chain(
+ function(x) { assertAsync(x === 0, "resolved-const/one") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ Promise.race({get length() { throw 666 }}).chain(
+ assertUnreachable,
+ function(r) { assertAsync(r === 666, "one/no-array") }
+ )
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred1 = Promise.defer()
+ var p1 = deferred1.promise
+ var deferred2 = Promise.defer()
+ var p2 = deferred2.promise
+ var deferred3 = Promise.defer()
+ var p3 = deferred3.promise
+ Promise.race([p1, p2, p3]).chain(
+ function(x) { assertAsync(x === 3, "one/resolve") },
+ assertUnreachable
+ )
+ deferred3.resolve(3)
+ deferred1.resolve(1)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred = Promise.defer()
+ var p1 = deferred.promise
+ var p2 = Promise.resolve(2)
+ var p3 = Promise.defer().promise
+ Promise.race([p1, p2, p3]).chain(
+ function(x) { assertAsync(x === 2, "resolved/one") },
+ assertUnreachable
+ )
+ deferred.resolve(1)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred1 = Promise.defer()
+ var p1 = deferred1.promise
+ var deferred2 = Promise.defer()
+ var p2 = deferred2.promise
+ var deferred3 = Promise.defer()
+ var p3 = deferred3.promise
+ Promise.race([p1, p2, p3]).chain(
+ function(x) { assertAsync(x === 3, "one/resolve/reject") },
+ assertUnreachable
+ )
+ deferred3.resolve(3)
+ deferred1.reject(1)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred1 = Promise.defer()
+ var p1 = deferred1.promise
+ var deferred2 = Promise.defer()
+ var p2 = deferred2.promise
+ var deferred3 = Promise.defer()
+ var p3 = deferred3.promise
+ Promise.race([p1, p2, p3]).chain(
+ assertUnreachable,
+ function(x) { assertAsync(x === 3, "one/reject/resolve") }
+ )
+ deferred3.reject(3)
+ deferred1.resolve(1)
+ assertAsyncRan()
+})();
+
+(function() {
+ var log
+ function MyPromise(resolver) {
+ log += "n"
+ var promise = new Promise(function(resolve, reject) {
+ resolver(
+ function(x) { log += "x" + x; resolve(x) },
+ function(r) { log += "r" + r; reject(r) }
+ )
+ })
+ promise.__proto__ = MyPromise.prototype
+ return promise
+ }
+
+ MyPromise.__proto__ = Promise
+ MyPromise.defer = function() {
+ log += "d"
+ return this.__proto__.defer.call(this)
+ }
+
+ MyPromise.prototype.__proto__ = Promise.prototype
+ MyPromise.prototype.chain = function(resolve, reject) {
+ log += "c"
+ return this.__proto__.__proto__.chain.call(this, resolve, reject)
+ }
+
+ log = ""
+ var p1 = new MyPromise(function(resolve, reject) { resolve(1) })
+ var p2 = new MyPromise(function(resolve, reject) { reject(2) })
+ var d3 = MyPromise.defer()
+ assertTrue(d3.promise instanceof Promise, "subclass/instance")
+ assertTrue(d3.promise instanceof MyPromise, "subclass/instance-my3")
+ assertTrue(log === "nx1nr2dn", "subclass/create")
+
+ log = ""
+ var p4 = MyPromise.resolve(4)
+ var p5 = MyPromise.reject(5)
+ assertTrue(p4 instanceof Promise, "subclass/instance4")
+ assertTrue(p4 instanceof MyPromise, "subclass/instance-my4")
+ assertTrue(p5 instanceof Promise, "subclass/instance5")
+ assertTrue(p5 instanceof MyPromise, "subclass/instance-my5")
+ d3.resolve(3)
+ assertTrue(log === "nx4nr5x3", "subclass/resolve")
+
+ log = ""
+ var d6 = MyPromise.defer()
+ d6.promise.chain(function(x) {
+ return new Promise(function(resolve) { resolve(x) })
+ }).chain(function() {})
+ d6.resolve(6)
+ assertTrue(log === "dncncnx6", "subclass/chain")
+
+ log = ""
+ Promise.all([11, Promise.resolve(12), 13, MyPromise.resolve(14), 15, 16])
+ assertTrue(log === "nx14cn", "subclass/all/arg")
+
+ log = ""
+ MyPromise.all([21, Promise.resolve(22), 23, MyPromise.resolve(24), 25, 26])
+ assertTrue(log === "nx24nnx21cnnx23cncnnx25cnnx26cn", "subclass/all/self")
+})();
+
+
+assertAsyncDone()
diff --git a/deps/v8/test/mjsunit/harmony/proxies-function.js b/deps/v8/test/mjsunit/harmony/proxies-function.js
index 6b8d098442..7b07d42423 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-function.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-function.js
@@ -53,8 +53,7 @@ var receiver
function TestCall(isStrict, callTrap) {
assertEquals(42, callTrap(5, 37))
- // TODO(rossberg): strict mode seems to be broken on x64...
- // assertSame(isStrict ? undefined : global_object, receiver)
+ assertSame(isStrict ? undefined : global_object, receiver)
var handler = {
get: function(r, k) {
@@ -67,8 +66,7 @@ function TestCall(isStrict, callTrap) {
receiver = 333
assertEquals(42, f(11, 31))
- // TODO(rossberg): strict mode seems to be broken on x64...
- // assertSame(isStrict ? undefined : global_object, receiver)
+ assertSame(isStrict ? undefined : global_object, receiver)
receiver = 333
assertEquals(42, o.f(10, 32))
assertSame(o, receiver)
@@ -746,3 +744,31 @@ function TestCalls() {
TestCalls()
*/
+
+var realms = [Realm.create(), Realm.create()];
+Realm.shared = {};
+
+Realm.eval(realms[0], "function f() { return this; };");
+Realm.eval(realms[0], "Realm.shared.f = f;");
+Realm.eval(realms[0], "Realm.shared.fg = this;");
+Realm.eval(realms[1], "function g() { return this; };");
+Realm.eval(realms[1], "Realm.shared.g = g;");
+Realm.eval(realms[1], "Realm.shared.gg = this;");
+
+var fp = Proxy.createFunction({}, Realm.shared.f);
+var gp = Proxy.createFunction({}, Realm.shared.g);
+
+for (var i = 0; i < 10; i++) {
+ assertEquals(Realm.shared.fg, fp());
+ assertEquals(Realm.shared.gg, gp());
+
+ with (this) {
+ assertEquals(this, fp());
+ assertEquals(this, gp());
+ }
+
+ with ({}) {
+ assertEquals(Realm.shared.fg, fp());
+ assertEquals(Realm.shared.gg, gp());
+ }
+}
diff --git a/deps/v8/test/mjsunit/harmony/proxies.js b/deps/v8/test/mjsunit/harmony/proxies.js
index f68e3bd157..d26ce1d149 100644
--- a/deps/v8/test/mjsunit/harmony/proxies.js
+++ b/deps/v8/test/mjsunit/harmony/proxies.js
@@ -25,7 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
+// We change the stack size for the A64 simulator because at one point this test
+// enters an infinite recursion that goes through the runtime and overflows the
+// system stack before the simulator stack.
+
+// Flags: --harmony-proxies --sim-stack-size=500
// Helper.
diff --git a/deps/v8/test/mjsunit/harmony/string-endswith.js b/deps/v8/test/mjsunit/harmony/string-endswith.js
index 128cf1d023..cc76b5fe4e 100644
--- a/deps/v8/test/mjsunit/harmony/string-endswith.js
+++ b/deps/v8/test/mjsunit/harmony/string-endswith.js
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -66,8 +66,6 @@ var TEST_INPUT = [{
}, {
msg: "Boolean false", val: false
}, {
- msg: "Regular expression /\d+/", val: /\d+/
-}, {
msg: "Empty array []", val: []
}, {
msg: "Empty object {}", val: {}
@@ -134,3 +132,281 @@ assertTrue("abc".endsWith("bc", undefined));
assertFalse("abc".endsWith("bc", -43));
assertFalse("abc".endsWith("bc", -Infinity));
assertFalse("abc".endsWith("bc", NaN));
+
+// Test cases taken from
+// https://github.com/mathiasbynens/String.prototype.endsWith/blob/master/tests/tests.js
+Object.prototype[1] = 2; // try to break `arguments[1]`
+
+assertEquals(String.prototype.endsWith.length, 1);
+assertEquals(String.prototype.propertyIsEnumerable("endsWith"), false);
+
+assertEquals("undefined".endsWith(), true);
+assertEquals("undefined".endsWith(undefined), true);
+assertEquals("undefined".endsWith(null), false);
+assertEquals("null".endsWith(), false);
+assertEquals("null".endsWith(undefined), false);
+assertEquals("null".endsWith(null), true);
+
+assertEquals("abc".endsWith(), false);
+assertEquals("abc".endsWith(""), true);
+assertEquals("abc".endsWith("\0"), false);
+assertEquals("abc".endsWith("c"), true);
+assertEquals("abc".endsWith("b"), false);
+assertEquals("abc".endsWith("ab"), false);
+assertEquals("abc".endsWith("bc"), true);
+assertEquals("abc".endsWith("abc"), true);
+assertEquals("abc".endsWith("bcd"), false);
+assertEquals("abc".endsWith("abcd"), false);
+assertEquals("abc".endsWith("bcde"), false);
+
+assertEquals("abc".endsWith("", NaN), true);
+assertEquals("abc".endsWith("\0", NaN), false);
+assertEquals("abc".endsWith("c", NaN), false);
+assertEquals("abc".endsWith("b", NaN), false);
+assertEquals("abc".endsWith("ab", NaN), false);
+assertEquals("abc".endsWith("bc", NaN), false);
+assertEquals("abc".endsWith("abc", NaN), false);
+assertEquals("abc".endsWith("bcd", NaN), false);
+assertEquals("abc".endsWith("abcd", NaN), false);
+assertEquals("abc".endsWith("bcde", NaN), false);
+
+assertEquals("abc".endsWith("", false), true);
+assertEquals("abc".endsWith("\0", false), false);
+assertEquals("abc".endsWith("c", false), false);
+assertEquals("abc".endsWith("b", false), false);
+assertEquals("abc".endsWith("ab", false), false);
+assertEquals("abc".endsWith("bc", false), false);
+assertEquals("abc".endsWith("abc", false), false);
+assertEquals("abc".endsWith("bcd", false), false);
+assertEquals("abc".endsWith("abcd", false), false);
+assertEquals("abc".endsWith("bcde", false), false);
+
+assertEquals("abc".endsWith("", undefined), true);
+assertEquals("abc".endsWith("\0", undefined), false);
+assertEquals("abc".endsWith("c", undefined), true);
+assertEquals("abc".endsWith("b", undefined), false);
+assertEquals("abc".endsWith("ab", undefined), false);
+assertEquals("abc".endsWith("bc", undefined), true);
+assertEquals("abc".endsWith("abc", undefined), true);
+assertEquals("abc".endsWith("bcd", undefined), false);
+assertEquals("abc".endsWith("abcd", undefined), false);
+assertEquals("abc".endsWith("bcde", undefined), false);
+
+assertEquals("abc".endsWith("", null), true);
+assertEquals("abc".endsWith("\0", null), false);
+assertEquals("abc".endsWith("c", null), false);
+assertEquals("abc".endsWith("b", null), false);
+assertEquals("abc".endsWith("ab", null), false);
+assertEquals("abc".endsWith("bc", null), false);
+assertEquals("abc".endsWith("abc", null), false);
+assertEquals("abc".endsWith("bcd", null), false);
+assertEquals("abc".endsWith("abcd", null), false);
+assertEquals("abc".endsWith("bcde", null), false);
+
+assertEquals("abc".endsWith("", -Infinity), true);
+assertEquals("abc".endsWith("\0", -Infinity), false);
+assertEquals("abc".endsWith("c", -Infinity), false);
+assertEquals("abc".endsWith("b", -Infinity), false);
+assertEquals("abc".endsWith("ab", -Infinity), false);
+assertEquals("abc".endsWith("bc", -Infinity), false);
+assertEquals("abc".endsWith("abc", -Infinity), false);
+assertEquals("abc".endsWith("bcd", -Infinity), false);
+assertEquals("abc".endsWith("abcd", -Infinity), false);
+assertEquals("abc".endsWith("bcde", -Infinity), false);
+
+assertEquals("abc".endsWith("", -1), true);
+assertEquals("abc".endsWith("\0", -1), false);
+assertEquals("abc".endsWith("c", -1), false);
+assertEquals("abc".endsWith("b", -1), false);
+assertEquals("abc".endsWith("ab", -1), false);
+assertEquals("abc".endsWith("bc", -1), false);
+assertEquals("abc".endsWith("abc", -1), false);
+assertEquals("abc".endsWith("bcd", -1), false);
+assertEquals("abc".endsWith("abcd", -1), false);
+assertEquals("abc".endsWith("bcde", -1), false);
+
+assertEquals("abc".endsWith("", -0), true);
+assertEquals("abc".endsWith("\0", -0), false);
+assertEquals("abc".endsWith("c", -0), false);
+assertEquals("abc".endsWith("b", -0), false);
+assertEquals("abc".endsWith("ab", -0), false);
+assertEquals("abc".endsWith("bc", -0), false);
+assertEquals("abc".endsWith("abc", -0), false);
+assertEquals("abc".endsWith("bcd", -0), false);
+assertEquals("abc".endsWith("abcd", -0), false);
+assertEquals("abc".endsWith("bcde", -0), false);
+
+assertEquals("abc".endsWith("", +0), true);
+assertEquals("abc".endsWith("\0", +0), false);
+assertEquals("abc".endsWith("c", +0), false);
+assertEquals("abc".endsWith("b", +0), false);
+assertEquals("abc".endsWith("ab", +0), false);
+assertEquals("abc".endsWith("bc", +0), false);
+assertEquals("abc".endsWith("abc", +0), false);
+assertEquals("abc".endsWith("bcd", +0), false);
+assertEquals("abc".endsWith("abcd", +0), false);
+assertEquals("abc".endsWith("bcde", +0), false);
+
+assertEquals("abc".endsWith("", 1), true);
+assertEquals("abc".endsWith("\0", 1), false);
+assertEquals("abc".endsWith("c", 1), false);
+assertEquals("abc".endsWith("b", 1), false);
+assertEquals("abc".endsWith("ab", 1), false);
+assertEquals("abc".endsWith("bc", 1), false);
+assertEquals("abc".endsWith("abc", 1), false);
+assertEquals("abc".endsWith("bcd", 1), false);
+assertEquals("abc".endsWith("abcd", 1), false);
+assertEquals("abc".endsWith("bcde", 1), false);
+
+assertEquals("abc".endsWith("", 2), true);
+assertEquals("abc".endsWith("\0", 2), false);
+assertEquals("abc".endsWith("c", 2), false);
+assertEquals("abc".endsWith("b", 2), true);
+assertEquals("abc".endsWith("ab", 2), true);
+assertEquals("abc".endsWith("bc", 2), false);
+assertEquals("abc".endsWith("abc", 2), false);
+assertEquals("abc".endsWith("bcd", 2), false);
+assertEquals("abc".endsWith("abcd", 2), false);
+assertEquals("abc".endsWith("bcde", 2), false);
+
+assertEquals("abc".endsWith("", +Infinity), true);
+assertEquals("abc".endsWith("\0", +Infinity), false);
+assertEquals("abc".endsWith("c", +Infinity), true);
+assertEquals("abc".endsWith("b", +Infinity), false);
+assertEquals("abc".endsWith("ab", +Infinity), false);
+assertEquals("abc".endsWith("bc", +Infinity), true);
+assertEquals("abc".endsWith("abc", +Infinity), true);
+assertEquals("abc".endsWith("bcd", +Infinity), false);
+assertEquals("abc".endsWith("abcd", +Infinity), false);
+assertEquals("abc".endsWith("bcde", +Infinity), false);
+
+assertEquals("abc".endsWith("", true), true);
+assertEquals("abc".endsWith("\0", true), false);
+assertEquals("abc".endsWith("c", true), false);
+assertEquals("abc".endsWith("b", true), false);
+assertEquals("abc".endsWith("ab", true), false);
+assertEquals("abc".endsWith("bc", true), false);
+assertEquals("abc".endsWith("abc", true), false);
+assertEquals("abc".endsWith("bcd", true), false);
+assertEquals("abc".endsWith("abcd", true), false);
+assertEquals("abc".endsWith("bcde", true), false);
+
+assertEquals("abc".endsWith("", "x"), true);
+assertEquals("abc".endsWith("\0", "x"), false);
+assertEquals("abc".endsWith("c", "x"), false);
+assertEquals("abc".endsWith("b", "x"), false);
+assertEquals("abc".endsWith("ab", "x"), false);
+assertEquals("abc".endsWith("bc", "x"), false);
+assertEquals("abc".endsWith("abc", "x"), false);
+assertEquals("abc".endsWith("bcd", "x"), false);
+assertEquals("abc".endsWith("abcd", "x"), false);
+assertEquals("abc".endsWith("bcde", "x"), false);
+
+assertEquals("[a-z]+(bar)?".endsWith("(bar)?"), true);
+assertThrows(function() { "[a-z]+(bar)?".endsWith(/(bar)?/);
+}, TypeError);
+assertEquals("[a-z]+(bar)?".endsWith("[a-z]+", 6), true);
+assertThrows(function() { "[a-z]+(bar)?".endsWith(/(bar)?/);
+}, TypeError);
+assertThrows(function() { "[a-z]+/(bar)?/".endsWith(/(bar)?/);
+}, TypeError);
+
+// http://mathiasbynens.be/notes/javascript-unicode#poo-test
+var string = "I\xF1t\xEBrn\xE2ti\xF4n\xE0liz\xE6ti\xF8n\u2603\uD83D\uDCA9";
+assertEquals(string.endsWith(""), true);
+assertEquals(string.endsWith("\xF1t\xEBr"), false);
+assertEquals(string.endsWith("\xF1t\xEBr", 5), true);
+assertEquals(string.endsWith("\xE0liz\xE6"), false);
+assertEquals(string.endsWith("\xE0liz\xE6", 16), true);
+assertEquals(string.endsWith("\xF8n\u2603\uD83D\uDCA9"), true);
+assertEquals(string.endsWith("\xF8n\u2603\uD83D\uDCA9", 23), true);
+assertEquals(string.endsWith("\u2603"), false);
+assertEquals(string.endsWith("\u2603", 21), true);
+assertEquals(string.endsWith("\uD83D\uDCA9"), true);
+assertEquals(string.endsWith("\uD83D\uDCA9", 23), true);
+
+assertThrows(function() {
+ String.prototype.endsWith.call(undefined);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.call(undefined, "b");
+}, TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.call(undefined, "b", 4);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.call(null);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.call(null, "b");
+}, TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.call(null, "b", 4);
+}, TypeError);
+assertEquals(String.prototype.endsWith.call(42, "2"), true);
+assertEquals(String.prototype.endsWith.call(42, "4"), false);
+assertEquals(String.prototype.endsWith.call(42, "b", 4), false);
+assertEquals(String.prototype.endsWith.call(42, "2", 1), false);
+assertEquals(String.prototype.endsWith.call(42, "2", 4), true);
+assertEquals(String.prototype.endsWith.call({
+ "toString": function() { return "abc"; }
+}, "b", 0), false);
+assertEquals(String.prototype.endsWith.call({
+ "toString": function() { return "abc"; }
+}, "b", 1), false);
+assertEquals(String.prototype.endsWith.call({
+ "toString": function() { return "abc"; }
+}, "b", 2), true);
+assertThrows(function() {
+ String.prototype.endsWith.call({
+ "toString": function() { throw RangeError(); }
+ }, /./);
+}, RangeError);
+assertThrows(function() {
+ String.prototype.endsWith.call({
+ "toString": function() { return "abc"; }
+ }, /./);
+}, TypeError);
+
+assertThrows(function() {
+ String.prototype.endsWith.apply(undefined);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.apply(undefined, ["b"]); },
+TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.apply(undefined, ["b", 4]);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.apply(null);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.apply(null, ["b"]);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.endsWith.apply(null, ["b", 4]);
+}, TypeError);
+assertEquals(String.prototype.endsWith.apply(42, ["2"]), true);
+assertEquals(String.prototype.endsWith.apply(42, ["4"]), false);
+assertEquals(String.prototype.endsWith.apply(42, ["b", 4]), false);
+assertEquals(String.prototype.endsWith.apply(42, ["2", 1]), false);
+assertEquals(String.prototype.endsWith.apply(42, ["2", 4]), true);
+assertEquals(String.prototype.endsWith.apply({
+ "toString": function() { return "abc"; }
+}, ["b", 0]), false);
+assertEquals(String.prototype.endsWith.apply({
+ "toString": function() { return "abc"; }
+}, ["b", 1]), false);
+assertEquals(String.prototype.endsWith.apply({
+ "toString": function() { return "abc"; }
+}, ["b", 2]), true);
+assertThrows(function() {
+ String.prototype.endsWith.apply({
+ "toString": function() { throw RangeError(); }
+ }, [/./]);
+}, RangeError);
+assertThrows(function() {
+ String.prototype.endsWith.apply({
+ "toString": function() { return "abc"; }
+ }, [/./]);
+}, TypeError);
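
The assertions above fix the observable order of operations for String.prototype.endsWith: the receiver is stringified first (which is why the throwing-toString object surfaces a RangeError rather than a TypeError), a RegExp search string is then rejected, and only afterwards is the end position coerced and clamped. A minimal, spec-shaped sketch in plain JavaScript; the helper name is invented for illustration and this is not V8's implementation:

    function endsWithSketch(receiver, search, endPosition) {
      if (receiver == null) throw new TypeError("endsWith called on null or undefined");
      var s = String(receiver);            // ToString(this): runs any user toString() first
      if (search instanceof RegExp) {      // rough stand-in for the spec's RegExp rejection
        throw new TypeError("search string must not be a RegExp");
      }
      var searchStr = String(search);
      var n = Number(endPosition);
      var end = endPosition === undefined
          ? s.length
          : Math.min(Math.max(isNaN(n) ? 0 : Math.trunc(n), 0), s.length);
      var start = end - searchStr.length;
      return start >= 0 && s.slice(start, end) === searchStr;
    }

    endsWithSketch(42, "2");         // true, like String.prototype.endsWith.call(42, "2")
    endsWithSketch("abc", "b", 2);   // true, like the toString-object cases above
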
diff --git a/deps/v8/test/mjsunit/harmony/string-repeat.js b/deps/v8/test/mjsunit/harmony/string-repeat.js
index 182e5c0e0e..761089b1d0 100644
--- a/deps/v8/test/mjsunit/harmony/string-repeat.js
+++ b/deps/v8/test/mjsunit/harmony/string-repeat.js
@@ -71,4 +71,4 @@ var myobj = {
repeat : String.prototype.repeat
};
assertEquals("abc", myobj.repeat(1));
-assertEquals("abcabc", myobj.repeat(2)); \ No newline at end of file
+assertEquals("abcabc", myobj.repeat(2));
diff --git a/deps/v8/test/mjsunit/harmony/string-startswith.js b/deps/v8/test/mjsunit/harmony/string-startswith.js
index 60c85d31b3..d72f2946f5 100644
--- a/deps/v8/test/mjsunit/harmony/string-startswith.js
+++ b/deps/v8/test/mjsunit/harmony/string-startswith.js
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -66,8 +66,6 @@ var TEST_INPUT = [{
}, {
msg: "Boolean false", val: false
}, {
- msg: "Regular expression /\d+/", val: /\d+/
-}, {
msg: "Empty array []", val: []
}, {
msg: "Empty object {}", val: {}
@@ -133,3 +131,273 @@ assertTrue("abc".startsWith("ab", -43));
assertTrue("abc".startsWith("ab", -Infinity));
assertFalse("abc".startsWith("bc", -42));
assertFalse("abc".startsWith("bc", -Infinity));
+
+// Test cases taken from
+// https://github.com/mathiasbynens/String.prototype.startsWith/blob/master/tests/tests.js
+Object.prototype[1] = 2; // try to break `arguments[1]`
+
+assertEquals(String.prototype.startsWith.length, 1);
+assertEquals(String.prototype.propertyIsEnumerable("startsWith"), false);
+
+assertEquals("undefined".startsWith(), true);
+assertEquals("undefined".startsWith(undefined), true);
+assertEquals("undefined".startsWith(null), false);
+assertEquals("null".startsWith(), false);
+assertEquals("null".startsWith(undefined), false);
+assertEquals("null".startsWith(null), true);
+
+assertEquals("abc".startsWith(), false);
+assertEquals("abc".startsWith(""), true);
+assertEquals("abc".startsWith("\0"), false);
+assertEquals("abc".startsWith("a"), true);
+assertEquals("abc".startsWith("b"), false);
+assertEquals("abc".startsWith("ab"), true);
+assertEquals("abc".startsWith("bc"), false);
+assertEquals("abc".startsWith("abc"), true);
+assertEquals("abc".startsWith("bcd"), false);
+assertEquals("abc".startsWith("abcd"), false);
+assertEquals("abc".startsWith("bcde"), false);
+
+assertEquals("abc".startsWith("", NaN), true);
+assertEquals("abc".startsWith("\0", NaN), false);
+assertEquals("abc".startsWith("a", NaN), true);
+assertEquals("abc".startsWith("b", NaN), false);
+assertEquals("abc".startsWith("ab", NaN), true);
+assertEquals("abc".startsWith("bc", NaN), false);
+assertEquals("abc".startsWith("abc", NaN), true);
+assertEquals("abc".startsWith("bcd", NaN), false);
+assertEquals("abc".startsWith("abcd", NaN), false);
+assertEquals("abc".startsWith("bcde", NaN), false);
+
+assertEquals("abc".startsWith("", false), true);
+assertEquals("abc".startsWith("\0", false), false);
+assertEquals("abc".startsWith("a", false), true);
+assertEquals("abc".startsWith("b", false), false);
+assertEquals("abc".startsWith("ab", false), true);
+assertEquals("abc".startsWith("bc", false), false);
+assertEquals("abc".startsWith("abc", false), true);
+assertEquals("abc".startsWith("bcd", false), false);
+assertEquals("abc".startsWith("abcd", false), false);
+assertEquals("abc".startsWith("bcde", false), false);
+
+assertEquals("abc".startsWith("", undefined), true);
+assertEquals("abc".startsWith("\0", undefined), false);
+assertEquals("abc".startsWith("a", undefined), true);
+assertEquals("abc".startsWith("b", undefined), false);
+assertEquals("abc".startsWith("ab", undefined), true);
+assertEquals("abc".startsWith("bc", undefined), false);
+assertEquals("abc".startsWith("abc", undefined), true);
+assertEquals("abc".startsWith("bcd", undefined), false);
+assertEquals("abc".startsWith("abcd", undefined), false);
+assertEquals("abc".startsWith("bcde", undefined), false);
+
+assertEquals("abc".startsWith("", null), true);
+assertEquals("abc".startsWith("\0", null), false);
+assertEquals("abc".startsWith("a", null), true);
+assertEquals("abc".startsWith("b", null), false);
+assertEquals("abc".startsWith("ab", null), true);
+assertEquals("abc".startsWith("bc", null), false);
+assertEquals("abc".startsWith("abc", null), true);
+assertEquals("abc".startsWith("bcd", null), false);
+assertEquals("abc".startsWith("abcd", null), false);
+assertEquals("abc".startsWith("bcde", null), false);
+
+assertEquals("abc".startsWith("", -Infinity), true);
+assertEquals("abc".startsWith("\0", -Infinity), false);
+assertEquals("abc".startsWith("a", -Infinity), true);
+assertEquals("abc".startsWith("b", -Infinity), false);
+assertEquals("abc".startsWith("ab", -Infinity), true);
+assertEquals("abc".startsWith("bc", -Infinity), false);
+assertEquals("abc".startsWith("abc", -Infinity), true);
+assertEquals("abc".startsWith("bcd", -Infinity), false);
+assertEquals("abc".startsWith("abcd", -Infinity), false);
+assertEquals("abc".startsWith("bcde", -Infinity), false);
+
+assertEquals("abc".startsWith("", -1), true);
+assertEquals("abc".startsWith("\0", -1), false);
+assertEquals("abc".startsWith("a", -1), true);
+assertEquals("abc".startsWith("b", -1), false);
+assertEquals("abc".startsWith("ab", -1), true);
+assertEquals("abc".startsWith("bc", -1), false);
+assertEquals("abc".startsWith("abc", -1), true);
+assertEquals("abc".startsWith("bcd", -1), false);
+assertEquals("abc".startsWith("abcd", -1), false);
+assertEquals("abc".startsWith("bcde", -1), false);
+
+assertEquals("abc".startsWith("", -0), true);
+assertEquals("abc".startsWith("\0", -0), false);
+assertEquals("abc".startsWith("a", -0), true);
+assertEquals("abc".startsWith("b", -0), false);
+assertEquals("abc".startsWith("ab", -0), true);
+assertEquals("abc".startsWith("bc", -0), false);
+assertEquals("abc".startsWith("abc", -0), true);
+assertEquals("abc".startsWith("bcd", -0), false);
+assertEquals("abc".startsWith("abcd", -0), false);
+assertEquals("abc".startsWith("bcde", -0), false);
+
+assertEquals("abc".startsWith("", +0), true);
+assertEquals("abc".startsWith("\0", +0), false);
+assertEquals("abc".startsWith("a", +0), true);
+assertEquals("abc".startsWith("b", +0), false);
+assertEquals("abc".startsWith("ab", +0), true);
+assertEquals("abc".startsWith("bc", +0), false);
+assertEquals("abc".startsWith("abc", +0), true);
+assertEquals("abc".startsWith("bcd", +0), false);
+assertEquals("abc".startsWith("abcd", +0), false);
+assertEquals("abc".startsWith("bcde", +0), false);
+
+assertEquals("abc".startsWith("", 1), true);
+assertEquals("abc".startsWith("\0", 1), false);
+assertEquals("abc".startsWith("a", 1), false);
+assertEquals("abc".startsWith("b", 1), true);
+assertEquals("abc".startsWith("ab", 1), false);
+assertEquals("abc".startsWith("bc", 1), true);
+assertEquals("abc".startsWith("abc", 1), false);
+assertEquals("abc".startsWith("bcd", 1), false);
+assertEquals("abc".startsWith("abcd", 1), false);
+assertEquals("abc".startsWith("bcde", 1), false);
+
+assertEquals("abc".startsWith("", +Infinity), true);
+assertEquals("abc".startsWith("\0", +Infinity), false);
+assertEquals("abc".startsWith("a", +Infinity), false);
+assertEquals("abc".startsWith("b", +Infinity), false);
+assertEquals("abc".startsWith("ab", +Infinity), false);
+assertEquals("abc".startsWith("bc", +Infinity), false);
+assertEquals("abc".startsWith("abc", +Infinity), false);
+assertEquals("abc".startsWith("bcd", +Infinity), false);
+assertEquals("abc".startsWith("abcd", +Infinity), false);
+assertEquals("abc".startsWith("bcde", +Infinity), false);
+
+assertEquals("abc".startsWith("", true), true);
+assertEquals("abc".startsWith("\0", true), false);
+assertEquals("abc".startsWith("a", true), false);
+assertEquals("abc".startsWith("b", true), true);
+assertEquals("abc".startsWith("ab", true), false);
+assertEquals("abc".startsWith("bc", true), true);
+assertEquals("abc".startsWith("abc", true), false);
+assertEquals("abc".startsWith("bcd", true), false);
+assertEquals("abc".startsWith("abcd", true), false);
+assertEquals("abc".startsWith("bcde", true), false);
+
+assertEquals("abc".startsWith("", "x"), true);
+assertEquals("abc".startsWith("\0", "x"), false);
+assertEquals("abc".startsWith("a", "x"), true);
+assertEquals("abc".startsWith("b", "x"), false);
+assertEquals("abc".startsWith("ab", "x"), true);
+assertEquals("abc".startsWith("bc", "x"), false);
+assertEquals("abc".startsWith("abc", "x"), true);
+assertEquals("abc".startsWith("bcd", "x"), false);
+assertEquals("abc".startsWith("abcd", "x"), false);
+assertEquals("abc".startsWith("bcde", "x"), false);
+
+assertEquals("[a-z]+(bar)?".startsWith("[a-z]+"), true);
+assertThrows(function() { "[a-z]+(bar)?".startsWith(/[a-z]+/); }, TypeError);
+assertEquals("[a-z]+(bar)?".startsWith("(bar)?", 6), true);
+assertThrows(function() { "[a-z]+(bar)?".startsWith(/(bar)?/); }, TypeError);
+assertThrows(function() { "[a-z]+/(bar)?/".startsWith(/(bar)?/); }, TypeError);
+
+// http://mathiasbynens.be/notes/javascript-unicode#poo-test
+var string = "I\xF1t\xEBrn\xE2ti\xF4n\xE0liz\xE6ti\xF8n\u2603\uD83D\uDCA9";
+assertEquals(string.startsWith(""), true);
+assertEquals(string.startsWith("\xF1t\xEBr"), false);
+assertEquals(string.startsWith("\xF1t\xEBr", 1), true);
+assertEquals(string.startsWith("\xE0liz\xE6"), false);
+assertEquals(string.startsWith("\xE0liz\xE6", 11), true);
+assertEquals(string.startsWith("\xF8n\u2603\uD83D\uDCA9"), false);
+assertEquals(string.startsWith("\xF8n\u2603\uD83D\uDCA9", 18), true);
+assertEquals(string.startsWith("\u2603"), false);
+assertEquals(string.startsWith("\u2603", 20), true);
+assertEquals(string.startsWith("\uD83D\uDCA9"), false);
+assertEquals(string.startsWith("\uD83D\uDCA9", 21), true);
+
+assertThrows(function() {
+ String.prototype.startsWith.call(undefined);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.call(undefined, "b");
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.call(undefined, "b", 4);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.call(null);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.call(null, "b");
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.call(null, "b", 4);
+}, TypeError);
+assertEquals(String.prototype.startsWith.call(42, "2"), false);
+assertEquals(String.prototype.startsWith.call(42, "4"), true);
+assertEquals(String.prototype.startsWith.call(42, "b", 4), false);
+assertEquals(String.prototype.startsWith.call(42, "2", 1), true);
+assertEquals(String.prototype.startsWith.call(42, "2", 4), false);
+assertEquals(String.prototype.startsWith.call({
+ "toString": function() { return "abc"; }
+}, "b", 0), false);
+assertEquals(String.prototype.startsWith.call({
+ "toString": function() { return "abc"; }
+}, "b", 1), true);
+assertEquals(String.prototype.startsWith.call({
+ "toString": function() { return "abc"; }
+}, "b", 2), false);
+assertThrows(function() {
+ String.prototype.startsWith.call({
+ "toString": function() { throw RangeError(); }
+ }, /./);
+}, RangeError);
+assertThrows(function() {
+ String.prototype.startsWith.call({
+ "toString": function() { return "abc"; }
+ }, /./);
+}, TypeError);
+
+assertThrows(function() {
+ String.prototype.startsWith.apply(undefined);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.apply(undefined, ["b"]);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.apply(undefined, ["b", 4]);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.apply(null);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.apply(null, ["b"]);
+}, TypeError);
+assertThrows(function() {
+ String.prototype.startsWith.apply(null, ["b", 4]);
+}, TypeError);
+assertEquals(String.prototype.startsWith.apply(42, ["2"]), false);
+assertEquals(String.prototype.startsWith.apply(42, ["4"]), true);
+assertEquals(String.prototype.startsWith.apply(42, ["b", 4]), false);
+assertEquals(String.prototype.startsWith.apply(42, ["2", 1]), true);
+assertEquals(String.prototype.startsWith.apply(42, ["2", 4]), false);
+assertEquals(String.prototype.startsWith.apply({
+ "toString": function() {
+ return "abc";
+ }
+}, ["b", 0]), false);
+assertEquals(String.prototype.startsWith.apply({
+ "toString": function() {
+ return "abc";
+ }
+}, ["b", 1]), true);
+assertEquals(String.prototype.startsWith.apply({
+ "toString": function() {
+ return "abc";
+ }
+}, ["b", 2]), false);
+assertThrows(function() {
+ String.prototype.startsWith.apply({
+ "toString": function() { throw RangeError(); }
+ }, [/./]);
+}, RangeError);
+assertThrows(function() {
+ String.prototype.startsWith.apply({
+ "toString": function() { return "abc"; }
+ }, [/./]);
+}, TypeError);
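
The long block of position variants above (NaN, +/-0, booleans, strings, +/-Infinity) all reduce to one rule: the position argument goes through ToInteger and is then clamped to [0, string length]. A small illustrative helper (the name is invented here) showing just that step:

    function clampStartPosition(position, length) {
      var n = Number(position);                   // undefined/NaN/"x" become NaN; null/false become 0
      if (isNaN(n)) n = 0;                        // ToInteger(NaN) is 0
      n = n < 0 ? Math.ceil(n) : Math.floor(n);   // truncate toward zero
      return Math.min(Math.max(n, 0), length);
    }

    clampStartPosition(NaN, 3);        // 0, so "abc".startsWith("a", NaN) is true
    clampStartPosition(true, 3);       // 1, so "abc".startsWith("b", true) is true
    clampStartPosition(+Infinity, 3);  // 3, so only "" can still match
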
diff --git a/deps/v8/test/mjsunit/harmony/symbols.js b/deps/v8/test/mjsunit/harmony/symbols.js
index 5eaa1a37d8..ce02a05ace 100644
--- a/deps/v8/test/mjsunit/harmony/symbols.js
+++ b/deps/v8/test/mjsunit/harmony/symbols.js
@@ -59,6 +59,7 @@ function TestType() {
for (var i in symbols) {
assertEquals("symbol", typeof symbols[i])
assertTrue(typeof symbols[i] === "symbol")
+ assertFalse(%SymbolIsPrivate(symbols[i]))
assertEquals(null, %_ClassOf(symbols[i]))
assertEquals("Symbol", %_ClassOf(new Symbol(symbols[i])))
assertEquals("Symbol", %_ClassOf(Object(symbols[i])))
@@ -272,7 +273,7 @@ function TestKeyGet(obj) {
}
-function TestKeyHas() {
+function TestKeyHas(obj) {
for (var i in symbols) {
assertTrue(symbols[i] in obj)
assertTrue(Object.hasOwnProperty.call(obj, symbols[i]))
@@ -297,6 +298,15 @@ function TestKeyNames(obj) {
}
+function TestGetOwnPropertySymbols(obj) {
+ var syms = Object.getOwnPropertySymbols(obj)
+ assertEquals(syms.length, symbols.length)
+ for (var i in syms) {
+ assertEquals("symbol", typeof syms[i])
+ }
+}
+
+
function TestKeyDescriptor(obj) {
for (var i in symbols) {
var desc = Object.getOwnPropertyDescriptor(obj, symbols[i]);
@@ -330,6 +340,7 @@ for (var i in objs) {
TestKeyHas(obj)
TestKeyEnum(obj)
TestKeyNames(obj)
+ TestGetOwnPropertySymbols(obj)
TestKeyDescriptor(obj)
TestKeyDelete(obj)
}
@@ -344,8 +355,49 @@ function TestCachedKeyAfterScavenge() {
var a = {};
a[key] = "abc";
- for (var i = 0; i < 1000000; i++) {
+ for (var i = 0; i < 100000; i++) {
a[key] += "a"; // Allocations cause a scavenge.
}
}
TestCachedKeyAfterScavenge();
+
+
+function TestGetOwnPropertySymbolsWithProto() {
+ // We need to have fast properties to have insertion order for property
+ // keys. The limit is currently 30 properties.
+ var syms = symbols.slice(0, 30);
+ var proto = {}
+ var object = Object.create(proto)
+ for (var i = 0; i < syms.length; i++) {
+ // Even on object, odd on proto.
+ if (i % 2) {
+ proto[syms[i]] = i
+ } else {
+ object[syms[i]] = i
+ }
+ }
+
+ assertTrue(%HasFastProperties(object));
+
+ var objectOwnSymbols = Object.getOwnPropertySymbols(object)
+ assertEquals(objectOwnSymbols.length, syms.length / 2)
+
+ for (var i = 0; i < objectOwnSymbols.length; i++) {
+ assertEquals(objectOwnSymbols[i], syms[i * 2])
+ }
+}
+TestGetOwnPropertySymbolsWithProto()
+
+
+function TestGetOwnPropertySymbolsWithPrivateSymbols() {
+ var privateSymbol = %CreatePrivateSymbol("private")
+ var publicSymbol = Symbol()
+ var publicSymbol2 = Symbol()
+ var obj = {}
+ obj[publicSymbol] = 1
+ obj[privateSymbol] = 2
+ obj[publicSymbol2] = 3
+ var syms = Object.getOwnPropertySymbols(obj)
+ assertEquals(syms, [publicSymbol, publicSymbol2])
+}
+TestGetOwnPropertySymbolsWithPrivateSymbols()
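
The new TestGetOwnPropertySymbols* functions rely on Object.getOwnPropertySymbols returning only an object's own public symbol keys, leaving string keys (and V8-internal private symbols) out. A small usage sketch with ordinary symbols only, since private symbols are reachable only through natives syntax:

    var s1 = Symbol("a");
    var s2 = Symbol("b");
    var o = {};
    o[s1] = 1;
    o[s2] = 2;
    o.plain = 3;

    Object.getOwnPropertySymbols(o);   // [s1, s2]   (string keys excluded)
    Object.getOwnPropertyNames(o);     // ["plain"]  (symbol keys excluded)
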
diff --git a/deps/v8/test/mjsunit/json2.js b/deps/v8/test/mjsunit/json2.js
index cf20b909b4..0894d779ac 100644
--- a/deps/v8/test/mjsunit/json2.js
+++ b/deps/v8/test/mjsunit/json2.js
@@ -40,12 +40,13 @@ function TestStringify(expected, input) {
var array_1 = [];
var array_2 = [];
-array_1[100000] = 1;
-array_2[100000] = function() { return 1; };
-var nulls = "";
-for (var i = 0; i < 100000; i++) {
- nulls += 'null,';
+array_1[1<<17] = 1;
+array_2[1<<17] = function() { return 1; };
+var nulls = "null,";
+for (var i = 0; i < 17; i++) {
+ nulls += nulls;
}
+
expected_1 = '[' + nulls + '1]';
expected_2 = '[' + nulls + 'null]';
TestStringify(expected_1, array_1);
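
The rewritten setup above swaps a 100000-iteration append for string doubling: starting from "null," and doubling seventeen times gives exactly 1 << 17 repetitions, which lines up with the element now written at index 1 << 17. A quick check of that arithmetic:

    var nulls = "null,";
    for (var i = 0; i < 17; i++) nulls += nulls;     // length doubles each pass
    nulls.length === (1 << 17) * "null,".length;     // true: 131072 copies of 5 chars
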
diff --git a/deps/v8/test/mjsunit/keyed-array-call.js b/deps/v8/test/mjsunit/keyed-array-call.js
new file mode 100644
index 0000000000..b97da3cf1b
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-array-call.js
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var a = [function(a) { return a+10; },
+ function(a) { return a+20; }];
+a.__proto__.test = function(a) { return a+30; }
+function f(i) {
+ return "r" + (1, a[i](i+1), a[i](i+2));
+}
+
+assertEquals("r12", f(0));
+assertEquals("r12", f(0));
+assertEquals("r23", f(1));
+assertEquals("r23", f(1));
+
+// Deopt the stub.
+assertEquals("rtest230", f("test"));
+
+var a2 = [function(a) { return a+10; },,
+ function(a) { return a+20; }];
+a2.__proto__.test = function(a) { return a+30; }
+function f2(i) {
+ return "r" + (1, a2[i](i+1), a2[i](i+2));
+}
+
+assertEquals("r12", f2(0));
+assertEquals("r12", f2(0));
+assertEquals("r24", f2(2));
+assertEquals("r24", f2(2));
+
+// Deopt the stub. This will throw given that undefined is not a function.
+assertThrows(function() { f2(1) });
diff --git a/deps/v8/test/mjsunit/large-object-literal.js b/deps/v8/test/mjsunit/large-object-literal.js
index 70a27696ba..47ba56e101 100644
--- a/deps/v8/test/mjsunit/large-object-literal.js
+++ b/deps/v8/test/mjsunit/large-object-literal.js
@@ -53,4 +53,3 @@ var sizes = [0, 1, 2, 100, 200, 400, 1000];
for (var i = 0; i < sizes.length; i++) {
testLiteral(sizes[i]);
}
-
diff --git a/deps/v8/test/mjsunit/limit-locals.js b/deps/v8/test/mjsunit/limit-locals.js
index a166f30617..1d36c80e5d 100644
--- a/deps/v8/test/mjsunit/limit-locals.js
+++ b/deps/v8/test/mjsunit/limit-locals.js
@@ -34,9 +34,9 @@ function function_with_n_locals(n) {
test_suffix = " suffix";
var src = "test_prefix + (function () {"
for (var i = 1; i <= n; i++) {
- src += "var x" + i + ";";
+ src += "; var x" + i;
}
- src += "return " + n + ";})() + test_suffix";
+ src += "; return " + n + ";})() + test_suffix";
return eval(src);
}
diff --git a/deps/v8/test/mjsunit/load-callback-from-value-classic.js b/deps/v8/test/mjsunit/load-callback-from-value-classic.js
new file mode 100644
index 0000000000..0030c61cf8
--- /dev/null
+++ b/deps/v8/test/mjsunit/load-callback-from-value-classic.js
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Object.defineProperty(Boolean.prototype, "v",
+ {get:function() { return this; }});
+
+function f(b) {
+ return b.v;
+}
+
+assertEquals("object", typeof f(true));
+assertEquals("object", typeof f(true));
+assertEquals("object", typeof f(true));
+assertEquals("object", typeof f(true));
diff --git a/deps/v8/test/mjsunit/local-load-from-eval.js b/deps/v8/test/mjsunit/local-load-from-eval.js
index 0fdac9a791..e07cd0d574 100644
--- a/deps/v8/test/mjsunit/local-load-from-eval.js
+++ b/deps/v8/test/mjsunit/local-load-from-eval.js
@@ -36,4 +36,3 @@ test("assertEquals(27, x);");
test("(function() { assertEquals(27, x) })();");
test("(function() { var y = 42; eval('1'); assertEquals(42, y); })();");
test("(function() { var y = 42; eval('var y = 2; var z = 2;'); assertEquals(2, y); })();");
-
diff --git a/deps/v8/test/mjsunit/math-floor-part1.js b/deps/v8/test/mjsunit/math-floor-part1.js
index 313f27236a..b57b3e20dc 100644
--- a/deps/v8/test/mjsunit/math-floor-part1.js
+++ b/deps/v8/test/mjsunit/math-floor-part1.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --max-new-space-size=256 --allow-natives-syntax
+// Flags: --max-new-space-size=128 --allow-natives-syntax
var test_id = 0;
@@ -60,15 +60,19 @@ function test() {
testFloor(0, 0.49999999999999994);
testFloor(0, 0.5);
testFloor(0, 0.7);
+ testFloor(0, 1.0 - Number.EPSILON);
testFloor(-1, -0.1);
testFloor(-1, -0.49999999999999994);
testFloor(-1, -0.5);
testFloor(-1, -0.7);
testFloor(1, 1);
testFloor(1, 1.1);
+ testFloor(1, 1.0 + Number.EPSILON);
testFloor(1, 1.5);
testFloor(1, 1.7);
testFloor(-1, -1);
+ testFloor(-1, -1 + Number.EPSILON);
+ testFloor(-2, -1 - Number.EPSILON);
testFloor(-2, -1.1);
testFloor(-2, -1.5);
testFloor(-2, -1.7);
@@ -83,6 +87,7 @@ function test() {
// Test in a loop to cover the custom IC and GC-related issues.
-for (var i = 0; i < 100; i++) {
+for (var i = 0; i < 10; i++) {
test();
+ new Array(i * 10000);
}
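
The added EPSILON cases work because Number.EPSILON is 2^-52, so each of these neighbours of +/-1 is an exactly representable double rather than a value that rounds back to +/-1. A quick sanity check:

    Number.EPSILON === Math.pow(2, -52);        // true
    1 - Number.EPSILON < 1;                     // true, so Math.floor gives 0
    -1 - Number.EPSILON < -1;                   // true, so Math.floor gives -2
    Math.floor(1 + Number.EPSILON) === 1;       // true
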
diff --git a/deps/v8/test/mjsunit/math-pow.js b/deps/v8/test/mjsunit/math-pow.js
index fb5f8a1f90..ffbf418242 100644
--- a/deps/v8/test/mjsunit/math-pow.js
+++ b/deps/v8/test/mjsunit/math-pow.js
@@ -170,4 +170,4 @@ function test() {
test();
test();
%OptimizeFunctionOnNextCall(test);
-test();
\ No newline at end of file
+test();
diff --git a/deps/v8/test/mjsunit/math-round.js b/deps/v8/test/mjsunit/math-round.js
index 102c970f38..bf59069207 100644
--- a/deps/v8/test/mjsunit/math-round.js
+++ b/deps/v8/test/mjsunit/math-round.js
@@ -80,6 +80,15 @@ testRound(-9007199254740990, -9007199254740990);
testRound(-9007199254740991, -9007199254740991);
testRound(Number.MAX_VALUE, Number.MAX_VALUE);
testRound(-Number.MAX_VALUE, -Number.MAX_VALUE);
+testRound(Number.MAX_SAFE_INTEGER, Number.MAX_SAFE_INTEGER);
+testRound(Number.MAX_SAFE_INTEGER + 1, Number.MAX_SAFE_INTEGER + 1);
+testRound(Number.MAX_SAFE_INTEGER + 2, Number.MAX_SAFE_INTEGER + 2);
+testRound(Number.MAX_SAFE_INTEGER + 3, Number.MAX_SAFE_INTEGER + 3);
+testRound(Number.MAX_SAFE_INTEGER + 4, Number.MAX_SAFE_INTEGER + 4);
+testRound(Number.MIN_SAFE_INTEGER, Number.MIN_SAFE_INTEGER);
+testRound(Number.MIN_SAFE_INTEGER - 1, Number.MIN_SAFE_INTEGER - 1);
+testRound(Number.MIN_SAFE_INTEGER - 2, Number.MIN_SAFE_INTEGER - 2);
+testRound(Number.MIN_SAFE_INTEGER - 3, Number.MIN_SAFE_INTEGER - 3);
testRound(536870911, 536870910.5);
testRound(536870911, 536870911);
@@ -170,5 +179,3 @@ testRound(min_smi31, min_smi31 - 0.5);
testRound(min_smi31 + 1, min_smi31 + 0.5);
testRound(min_smi32, min_smi32 - 0.5);
testRound(min_smi32 + 1, min_smi32 + 0.5);
-
-
diff --git a/deps/v8/test/mjsunit/math-sqrt.js b/deps/v8/test/mjsunit/math-sqrt.js
index 43fbf6b2dc..d5de2e97fe 100644
--- a/deps/v8/test/mjsunit/math-sqrt.js
+++ b/deps/v8/test/mjsunit/math-sqrt.js
@@ -50,4 +50,3 @@ test(-0, -0);
test(Infinity, Infinity);
// -Infinity is smaller than 0 so it should return NaN
test(NaN, -Infinity);
-
diff --git a/deps/v8/test/mjsunit/mirror-boolean.js b/deps/v8/test/mjsunit/mirror-boolean.js
index 311c781760..bbcec7325e 100644
--- a/deps/v8/test/mjsunit/mirror-boolean.js
+++ b/deps/v8/test/mjsunit/mirror-boolean.js
@@ -56,4 +56,4 @@ function testBooleanMirror(b) {
// Test all boolean values.
testBooleanMirror(true);
-testBooleanMirror(false);
\ No newline at end of file
+testBooleanMirror(false);
diff --git a/deps/v8/test/mjsunit/mirror-undefined.js b/deps/v8/test/mjsunit/mirror-undefined.js
index 7f63239e56..1d54b51b92 100644
--- a/deps/v8/test/mjsunit/mirror-undefined.js
+++ b/deps/v8/test/mjsunit/mirror-undefined.js
@@ -47,4 +47,4 @@ assertEquals('undefined', mirror.toText());
// Parse JSON representation and check.
var fromJSON = eval('(' + json + ')');
-assertEquals('undefined', fromJSON.type);
\ No newline at end of file
+assertEquals('undefined', fromJSON.type);
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 129353730c..5f03774d75 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -54,6 +54,10 @@ var assertSame;
// and the properties of non-Array objects).
var assertEquals;
+
+// The difference between expected and found value is within certain tolerance.
+var assertEqualsDelta;
+
// The found object is an Array with the same length and elements
// as the expected object. The expected object doesn't need to be an Array,
// as long as it's "array-ish".
@@ -247,6 +251,12 @@ var assertUnoptimized;
};
+ assertEqualsDelta =
+ function assertEqualsDelta(expected, found, delta, name_opt) {
+ assertTrue(Math.abs(expected - found) <= delta, name_opt);
+ };
+
+
assertArrayEquals = function assertArrayEquals(expected, found, name_opt) {
var start = "";
if (name_opt) {
@@ -383,4 +393,3 @@ var assertUnoptimized;
}
})();
-
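
The new assertEqualsDelta helper passes whenever |expected - found| <= delta. A hypothetical usage example, with values invented for illustration:

    assertEqualsDelta(Math.PI, 355 / 113, 1e-6, "pi approximation");   // error is about 2.7e-7, so this passes
    // assertEqualsDelta(Math.PI, 3.14, 1e-6) would fail: the error is about 1.6e-3
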
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 256bd3ecd4..4dcf7eec63 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -30,15 +30,10 @@
# All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
- # TODO(mvstanton) Re-enable when the performance is bearable again.
- 'regress/regress-2185-2': [SKIP],
-
##############################################################################
# Flaky tests.
- # BUG(v8:2921): Flaky on ia32 nosnap, arm and nacl.
- 'debug-step-4-in-frame': [PASS, [('system == linux and arch == ia32 or '
- 'arch == arm or arch == nacl_ia32 or '
- 'arch == nacl_x64'), FLAKY]],
+ # BUG(v8:2921).
+ 'debug-step-4-in-frame': [PASS, FAIL, SLOW],
##############################################################################
# Fails.
@@ -104,9 +99,106 @@
##############################################################################
# Long running test that reproduces memory leak and should be run manually.
'regress/regress-2073': [SKIP],
+
+ ##############################################################################
+ # Tests verifying CHECK and ASSERT.
+ 'verify-check-false': [FAIL, NO_VARIANTS],
+ 'verify-assert-false': [NO_VARIANTS, ['mode == release', PASS], ['mode == debug', FAIL]],
+
+ ##############################################################################
+ # Tests with different versions for release and debug.
+ 'compiler/alloc-number': [PASS, ['mode == debug', SKIP]],
+ 'compiler/alloc-number-debug': [PASS, ['mode == release', SKIP]],
+ 'regress/regress-634': [PASS, ['mode == debug', SKIP]],
+ 'regress/regress-634-debug': [PASS, ['mode == release', SKIP]],
+
+ # BUG(336820). TODO(bmeurer): Investigate.
+ 'regress/regress-336820': [PASS, FAIL],
+
+ # BUG(v8:2989). PASS/FAIL on linux32 because crankshaft is turned off for
+ # nosse2. Also for arm novfp3.
+ 'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == ia32 or arch == arm and simulator == True', PASS]],
}], # ALWAYS
##############################################################################
+['gc_stress == True', {
+ # Skip tests not suitable for GC stress.
+ 'allocation-site-info': [SKIP],
+ 'array-constructor-feedback': [SKIP],
+ 'array-feedback': [SKIP],
+ 'array-literal-feedback': [SKIP],
+ 'd8-performance-now': [SKIP],
+ 'elements-kind': [SKIP],
+ 'fast-prototype': [SKIP],
+ 'opt-elements-kind': [SKIP],
+ 'osr-elements-kind': [SKIP],
+ 'regress/regress-165637': [SKIP],
+ 'regress/regress-2249': [SKIP],
+}], # 'gc_stress == True'
+
+##############################################################################
+['arch == a64', {
+
+ # Requires bigger stack size in the Genesis and if stack size is increased,
+ # the test requires too much time to run. However, the problem the test
+ # covers should be platform-independent.
+ 'regress/regress-1132': [SKIP],
+
+ # Pass but take too long to run. Skip.
+ # Some similar tests (with fewer iterations) may be included in a64-js tests.
+ 'compiler/regress-arguments': [SKIP],
+ 'compiler/regress-gvn': [SKIP],
+ 'compiler/regress-max-locals-for-osr': [SKIP],
+ 'compiler/regress-4': [SKIP],
+ 'compiler/regress-or': [SKIP],
+ 'compiler/regress-rep-change': [SKIP],
+ 'regress/regress-1117': [SKIP],
+ 'regress/regress-1145': [SKIP],
+ 'regress/regress-1849': [SKIP],
+ 'regress/regress-3247124': [SKIP],
+ 'regress/regress-634': [SKIP],
+ 'regress/regress-91008': [SKIP],
+ 'regress/regress-91010': [SKIP],
+ 'regress/regress-91013': [SKIP],
+ 'regress/regress-99167': [SKIP],
+
+ # Long running tests.
+ 'regress/regress-2185': [PASS, ['mode == debug', PASS, TIMEOUT]],
+ 'regress/regress-2185-2': [PASS, TIMEOUT],
+ 'whitespaces': [PASS, TIMEOUT],
+
+ # Stack manipulations in LiveEdit is not implemented for this arch.
+ 'debug-liveedit-check-stack': [SKIP],
+ 'debug-liveedit-stack-padding': [SKIP],
+ 'debug-liveedit-restart-frame': [SKIP],
+ 'debug-liveedit-double-call': [SKIP],
+
+ # BUG(v8:3147). It works on other architectures by accident.
+ 'regress/regress-conditional-position': [FAIL],
+
+ # BUG(v8:3156): Fails on gc stress bots.
+ 'compiler/concurrent-invalidate-transition-map': [PASS, ['gc_stress == True', FAIL]],
+}], # 'arch == a64'
+
+['arch == a64 and mode == debug and simulator_run == True', {
+
+ # Pass but take too long with the simulator in debug mode.
+ 'array-sort': [PASS, TIMEOUT],
+ 'packed-elements': [SKIP],
+ 'regexp-global': [SKIP],
+ 'compiler/alloc-numbers': [SKIP],
+ 'harmony/symbols': [SKIP],
+}], # 'arch == a64 and mode == debug and simulator_run == True'
+
+##############################################################################
+['asan == True', {
+ # Skip tests not suitable for ASAN.
+ 'big-array-literal': [SKIP],
+ 'big-object-literal': [SKIP],
+ 'regress/regress-crbug-178790': [SKIP],
+}], # 'asan == True'
+
+##############################################################################
['arch == arm or arch == android_arm', {
# Slow tests which times out in debug mode.
@@ -126,30 +218,12 @@
'string-indexof-2': [PASS, TIMEOUT],
'mirror-object': [PASS, TIMEOUT],
- # BUG(3251035): Timeouts in long looping crankshaft optimization
- # tests. Skipping because having them timeout takes too long on the
- # buildbot.
+ # Long running tests. Skipping because having them timeout takes too long on
+ # the buildbot.
'compiler/alloc-number': [SKIP],
- 'compiler/array-length': [SKIP],
- 'compiler/assignment-deopt': [SKIP],
- 'compiler/deopt-args': [SKIP],
- 'compiler/inline-compare': [SKIP],
- 'compiler/inline-global-access': [SKIP],
- 'compiler/optimized-function-calls': [SKIP],
- 'compiler/pic': [SKIP],
- 'compiler/property-calls': [SKIP],
- 'compiler/recursive-deopt': [SKIP],
- 'compiler/regress-4': [SKIP],
- 'compiler/regress-funcaller': [SKIP],
- 'compiler/regress-rep-change': [SKIP],
- 'compiler/regress-arguments': [SKIP],
- 'compiler/regress-funarguments': [SKIP],
- 'compiler/regress-3249650': [SKIP],
- 'compiler/simple-deopt': [SKIP],
'regress/regress-490': [SKIP],
'regress/regress-634': [SKIP],
'regress/regress-create-exception': [SKIP],
- 'regress/regress-3218915': [SKIP],
'regress/regress-3247124': [SKIP],
# Requires bigger stack size in the Genesis and if stack size is increased,
@@ -165,6 +239,18 @@
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
+
+ ############################################################################
+ # Slow tests.
+ 'regress/regress-2185-2': [PASS, SLOW],
+ 'mirror-object': [PASS, SLOW],
+ 'compiler/osr-with-args': [PASS, SLOW],
+ 'array-sort': [PASS, SLOW],
+ 'packed-elements': [PASS, SLOW],
+ 'regress/regress-91008': [PASS, SLOW],
+ 'regress/regress-2790': [PASS, SLOW],
+ 'regress/regress-json-stringify-gc': [PASS, SLOW],
+ 'regress/regress-1122': [PASS, SLOW],
}], # 'arch == arm or arch == android_arm'
##############################################################################
@@ -183,30 +269,12 @@
'mirror-object': [PASS, TIMEOUT],
'string-indexof-2': [PASS, TIMEOUT],
- # BUG(3251035): Timeouts in long looping crankshaft optimization
- # tests. Skipping because having them timeout takes too long on the
- # buildbot.
+ # Long running tests. Skipping because having them timeout takes too long on
+ # the buildbot.
'compiler/alloc-number': [SKIP],
- 'compiler/array-length': [SKIP],
- 'compiler/assignment-deopt': [SKIP],
- 'compiler/deopt-args': [SKIP],
- 'compiler/inline-compare': [SKIP],
- 'compiler/inline-global-access': [SKIP],
- 'compiler/optimized-function-calls': [SKIP],
- 'compiler/pic': [SKIP],
- 'compiler/property-calls': [SKIP],
- 'compiler/recursive-deopt': [SKIP],
- 'compiler/regress-4': [SKIP],
- 'compiler/regress-funcaller': [SKIP],
- 'compiler/regress-rep-change': [SKIP],
- 'compiler/regress-arguments': [SKIP],
- 'compiler/regress-funarguments': [SKIP],
- 'compiler/regress-3249650': [SKIP],
- 'compiler/simple-deopt': [SKIP],
'regress/regress-490': [SKIP],
'regress/regress-634': [SKIP],
'regress/regress-create-exception': [SKIP],
- 'regress/regress-3218915': [SKIP],
'regress/regress-3247124': [SKIP],
# Requires bigger stack size in the Genesis and if stack size is increased,
diff --git a/deps/v8/test/mjsunit/mul-exhaustive-part6.js b/deps/v8/test/mjsunit/mul-exhaustive-part6.js
index 91cb798a7d..fb2b2ce8e5 100644
--- a/deps/v8/test/mjsunit/mul-exhaustive-part6.js
+++ b/deps/v8/test/mjsunit/mul-exhaustive-part6.js
@@ -551,4 +551,4 @@ f(281475001876479, 8388609);
f(562949903089665, 16777215);
f(562949936644096, 16777216);
f(562949970198527, 16777217);
-f(1125899839733761, 33554431);
\ No newline at end of file
+f(1125899839733761, 33554431);
diff --git a/deps/v8/test/mjsunit/nans.js b/deps/v8/test/mjsunit/nans.js
index d212afdeed..987ad6e78e 100644
--- a/deps/v8/test/mjsunit/nans.js
+++ b/deps/v8/test/mjsunit/nans.js
@@ -97,7 +97,3 @@ function TestFloatQuietNan() {
}
TestFloatQuietNan();
-
-
-
-
diff --git a/deps/v8/test/mjsunit/never-optimize.js b/deps/v8/test/mjsunit/never-optimize.js
index 55b1f11981..643588ebf4 100644
--- a/deps/v8/test/mjsunit/never-optimize.js
+++ b/deps/v8/test/mjsunit/never-optimize.js
@@ -60,4 +60,4 @@ if (%GetOptimizationStatus(o1) != 4) {
// 2 => not optimized.
assertUnoptimized(u1);
assertOptimized(u2);
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/mjsunit/new-string-add.js b/deps/v8/test/mjsunit/new-string-add.js
new file mode 100644
index 0000000000..f5b7cbfbf2
--- /dev/null
+++ b/deps/v8/test/mjsunit/new-string-add.js
@@ -0,0 +1,197 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --new-string-add
+
+assertEquals("ab", "a" + "b", "ll");
+
+assertEquals("12", "1" + "2", "dd");
+assertEquals("123", "1" + "2" + "3", "ddd");
+assertEquals("123", 1 + "2" + "3", "ndd");
+assertEquals("123", "1" + 2 + "3", "dnd");
+assertEquals("123", "1" + "2" + 3, "ddn");
+
+assertEquals("123", "1" + 2 + 3, "dnn");
+assertEquals("123", 1 + "2" + 3, "ndn");
+assertEquals("33", 1 + 2 + "3", "nnd");
+
+var x = "1";
+assertEquals("12", x + 2, "vn");
+assertEquals("12", x + "2", "vd");
+assertEquals("21", 2 + x, "nv");
+assertEquals("21", "2" + x, "dv");
+
+var y = "2";
+assertEquals("12", x + y, "vdvd");
+
+x = 1;
+assertEquals("12", x + y, "vnvd");
+
+y = 2;
+assertEquals(3, x + y, "vnvn");
+
+x = "1";
+assertEquals("12", x + y, "vdvn");
+
+y = "2";
+assertEquals("12", x + y, "vdvd2");
+
+(function(x, y) {
+ var z = "3";
+ var w = "4";
+
+ assertEquals("11", x + x, "xx");
+ assertEquals("12", x + y, "xy");
+ assertEquals("13", x + z, "xz");
+ assertEquals("14", x + w, "xw");
+
+ assertEquals("21", y + x, "yx");
+ assertEquals("22", y + y, "yy");
+ assertEquals("23", y + z, "yz");
+ assertEquals("24", y + w, "yw");
+
+ assertEquals("31", z + x, "zx");
+ assertEquals("32", z + y, "zy");
+ assertEquals("33", z + z, "zz");
+ assertEquals("34", z + w, "zw");
+
+ assertEquals("41", w + x, "wx");
+ assertEquals("42", w + y, "wy");
+ assertEquals("43", w + z, "wz");
+ assertEquals("44", w + w, "ww");
+
+ (function(){x = 1; z = 3;})();
+
+ assertEquals(2, x + x, "x'x");
+ assertEquals("12", x + y, "x'y");
+ assertEquals(4, x + z, "x'z'");
+ assertEquals("14", x + w, "x'w");
+
+ assertEquals("21", y + x, "yx'");
+ assertEquals("22", y + y, "yy");
+ assertEquals("23", y + z, "yz'");
+ assertEquals("24", y + w, "yw");
+
+ assertEquals(4, z + x, "z'x'");
+ assertEquals("32", z + y, "z'y");
+ assertEquals(6, z + z, "z'z'");
+ assertEquals("34", z + w, "z'w");
+
+ assertEquals("41", w + x, "wx'");
+ assertEquals("42", w + y, "wy");
+ assertEquals("43", w + z, "wz'");
+ assertEquals("44", w + w, "ww");
+})("1", "2");
+
+assertEquals("142", "1" + new Number(42), "sN");
+assertEquals("421", new Number(42) + "1", "Ns");
+assertEquals(84, new Number(42) + new Number(42), "NN");
+
+assertEquals("142", "1" + new String("42"), "sS");
+assertEquals("421", new String("42") + "1", "Ss");
+assertEquals("142", "1" + new String("42"), "sS");
+assertEquals("4242", new String("42") + new String("42"), "SS");
+
+assertEquals("1true", "1" + true, "sb");
+assertEquals("true1", true + "1", "bs");
+assertEquals(2, true + true, "bs");
+
+assertEquals("1true", "1" + new Boolean(true), "sB");
+assertEquals("true1", new Boolean(true) + "1", "Bs");
+assertEquals(2, new Boolean(true) + new Boolean(true), "Bs");
+
+assertEquals("1undefined", "1" + void 0, "sv");
+assertEquals("undefined1", (void 0) + "1", "vs");
+assertTrue(isNaN(void 0 + void 0), "vv");
+
+assertEquals("1null", "1" + null, "su");
+assertEquals("null1", null + "1", "us");
+assertEquals(0, null + null, "uu");
+
+(function (i) {
+ // Check that incoming frames are merged correctly.
+ var x;
+ var y;
+ var z;
+ var w;
+ switch (i) {
+ case 1: x = 42; y = "stry"; z = "strz"; w = 42; break;
+ default: x = "strx", y = 42; z = "strz"; w = 42; break;
+ }
+ var resxx = x + x;
+ var resxy = x + y;
+ var resxz = x + z;
+ var resxw = x + w;
+ var resyx = y + x;
+ var resyy = y + y;
+ var resyz = y + z;
+ var resyw = y + w;
+ var reszx = z + x;
+ var reszy = z + y;
+ var reszz = z + z;
+ var reszw = z + w;
+ var reswx = w + x;
+ var reswy = w + y;
+ var reswz = w + z;
+ var resww = w + w;
+ assertEquals(84, resxx, "swxx");
+ assertEquals("42stry", resxy, "swxy");
+ assertEquals("42strz", resxz, "swxz");
+ assertEquals(84, resxw, "swxw");
+ assertEquals("stry42", resyx, "swyx");
+ assertEquals("strystry", resyy, "swyy");
+ assertEquals("strystrz", resyz, "swyz");
+ assertEquals("stry42", resyw, "swyw");
+ assertEquals("strz42", reszx, "swzx");
+ assertEquals("strzstry", reszy, "swzy");
+ assertEquals("strzstrz", reszz, "swzz");
+ assertEquals("strz42", reszw, "swzw");
+ assertEquals(84, reswx, "swwx");
+ assertEquals("42stry", reswy, "swwy");
+ assertEquals("42strz", reswz, "swwz");
+ assertEquals(84, resww, "swww");
+})(1);
+
+// Generate ascii and non-ascii strings from length 0 to 20.
+var ascii = 'aaaaaaaaaaaaaaaaaaaa';
+var non_ascii = '\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234';
+assertEquals(20, ascii.length);
+assertEquals(20, non_ascii.length);
+var a = Array(21);
+var b = Array(21);
+for (var i = 0; i <= 20; i++) {
+ a[i] = ascii.substring(0, i);
+ b[i] = non_ascii.substring(0, i);
+}
+
+// Add ascii and non-ascii strings generating strings with length from 0 to 20.
+for (var i = 0; i <= 20; i++) {
+ for (var j = 0; j < i; j++) {
+ assertEquals(a[i], a[j] + a[i - j])
+ assertEquals(b[i], b[j] + b[i - j])
+ }
+}
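
The single-letter labels in the new test ("sN", "Ns", "NN", ...) encode the operand kinds; the results follow the usual + coercion rules: both operands go through ToPrimitive with no hint, and concatenation only happens if either result is a string. A few of the cases restated outside the test:

    new Number(42) + new Number(42);        // 84     both sides valueOf() to numbers
    "1" + new Number(42);                   // "142"  one side is already a string
    new Boolean(true) + new Boolean(true);  // 2
    null + null;                            // 0
    void 0 + void 0;                        // NaN
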
diff --git a/deps/v8/test/mjsunit/new.js b/deps/v8/test/mjsunit/new.js
index 1062628b20..ca098a12bd 100644
--- a/deps/v8/test/mjsunit/new.js
+++ b/deps/v8/test/mjsunit/new.js
@@ -53,4 +53,3 @@ x = new String('foo');
assertTrue(x === new Construct(x));
x = function() { };
assertTrue(x === new Construct(x));
-
diff --git a/deps/v8/test/mjsunit/newline-in-string.js b/deps/v8/test/mjsunit/newline-in-string.js
index 8c3ff86402..e71d8cf6a2 100644
--- a/deps/v8/test/mjsunit/newline-in-string.js
+++ b/deps/v8/test/mjsunit/newline-in-string.js
@@ -42,5 +42,3 @@ assertEquals('asdfasdf', eval(code));
// Allow LF+CR in multiline string literals.
code = "'asdf\\" + String.fromCharCode(0xA) + String.fromCharCode(0xD) + "asdf'";
assertEquals('asdfasdf', eval(code));
-
-
diff --git a/deps/v8/test/mjsunit/number-is.js b/deps/v8/test/mjsunit/number-is.js
index 1589fc64ad..53f0876786 100644
--- a/deps/v8/test/mjsunit/number-is.js
+++ b/deps/v8/test/mjsunit/number-is.js
@@ -25,11 +25,16 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test Harmony Number.isFinite() and Number.isNaN() functions.
+// Test number predicates that Harmony adds to the Number constructor:
+// isFinite(), isNaN(), isInteger(), isSafeInteger().
assertTrue(Number.isFinite(0));
assertTrue(Number.isFinite(Number.MIN_VALUE));
assertTrue(Number.isFinite(Number.MAX_VALUE));
+assertTrue(Number.isFinite(Number.MIN_SAFE_INTEGER));
+assertTrue(Number.isFinite(Number.MIN_SAFE_INTEGER - 13));
+assertTrue(Number.isFinite(Number.MAX_SAFE_INTEGER));
+assertTrue(Number.isFinite(Number.MAX_SAFE_INTEGER + 23));
assertFalse(Number.isFinite(Number.NaN));
assertFalse(Number.isFinite(Number.POSITIVE_INFINITY));
assertFalse(Number.isFinite(Number.NEGATIVE_INFINITY));
@@ -45,9 +50,12 @@ assertFalse(Number.isFinite(undefined));
assertFalse(Number.isNaN(0));
assertFalse(Number.isNaN(Number.MIN_VALUE));
assertFalse(Number.isNaN(Number.MAX_VALUE));
+assertFalse(Number.isNaN(Number.MIN_SAFE_INTEGER - 13));
+assertFalse(Number.isNaN(Number.MAX_SAFE_INTEGER + 23));
assertTrue(Number.isNaN(Number.NaN));
assertFalse(Number.isNaN(Number.POSITIVE_INFINITY));
assertFalse(Number.isNaN(Number.NEGATIVE_INFINITY));
+assertFalse(Number.isNaN(Number.EPSILON));
assertFalse(Number.isNaN(new Number(0)));
assertFalse(Number.isNaN(1/0));
assertFalse(Number.isNaN(-1/0));
@@ -56,3 +64,63 @@ assertFalse(Number.isNaN([]));
assertFalse(Number.isNaN("s"));
assertFalse(Number.isNaN(null));
assertFalse(Number.isNaN(undefined));
+
+assertFalse(Number.isInteger({}));
+assertFalse(Number.isInteger([]));
+assertFalse(Number.isInteger("s"));
+assertFalse(Number.isInteger(null));
+assertFalse(Number.isInteger(undefined));
+assertFalse(Number.isInteger(new Number(2)));
+assertTrue(Number.isInteger(0));
+assertFalse(Number.isInteger(Number.MIN_VALUE));
+assertTrue(Number.isInteger(Number.MAX_VALUE));
+assertTrue(Number.isInteger(Number.MIN_SAFE_INTEGER));
+assertTrue(Number.isInteger(Number.MIN_SAFE_INTEGER - 13));
+assertTrue(Number.isInteger(Number.MAX_SAFE_INTEGER));
+assertTrue(Number.isInteger(Number.MAX_SAFE_INTEGER + 23));
+assertFalse(Number.isInteger(Number.NaN));
+assertFalse(Number.isInteger(Number.POSITIVE_INFINITY));
+assertFalse(Number.isInteger(Number.NEGATIVE_INFINITY));
+assertFalse(Number.isInteger(1/0));
+assertFalse(Number.isInteger(-1/0));
+assertFalse(Number.isInteger(Number.EPSILON));
+
+assertFalse(Number.isSafeInteger({}));
+assertFalse(Number.isSafeInteger([]));
+assertFalse(Number.isSafeInteger("s"));
+assertFalse(Number.isSafeInteger(null));
+assertFalse(Number.isSafeInteger(undefined));
+assertFalse(Number.isSafeInteger(new Number(2)));
+assertTrue(Number.isSafeInteger(0));
+assertTrue(Number.isSafeInteger(Number.MIN_SAFE_INTEGER));
+assertFalse(Number.isSafeInteger(Number.MIN_SAFE_INTEGER - 13));
+assertTrue(Number.isSafeInteger(Number.MIN_SAFE_INTEGER + 13));
+assertTrue(Number.isSafeInteger(Number.MAX_SAFE_INTEGER));
+assertFalse(Number.isSafeInteger(Number.MAX_SAFE_INTEGER + 23));
+assertTrue(Number.isSafeInteger(Number.MAX_SAFE_INTEGER - 23));
+assertFalse(Number.isSafeInteger(Number.MIN_VALUE));
+assertFalse(Number.isSafeInteger(Number.MAX_VALUE));
+assertFalse(Number.isSafeInteger(Number.NaN));
+assertFalse(Number.isSafeInteger(Number.POSITIVE_INFINITY));
+assertFalse(Number.isSafeInteger(Number.NEGATIVE_INFINITY));
+assertFalse(Number.isSafeInteger(1/0));
+assertFalse(Number.isSafeInteger(-1/0));
+assertFalse(Number.isSafeInteger(Number.EPSILON));
+
+var near_upper = Math.pow(2, 52);
+assertTrue(Number.isSafeInteger(near_upper));
+assertFalse(Number.isSafeInteger(2 * near_upper));
+assertTrue(Number.isSafeInteger(2 * near_upper - 1));
+assertTrue(Number.isSafeInteger(2 * near_upper - 2));
+assertFalse(Number.isSafeInteger(2 * near_upper + 1));
+assertFalse(Number.isSafeInteger(2 * near_upper + 2));
+assertFalse(Number.isSafeInteger(2 * near_upper + 7));
+
+var near_lower = -near_upper;
+assertTrue(Number.isSafeInteger(near_lower));
+assertFalse(Number.isSafeInteger(2 * near_lower));
+assertTrue(Number.isSafeInteger(2 * near_lower + 1));
+assertTrue(Number.isSafeInteger(2 * near_lower + 2));
+assertFalse(Number.isSafeInteger(2 * near_lower - 1));
+assertFalse(Number.isSafeInteger(2 * near_lower - 2));
+assertFalse(Number.isSafeInteger(2 * near_lower - 7));
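
A rough functional equivalent of the Number.isSafeInteger behaviour these assertions pin down; this is illustrative only, not V8's implementation (the typeof check is what makes new Number(2) fail):

    function isSafeIntegerSketch(x) {
      return typeof x === "number" &&              // rejects wrapper objects such as new Number(2)
             isFinite(x) &&
             Math.floor(x) === x &&                // integral value
             Math.abs(x) <= Math.pow(2, 53) - 1;   // Number.MAX_SAFE_INTEGER
    }

    isSafeIntegerSketch(Math.pow(2, 53) - 1);   // true
    isSafeIntegerSketch(Math.pow(2, 53));       // false
    isSafeIntegerSketch(Number.MIN_VALUE);      // false: not an integer
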
diff --git a/deps/v8/test/mjsunit/number-tostring-func.js b/deps/v8/test/mjsunit/number-tostring-func.js
index c64706e703..4fc97e5e0d 100644
--- a/deps/v8/test/mjsunit/number-tostring-func.js
+++ b/deps/v8/test/mjsunit/number-tostring-func.js
@@ -362,6 +362,3 @@ testToPrecision("-1.234e+4", Number(-12344.67), (4));
// dtoa does not do this in its original form.
assertEquals("1.3", 1.25.toPrecision(2), "1.25.toPrecision(2)");
assertEquals("1.4", 1.35.toPrecision(2), "1.35.toPrecision(2)");
-
-
-
diff --git a/deps/v8/test/mjsunit/number-tostring-small.js b/deps/v8/test/mjsunit/number-tostring-small.js
index dbd2b59235..068b3a46e5 100644
--- a/deps/v8/test/mjsunit/number-tostring-small.js
+++ b/deps/v8/test/mjsunit/number-tostring-small.js
@@ -392,4 +392,3 @@ Number(-12344.67).toPrecision(4);
(-91.1234).toPrecision(6);
(-91.1234).toPrecision(7);
(-91.1234).toPrecision(8);
-
diff --git a/deps/v8/test/mjsunit/number-tostring.js b/deps/v8/test/mjsunit/number-tostring.js
index 35e77e2a69..55655f4082 100644
--- a/deps/v8/test/mjsunit/number-tostring.js
+++ b/deps/v8/test/mjsunit/number-tostring.js
@@ -335,6 +335,3 @@ assertEquals("-1.234e+4", Number(-12344.67).toPrecision(4));
// dtoa does not do this in its original form.
assertEquals("1.3", 1.25.toPrecision(2), "1.25.toPrecision(2)");
assertEquals("1.4", 1.35.toPrecision(2), "1.35.toPrecision(2)");
-
-
-
diff --git a/deps/v8/test/mjsunit/object-freeze.js b/deps/v8/test/mjsunit/object-freeze.js
index a0717a171c..3b7987402f 100644
--- a/deps/v8/test/mjsunit/object-freeze.js
+++ b/deps/v8/test/mjsunit/object-freeze.js
@@ -314,3 +314,26 @@ assertTrue(%HasFastProperties(obj));
Object.freeze(obj);
assertTrue(%HasFastProperties(obj));
assertTrue(Object.isFrozen(obj));
+
+// Test array built-in functions with freeze.
+obj = [1,2,3];
+Object.freeze(obj);
+// If frozen implies sealed, then the tests in object-seal.js are mostly
+// sufficient.
+assertTrue(Object.isSealed(obj));
+
+assertDoesNotThrow(function() { obj.push(); });
+assertDoesNotThrow(function() { obj.unshift(); });
+assertDoesNotThrow(function() { obj.splice(0,0); });
+assertTrue(Object.isFrozen(obj));
+
+// Verify that an item can't be changed with splice.
+assertThrows(function() { obj.splice(0,1,1); }, TypeError);
+
+// Verify that unshift() with no arguments will fail if it reifies from
+// the prototype into the object.
+obj = [1,,3];
+obj.__proto__[1] = 1;
+assertEquals(1, obj[1]);
+Object.freeze(obj);
+assertThrows(function() { obj.unshift(); }, TypeError);
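
The core property the new freeze tests depend on is that a frozen array is also sealed, and that the array built-ins throw a TypeError as soon as they would actually write an element or the length. A compact restatement, separate from the test:

    var frozen = Object.freeze([1, 2, 3]);
    Object.isSealed(frozen);                        // true: frozen implies sealed
    try { frozen.push(4); } catch (e) { }           // TypeError: cannot add an element
    try { frozen.splice(0, 1, 9); } catch (e) { }   // TypeError: cannot overwrite element 0
    frozen[0];                                      // still 1
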
diff --git a/deps/v8/test/mjsunit/object-literal-conversions.js b/deps/v8/test/mjsunit/object-literal-conversions.js
index 742f814ba3..7db2cf519c 100644
--- a/deps/v8/test/mjsunit/object-literal-conversions.js
+++ b/deps/v8/test/mjsunit/object-literal-conversions.js
@@ -43,4 +43,3 @@ var test6 = { 17.31: function() {}, "17.31": 7 };
assertEquals(7, test5[13]);
assertEquals(7, test6[17.31]);
-
diff --git a/deps/v8/test/mjsunit/object-literal-gc.js b/deps/v8/test/mjsunit/object-literal-gc.js
index b9d6285cfe..ddbeef635b 100644
--- a/deps/v8/test/mjsunit/object-literal-gc.js
+++ b/deps/v8/test/mjsunit/object-literal-gc.js
@@ -63,4 +63,3 @@ var sizes = [0, 1, 2, 100, 200, 400, 1000];
for (var i = 0; i < sizes.length; i++) {
testLiteral(sizes[i]);
}
-
diff --git a/deps/v8/test/mjsunit/object-seal.js b/deps/v8/test/mjsunit/object-seal.js
index f21baed377..3afddb9d60 100644
--- a/deps/v8/test/mjsunit/object-seal.js
+++ b/deps/v8/test/mjsunit/object-seal.js
@@ -28,6 +28,7 @@
// Tests the Object.seal and Object.isSealed methods - ES 15.2.3.9 and
// ES 15.2.3.12
+// Flags: --allow-natives-syntax --noalways-opt
// Test that we throw an error if an object is not passed as argument.
var non_objects = new Array(undefined, null, 1, -1, 0, 42.43);
@@ -192,3 +193,77 @@ assertFalse(Object.isSealed(obj4));
// Make sure that Object.seal returns the sealed object.
var obj4 = {};
assertTrue(obj4 === Object.seal(obj4));
+
+//
+// Test that built-in array functions can't modify a sealed array.
+//
+obj = [1, 2, 3];
+var objControl = [4, 5, 6];
+
+// Allow these functions to set up monomorphic calls, using custom built-ins.
+var push_call = function(a) { a.push(10); return a; }
+var pop_call = function(a) { return a.pop(); }
+for (var i = 0; i < 3; i++) {
+ push_call(obj);
+ pop_call(obj);
+}
+
+Object.seal(obj);
+assertThrows(function() { push_call(obj); }, TypeError);
+assertThrows(function() { pop_call(obj); }, TypeError);
+
+// But the control object is fine at these sites.
+assertDoesNotThrow(function() { push_call(objControl); });
+assertDoesNotThrow(function() { pop_call(objControl); });
+
+assertDoesNotThrow(function() { obj.push(); });
+assertThrows(function() { obj.push(3); }, TypeError);
+assertThrows(function() { obj.pop(); }, TypeError);
+assertThrows(function() { obj.shift(3); }, TypeError);
+assertDoesNotThrow(function() { obj.unshift(); });
+assertThrows(function() { obj.unshift(1); }, TypeError);
+assertThrows(function() { obj.splice(0, 0, 100, 101, 102); }, TypeError);
+assertDoesNotThrow(function() { obj.splice(0,0); });
+
+assertDoesNotThrow(function() { objControl.push(3); });
+assertDoesNotThrow(function() { objControl.pop(); });
+assertDoesNotThrow(function() { objControl.shift(3); });
+assertDoesNotThrow(function() { objControl.unshift(); });
+assertDoesNotThrow(function() { objControl.splice(0, 0, 100, 101, 102); });
+
+// Verify that crankshaft still does the right thing.
+obj = [1, 2, 3];
+
+push_call = function(a) { a.push(1000); return a; }
+// Include a call site that doesn't have a custom built-in.
+var shift_call = function(a) { a.shift(1000); return a; }
+for (var i = 0; i < 3; i++) {
+ push_call(obj);
+ shift_call(obj);
+}
+
+%OptimizeFunctionOnNextCall(push_call);
+%OptimizeFunctionOnNextCall(shift_call);
+push_call(obj);
+shift_call(obj);
+assertOptimized(push_call);
+assertOptimized(shift_call);
+Object.seal(obj);
+assertThrows(function() { push_call(obj); }, TypeError);
+assertThrows(function() { shift_call(obj); }, TypeError);
+assertUnoptimized(push_call);
+assertUnoptimized(shift_call);
+assertDoesNotThrow(function() { push_call(objControl); });
+assertDoesNotThrow(function() { shift_call(objControl); });
+
+// Verify special behavior of splice on sealed objects.
+obj = [1,2,3];
+Object.seal(obj);
+assertDoesNotThrow(function() { obj.splice(0,1,100); });
+assertEquals(100, obj[0]);
+assertDoesNotThrow(function() { obj.splice(0,2,1,2); });
+assertDoesNotThrow(function() { obj.splice(1,2,1,2); });
+// Count of items to delete is clamped by length.
+assertDoesNotThrow(function() { obj.splice(1,2000,1,2); });
+assertThrows(function() { obj.splice(0,0,1); }, TypeError);
+assertThrows(function() { obj.splice(1,2000,1,2,3); }, TypeError);
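
The splice cases at the end highlight how sealed differs from frozen for arrays: elements stay writable, so an in-place overwrite that keeps the length unchanged succeeds, while anything that would add or delete a property throws. Restated outside the test:

    var sealed = Object.seal([1, 2, 3]);
    sealed.splice(0, 1, 100);                 // ok: replaces element 0, length stays 3
    sealed[0];                                // 100
    try { sealed.push(4); } catch (e) { }     // TypeError: would add property "3"
    try { sealed.pop(); } catch (e) { }       // TypeError: would delete property "2"
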
diff --git a/deps/v8/test/mjsunit/opt-elements-kind.js b/deps/v8/test/mjsunit/opt-elements-kind.js
index fe6b8b9bfb..f26bb42067 100644
--- a/deps/v8/test/mjsunit/opt-elements-kind.js
+++ b/deps/v8/test/mjsunit/opt-elements-kind.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --notrack_allocation_sites
// Limit the number of stress runs to reduce polymorphism it defeats some of the
// assumptions made about how elements transitions work because transition stubs
@@ -40,11 +39,6 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
-// Reset the GC stress mode to be off. Needed because AllocationMementos only
-// live for one gc, so a gc that happens in certain fragile areas of the test
-// can break assumptions.
-%SetFlags("--gc-interval=-1")
-
support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
if (support_smi_only_arrays) {
@@ -114,8 +108,20 @@ function assertKind(expected, obj, name_opt) {
}
%NeverOptimizeFunction(construct_smis);
+
+// This code exists to eliminate the learning influence of AllocationSites
+// on the following tests.
+var __sequence = 0;
+function make_array_string() {
+ this.__sequence = this.__sequence + 1;
+ return "/* " + this.__sequence + " */ [0, 0, 0];"
+}
+function make_array() {
+ return eval(make_array_string());
+}
+
function construct_smis() {
- var a = [0, 0, 0];
+ var a = make_array();
a[0] = 0; // Send the COW array map to the steak house.
assertKind(elements_kind.fast_smi_only, a);
return a;
diff --git a/deps/v8/test/mjsunit/osr-elements-kind.js b/deps/v8/test/mjsunit/osr-elements-kind.js
index 8d43377321..2ad3c43487 100644
--- a/deps/v8/test/mjsunit/osr-elements-kind.js
+++ b/deps/v8/test/mjsunit/osr-elements-kind.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --notrack_allocation_sites
// Limit the number of stress runs to reduce polymorphism; it defeats some of the
// assumptions made about how elements transitions work because transition stubs
@@ -40,11 +39,6 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
-// Reset the GC stress mode to be off. Needed because AllocationMementos only
-// live for one gc, so a gc that happens in certain fragile areas of the test
-// can break assumptions.
-%SetFlags("--gc-interval=-1")
-
support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
if (support_smi_only_arrays) {
@@ -120,8 +114,19 @@ function assertKind(expected, obj, name_opt) {
for (var i = 0; i < 1000000; i++) { }
if (support_smi_only_arrays) {
+ // This code exists to eliminate the learning influence of AllocationSites
+ // on the following tests.
+ var __sequence = 0;
+ function make_array_string() {
+ this.__sequence = this.__sequence + 1;
+ return "/* " + this.__sequence + " */ [0, 0, 0];"
+ }
+ function make_array() {
+ return eval(make_array_string());
+ }
+
function construct_smis() {
- var a = [0, 0, 0];
+ var a = make_array();
a[0] = 0; // Send the COW array map to the steak house.
assertKind(elements_kind.fast_smi_only, a);
return a;
diff --git a/deps/v8/test/mjsunit/packed-elements.js b/deps/v8/test/mjsunit/packed-elements.js
index cfcdf8031f..4a87373064 100644
--- a/deps/v8/test/mjsunit/packed-elements.js
+++ b/deps/v8/test/mjsunit/packed-elements.js
@@ -109,4 +109,3 @@ if (has_packed_elements) {
test_with_optimization(test5);
test_with_optimization(test6);
}
-
diff --git a/deps/v8/test/mjsunit/parse-int-float.js b/deps/v8/test/mjsunit/parse-int-float.js
index 5a9b6f33cc..a0f5039230 100644
--- a/deps/v8/test/mjsunit/parse-int-float.js
+++ b/deps/v8/test/mjsunit/parse-int-float.js
@@ -114,3 +114,12 @@ assertEquals(state, "throwingString");
state = null;
try { parseInt(throwingString, throwingRadix); } catch (e) {}
assertEquals(state, "throwingString");
+
+// And finally, check that the Harmony additions to the Number
+// constructor are available:
+assertTrue("parseInt" in Number);
+assertTrue("parseFloat" in Number);
+assertSame(Number.parseInt, parseInt);
+assertSame(Number.parseFloat, parseFloat);
+assertEquals(Number.parseFloat('0.1'), parseFloat('0.1'));
+assertEquals(Number.parseInt('0xea'), parseInt('0xEA'));
diff --git a/deps/v8/test/mjsunit/property-object-key.js b/deps/v8/test/mjsunit/property-object-key.js
index 5eb1e1b9ec..3556cb9ee2 100644
--- a/deps/v8/test/mjsunit/property-object-key.js
+++ b/deps/v8/test/mjsunit/property-object-key.js
@@ -33,4 +33,3 @@ object[key] = 87;
assertEquals(87, object[key]);
object[key]++;
assertEquals(88, object[key]);
-
diff --git a/deps/v8/test/mjsunit/proto-accessor.js b/deps/v8/test/mjsunit/proto-accessor.js
new file mode 100644
index 0000000000..aca6ec5428
--- /dev/null
+++ b/deps/v8/test/mjsunit/proto-accessor.js
@@ -0,0 +1,81 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertEquals("function", typeof desc.get);
+assertEquals("function", typeof desc.set);
+assertDoesNotThrow("desc.get.call({})");
+assertDoesNotThrow("desc.set.call({}, {})");
+
+
+var obj = {};
+var obj2 = {};
+desc.set.call(obj, obj2);
+assertEquals(obj.__proto__, obj2);
+assertEquals(desc.get.call(obj), obj2);
+
+
+// Check that any redefinition of the __proto__ accessor works.
+Object.defineProperty(Object.prototype, "__proto__", {
+ get: function() {
+ return 42;
+ }
+});
+assertEquals({}.__proto__, 42);
+assertEquals(desc.get.call({}), Object.prototype);
+
+
+var desc2 = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertEquals(desc2.get.call({}), 42);
+assertDoesNotThrow("desc2.set.call({})");
+
+
+Object.defineProperty(Object.prototype, "__proto__", { set:function(x){} });
+var desc3 = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertDoesNotThrow("desc3.get.call({})");
+assertDoesNotThrow("desc3.set.call({})");
+
+
+Object.defineProperty(Object.prototype, "__proto__", { set: undefined });
+assertThrows(function() {
+ "use strict";
+ var o = {};
+ var p = {};
+ o.__proto__ = p;
+}, TypeError);
+
+
+assertTrue(delete Object.prototype.__proto__);
+var o = {};
+var p = {};
+o.__proto__ = p;
+assertEquals(Object.getPrototypeOf(o), Object.prototype);
+var desc4 = Object.getOwnPropertyDescriptor(o, "__proto__");
+assertTrue(desc4.configurable);
+assertTrue(desc4.enumerable);
+assertTrue(desc4.writable);
+assertEquals(desc4.value, p);
diff --git a/deps/v8/test/mjsunit/prototype.js b/deps/v8/test/mjsunit/prototype.js
index bfc1a799d4..a953422c32 100644
--- a/deps/v8/test/mjsunit/prototype.js
+++ b/deps/v8/test/mjsunit/prototype.js
@@ -89,5 +89,3 @@ var test = new Object;
test.__proto__ = (new Array()).__proto__;
test.length = 14;
assertEquals(14, test.length);
-
-
diff --git a/deps/v8/test/mjsunit/recursive-store-opt.js b/deps/v8/test/mjsunit/recursive-store-opt.js
new file mode 100644
index 0000000000..fb2649248d
--- /dev/null
+++ b/deps/v8/test/mjsunit/recursive-store-opt.js
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function g() {
+ this.x = this;
+}
+
+function f() {
+ return new g();
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regexp-indexof.js b/deps/v8/test/mjsunit/regexp-indexof.js
index a504dd8d57..09f2fec007 100644
--- a/deps/v8/test/mjsunit/regexp-indexof.js
+++ b/deps/v8/test/mjsunit/regexp-indexof.js
@@ -74,4 +74,4 @@ CheckMatch(/a(.)/g, "xyzzyabxyzzyacxyzzy", [[5, 2], [12, 2]]);
CheckMatch(/a|(?:)/g, "aba", [[0, 1], [1, 0], [2, 1], [3, 0]]);
CheckMatch(/a|(?:)/g, "baba", [[0, 0], [1, 1], [2, 0], [3, 1], [4, 0]]);
-CheckMatch(/a|(?:)/g, "bab", [[0, 0], [1, 1], [2, 0], [3, 0]]); \ No newline at end of file
+CheckMatch(/a|(?:)/g, "bab", [[0, 0], [1, 1], [2, 0], [3, 0]]);
diff --git a/deps/v8/test/mjsunit/regexp-multiline-stack-trace.js b/deps/v8/test/mjsunit/regexp-multiline-stack-trace.js
deleted file mode 100644
index fc248ef6a2..0000000000
--- a/deps/v8/test/mjsunit/regexp-multiline-stack-trace.js
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The flags below are to test the trace-calls functionality and the
-// preallocated message memory.
-// Flags: --trace-calls --preallocate-message-memory
-
-/**
- * @fileoverview Check that various regexp constructs work as intended.
- * Particularly those regexps that use ^ and $.
- */
-
-assertTrue(/^bar/.test("bar"));
-assertTrue(/^bar/.test("bar\nfoo"));
-assertFalse(/^bar/.test("foo\nbar"));
-assertTrue(/^bar/m.test("bar"));
-assertTrue(/^bar/m.test("bar\nfoo"));
-assertTrue(/^bar/m.test("foo\nbar"));
-
-assertTrue(/bar$/.test("bar"));
-assertFalse(/bar$/.test("bar\nfoo"));
-assertTrue(/bar$/.test("foo\nbar"));
-assertTrue(/bar$/m.test("bar"));
-assertTrue(/bar$/m.test("bar\nfoo"));
-assertTrue(/bar$/m.test("foo\nbar"));
-
-assertFalse(/^bxr/.test("bar"));
-assertFalse(/^bxr/.test("bar\nfoo"));
-assertFalse(/^bxr/m.test("bar"));
-assertFalse(/^bxr/m.test("bar\nfoo"));
-assertFalse(/^bxr/m.test("foo\nbar"));
-
-assertFalse(/bxr$/.test("bar"));
-assertFalse(/bxr$/.test("foo\nbar"));
-assertFalse(/bxr$/m.test("bar"));
-assertFalse(/bxr$/m.test("bar\nfoo"));
-assertFalse(/bxr$/m.test("foo\nbar"));
-
-
-assertTrue(/^.*$/.test(""));
-assertTrue(/^.*$/.test("foo"));
-assertFalse(/^.*$/.test("\n"));
-assertTrue(/^.*$/m.test("\n"));
-
-assertTrue(/^[\s]*$/.test(" "));
-assertTrue(/^[\s]*$/.test("\n"));
-
-assertTrue(/^[^]*$/.test(""));
-assertTrue(/^[^]*$/.test("foo"));
-assertTrue(/^[^]*$/.test("\n"));
-
-assertTrue(/^([()\s]|.)*$/.test("()\n()"));
-assertTrue(/^([()\n]|.)*$/.test("()\n()"));
-assertFalse(/^([()]|.)*$/.test("()\n()"));
-assertTrue(/^([()]|.)*$/m.test("()\n()"));
-assertTrue(/^([()]|.)*$/m.test("()\n"));
-assertTrue(/^[()]*$/m.test("()\n."));
-
-assertTrue(/^[\].]*$/.test("...]..."));
-
-
-function check_case(lc, uc) {
- var a = new RegExp("^" + lc + "$");
- assertFalse(a.test(uc));
- a = new RegExp("^" + lc + "$", "i");
- assertTrue(a.test(uc));
-
- var A = new RegExp("^" + uc + "$");
- assertFalse(A.test(lc));
- A = new RegExp("^" + uc + "$", "i");
- assertTrue(A.test(lc));
-
- a = new RegExp("^[" + lc + "]$");
- assertFalse(a.test(uc));
- a = new RegExp("^[" + lc + "]$", "i");
- assertTrue(a.test(uc));
-
- A = new RegExp("^[" + uc + "]$");
- assertFalse(A.test(lc));
- A = new RegExp("^[" + uc + "]$", "i");
- assertTrue(A.test(lc));
-}
-
-
-check_case("a", "A");
-// Aring
-check_case(String.fromCharCode(229), String.fromCharCode(197));
-// Russian G
-check_case(String.fromCharCode(0x413), String.fromCharCode(0x433));
-
-
-assertThrows("a = new RegExp('[z-a]');");
diff --git a/deps/v8/test/mjsunit/regexp-results-cache.js b/deps/v8/test/mjsunit/regexp-results-cache.js
index 7ee8c3fac4..9de866106b 100644
--- a/deps/v8/test/mjsunit/regexp-results-cache.js
+++ b/deps/v8/test/mjsunit/regexp-results-cache.js
@@ -75,4 +75,3 @@ assertEquals("Friends,", words[0]);
words[0] = "Enemies,";
words = string.split(" ");
assertEquals("Friends,", words[0]);
-
diff --git a/deps/v8/test/mjsunit/regress-3135.js b/deps/v8/test/mjsunit/regress-3135.js
new file mode 100644
index 0000000000..8088432c8e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-3135.js
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Properties are serialized once.
+assertEquals('{"x":1}', JSON.stringify({ x : 1 }, ["x", 1, "x", 1]));
+assertEquals('{"1":1}', JSON.stringify({ 1 : 1 }, ["x", 1, "x", 1]));
+assertEquals('{"1":1}', JSON.stringify({ 1 : 1 }, ["1", 1, "1", 1]));
+assertEquals('{"1":1}', JSON.stringify({ 1 : 1 }, [1, "1", 1, "1"]));
+
+// Properties are visited at most once.
+var fired = 0;
+var getter_obj = { get x() { fired++; return 2; } };
+assertEquals('{"x":2}', JSON.stringify(getter_obj, ["x", "y", "x"]));
+assertEquals(1, fired);
+
+// Order of the replacer array is followed.
+assertEquals('{"y":4,"x":3}', JSON.stringify({ x : 3, y : 4}, ["y", "x"]));
+assertEquals('{"y":4,"1":2,"x":3}',
+ JSON.stringify({ x : 3, y : 4, 1 : 2 }, ["y", 1, "x"]));
+
+// __proto__ is ignored and doesn't break anything.
+var a = { x : 8 };
+a.__proto__ = { x : 7 };
+assertEquals('{"x":8}', JSON.stringify(a, ["__proto__", "x", "__proto__"]));
+
+// Arrays are not affected by the replacer array.
+assertEquals("[9,8,7]", JSON.stringify([9, 8, 7], [1, 1]));
+var mixed_arr = [11,12,13];
+mixed_arr.x = 10;
+assertEquals('[11,12,13]', JSON.stringify(mixed_arr, [1, 0, 1]));
+
+// Array elements of objects are affected.
+var mixed_obj = { x : 3 };
+mixed_obj[0] = 6;
+mixed_obj[1] = 5;
+assertEquals('{"1":5,"0":6}', JSON.stringify(mixed_obj, [1, 0, 1]));
+
+// Nested object.
+assertEquals('{"z":{"x":3},"x":1}',
+ JSON.stringify({ x: 1, y:2, z: {x:3, b:4}}, ["z","x"]));
+
+// Objects in the replacer array are ignored.
+assertEquals('{}',
+ JSON.stringify({ x : 1, "1": 1 }, [{}]));
+assertEquals('{}',
+ JSON.stringify({ x : 1, "1": 1 }, [true, undefined, null]));
+assertEquals('{}',
+ JSON.stringify({ x : 1, "1": 1 },
+ [{ toString: function() { return "x";} }]));
+assertEquals('{}',
+ JSON.stringify({ x : 1, "1": 1 },
+ [{ valueOf: function() { return 1;} }]));
diff --git a/deps/v8/test/mjsunit/regress-330046.js b/deps/v8/test/mjsunit/regress-330046.js
new file mode 100644
index 0000000000..d94b804ac0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-330046.js
@@ -0,0 +1,61 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --use-osr --allow-natives-syntax --crankshaft
+
+var o1 = {a : 10};
+var o2 = { };
+o2.__proto__ = o1;
+var o3 = { };
+o3.__proto__ = o2;
+
+function f(n, x, b) {
+ var sum = x.a;
+ for (var i = 0; i < n; i++) {
+ sum = 1.0 / i;
+ }
+ return sum;
+}
+
+f(10, o3);
+f(20, o3);
+f(30, o3);
+%OptimizeFunctionOnNextCall(f, "concurrent");
+f(100000, o3);
+// At this point OSR replaces already optimized code.
+// Check that it evicts old code from cache.
+
+// This causes all code for f to be lazily deopted.
+o2.a = 5;
+
+// If OSR did not evict the old code, it will be installed in f here.
+%OptimizeFunctionOnNextCall(f);
+f(10, o3);
+
+// The old code is already deoptimized, but f still points to it.
+// Disassembling it will crash.
+%DebugDisassembleFunction(f);
diff --git a/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
new file mode 100644
index 0000000000..fb7280a0d1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
@@ -0,0 +1,65 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+(function BinopInEffectContextDeoptAndOsr() {
+ function f(a, deopt, osr) {
+ var result = (a + 10, "result");
+ var dummy = deopt + 0;
+ if (osr) while (%GetOptimizationStatus(f) == 2) {}
+ return result;
+ }
+
+ assertEquals("result", f(true, 3, false));
+ assertEquals("result", f(true, 3, false));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("result", f(true, "foo", true));
+})();
+
+
+(function BinopInEffectContextLazyDeopt() {
+ function deopt_f() {
+ %DeoptimizeFunction(f);
+ return "dummy";
+ }
+
+ function h() {
+ return { toString : deopt_f };
+ }
+
+ function g(x) {
+ }
+
+ function f() {
+ return g(void(h() + ""));
+ };
+
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+})();
diff --git a/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
new file mode 100644
index 0000000000..9a36c141b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(deopt, osr) {
+ var result = "result";
+ %_CallFunction(0, 0, function() {});
+ var dummy = deopt + 0;
+ if (osr) while (%GetOptimizationStatus(f) == 2) {}
+ return result;
+}
+
+assertEquals("result", f(3, false));
+assertEquals("result", f(3, false));
+%OptimizeFunctionOnNextCall(f);
+assertEquals("result", f("foo", true));
diff --git a/deps/v8/test/mjsunit/regress/clear-keyed-call.js b/deps/v8/test/mjsunit/regress/clear-keyed-call.js
new file mode 100644
index 0000000000..6870f606c0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/clear-keyed-call.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+function f(a) {
+ a[0](1);
+}
+
+f([function(a) { return a; }]);
+f([function(a) { return a; }]);
+f([function(a) { return a; }]);
+%NotifyContextDisposed();
+gc();
+gc();
+gc();
diff --git a/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js
new file mode 100644
index 0000000000..b28dff73a7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js
@@ -0,0 +1,47 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function lazyDeopt() {
+ %DeoptimizeFunction(test);
+ return "deopt";
+}
+
+var x = { toString : lazyDeopt };
+
+function g(x) {
+ return "result";
+}
+
+function test(x) {
+ return g(void(x == ""));
+}
+
+test(x);
+%OptimizeFunctionOnNextCall(test);
+assertEquals("result", test(x));
diff --git a/deps/v8/test/mjsunit/regress/d8-readbuffer.js b/deps/v8/test/mjsunit/regress/d8-readbuffer.js
new file mode 100644
index 0000000000..8aec02dcac
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/d8-readbuffer.js
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that reading a file actually returns the file's contents.
+
+// Env: TEST_FILE_NAME
+assertEquals("string", typeof TEST_FILE_NAME);
+
+var a = new Uint8Array(readbuffer(TEST_FILE_NAME));
+
+// First line of this file.
+var expected = "// Copyright 2014 the V8 project authors. All rights reserved.";
+
+for (var i = 0; i < expected.length; i++) {
+ assertEquals(expected.charCodeAt(i), a[i]);
+}
diff --git a/deps/v8/test/mjsunit/regress/internalized-string-not-equal.js b/deps/v8/test/mjsunit/regress/internalized-string-not-equal.js
new file mode 100644
index 0000000000..911279b43e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/internalized-string-not-equal.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Regression test for a bug in r15773, when masks for internalized string and
+// string types were reorganized.
+function equal(o1, o2) {
+ return (o1 == o2);
+}
+var a = "abc";
+var b = "abc";
+equal(a, b);
+equal(a, b);
+%OptimizeFunctionOnNextCall(equal);
+assertTrue(equal(1.3, 1.3));
diff --git a/deps/v8/test/mjsunit/regress/number-named-call-deopt.js b/deps/v8/test/mjsunit/regress/number-named-call-deopt.js
new file mode 100644
index 0000000000..1598af12b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/number-named-call-deopt.js
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(x, deopt, osr) {
+ var res = "result";
+ void(x.toString(10, deopt + 0));
+ if (osr) for (var i = 0; i < 100000; i++) { }
+ return res;
+}
+
+f(4, 0, false);
+f(4, 0, false);
+f(4, 0, false);
+%OptimizeFunctionOnNextCall(f);
+assertEquals("result", f(4, "deopt", true));
diff --git a/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js b/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js
new file mode 100644
index 0000000000..6188279248
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js
@@ -0,0 +1,25 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function t1() { return this instanceof t1; }
+function t2() { return this instanceof t2; }
+
+var o1 = new (function() { })();
+Object.defineProperty(o1, "t", {get:function() { return this instanceof o1.constructor; }});
+var o2 = new (function() { })();
+Object.defineProperty(o2, "t", {get:function() { return this instanceof o1.constructor; }});
+var o3 = new (function() { })();
+o3.t = true;
+
+function f(o) {
+ return 1 + (o.t ? 1 : 2);
+}
+
+f(o1);
+f(o1);
+f(o2);
+%OptimizeFunctionOnNextCall(f);
+f(o3);
diff --git a/deps/v8/test/mjsunit/regress/regress-1017.js b/deps/v8/test/mjsunit/regress/regress-1017.js
index 3daf5428ab..440449a5bf 100644
--- a/deps/v8/test/mjsunit/regress/regress-1017.js
+++ b/deps/v8/test/mjsunit/regress/regress-1017.js
@@ -33,4 +33,3 @@
// to store the 33rd character. This fails an ASSERT in debug mode.
assertEquals(33, "12345678901234567890123456789012\u2028".length);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1039610.js b/deps/v8/test/mjsunit/regress/regress-1039610.js
index fd5c549c1c..757801fcee 100644
--- a/deps/v8/test/mjsunit/regress/regress-1039610.js
+++ b/deps/v8/test/mjsunit/regress/regress-1039610.js
@@ -26,4 +26,4 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Make sure that the Debug object does not return to the global object
-assertTrue(typeof(Debug) === 'undefined'); \ No newline at end of file
+assertTrue(typeof(Debug) === 'undefined');
diff --git a/deps/v8/test/mjsunit/regress/regress-105.js b/deps/v8/test/mjsunit/regress/regress-105.js
index 9a4d5c4744..8b8030ffec 100644
--- a/deps/v8/test/mjsunit/regress/regress-105.js
+++ b/deps/v8/test/mjsunit/regress/regress-105.js
@@ -41,4 +41,3 @@ object.toString = custom_toString;
assertEquals(2, Number(object));
assertEquals('I', String(object)[0]);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1066899.js b/deps/v8/test/mjsunit/regress/regress-1066899.js
index 37fd554b54..5840f26234 100644
--- a/deps/v8/test/mjsunit/regress/regress-1066899.js
+++ b/deps/v8/test/mjsunit/regress/regress-1066899.js
@@ -34,4 +34,3 @@ function Crash() {
}
Crash();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1092.js b/deps/v8/test/mjsunit/regress/regress-1092.js
index 00422cb452..24efb37b40 100644
--- a/deps/v8/test/mjsunit/regress/regress-1092.js
+++ b/deps/v8/test/mjsunit/regress/regress-1092.js
@@ -32,4 +32,3 @@ this.w = 0;
this.x = 1;
this.y = 2;
this.z = 3;
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1099.js b/deps/v8/test/mjsunit/regress/regress-1099.js
index 36f09e4eb9..49e4a52794 100644
--- a/deps/v8/test/mjsunit/regress/regress-1099.js
+++ b/deps/v8/test/mjsunit/regress/regress-1099.js
@@ -48,4 +48,4 @@ for (var i = 0; i < 5; i++) {
}
%OptimizeFunctionOnNextCall(y);
-assertTrue(y("foo")); \ No newline at end of file
+assertTrue(y("foo"));
diff --git a/deps/v8/test/mjsunit/regress/regress-1112.js b/deps/v8/test/mjsunit/regress/regress-1112.js
index d780106ba0..41c9193898 100644
--- a/deps/v8/test/mjsunit/regress/regress-1112.js
+++ b/deps/v8/test/mjsunit/regress/regress-1112.js
@@ -33,4 +33,3 @@ Object.defineProperty(this,
{ configurable: true, enumerable: true, value: 3 });
assertEquals(3, this[1]);
assertTrue(this.hasOwnProperty("1"));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1114040.js b/deps/v8/test/mjsunit/regress/regress-1114040.js
index 9d1b320a9a..ef7ee31f09 100644
--- a/deps/v8/test/mjsunit/regress/regress-1114040.js
+++ b/deps/v8/test/mjsunit/regress/regress-1114040.js
@@ -55,4 +55,3 @@ function TestContinue() {
assertEquals("01", TestBreak());
assertEquals("01", TestContinue());
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1117.js b/deps/v8/test/mjsunit/regress/regress-1117.js
index 981a1b7a3f..664dadd067 100644
--- a/deps/v8/test/mjsunit/regress/regress-1117.js
+++ b/deps/v8/test/mjsunit/regress/regress-1117.js
@@ -41,4 +41,3 @@ assertEquals(Infinity, 1/bar(5));
assertEquals(Infinity, 1/bar(5));
%OptimizeFunctionOnNextCall(bar);
assertEquals(-Infinity, 1/bar(-5));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1178598.js b/deps/v8/test/mjsunit/regress/regress-1178598.js
index 9caaec2f01..135c596872 100644
--- a/deps/v8/test/mjsunit/regress/regress-1178598.js
+++ b/deps/v8/test/mjsunit/regress/regress-1178598.js
@@ -86,5 +86,3 @@ var value = (function() {
})();
assertEquals(87, value);
-
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1181.js b/deps/v8/test/mjsunit/regress/regress-1181.js
index d45a0bee35..d5b8ab1797 100644
--- a/deps/v8/test/mjsunit/regress/regress-1181.js
+++ b/deps/v8/test/mjsunit/regress/regress-1181.js
@@ -51,4 +51,4 @@ function check(count) {
check(150);
check(200);
-check(350); \ No newline at end of file
+check(350);
diff --git a/deps/v8/test/mjsunit/regress/regress-1246.js b/deps/v8/test/mjsunit/regress/regress-1246.js
index f3dbec627b..ca425ec2b7 100644
--- a/deps/v8/test/mjsunit/regress/regress-1246.js
+++ b/deps/v8/test/mjsunit/regress/regress-1246.js
@@ -80,4 +80,3 @@ for (var i = 0; i < radix16.length; i++) {
assertEquals(0xaf, parseInt("0xaf", radix));
assertEquals(0xaf, parseInt("af", radix));
}
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1254366.js b/deps/v8/test/mjsunit/regress/regress-1254366.js
index 2f9e011e94..9acda3c19b 100644
--- a/deps/v8/test/mjsunit/regress/regress-1254366.js
+++ b/deps/v8/test/mjsunit/regress/regress-1254366.js
@@ -35,4 +35,3 @@ Object.prototype.findOrStore = function() {
var a = new Object();
assertEquals(gee, a.findOrStore());
assertEquals(gee, a.findOrStore());
-
diff --git a/deps/v8/test/mjsunit/regress/regress-131994.js b/deps/v8/test/mjsunit/regress/regress-131994.js
index 8347653a94..7f600959da 100644
--- a/deps/v8/test/mjsunit/regress/regress-131994.js
+++ b/deps/v8/test/mjsunit/regress/regress-131994.js
@@ -67,4 +67,3 @@ function h() {
h();
assertFalse(exception);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-137.js b/deps/v8/test/mjsunit/regress/regress-137.js
index cc7b68c0f2..f343eba14a 100644
--- a/deps/v8/test/mjsunit/regress/regress-137.js
+++ b/deps/v8/test/mjsunit/regress/regress-137.js
@@ -43,4 +43,3 @@
};
fail("case 10", "Default case", "Heap number not recognized as Smi value");
})();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1546.js b/deps/v8/test/mjsunit/regress/regress-1546.js
index 7f1fa58b49..3f1839cd37 100644
--- a/deps/v8/test/mjsunit/regress/regress-1546.js
+++ b/deps/v8/test/mjsunit/regress/regress-1546.js
@@ -29,4 +29,4 @@
// Shouldn't throw. Scanner incorrectly truncated to char before comparing
// with "*", so it ended the comment early.
-eval("/*\u822a/ */"); \ No newline at end of file
+eval("/*\u822a/ */");
diff --git a/deps/v8/test/mjsunit/regress/regress-1591.js b/deps/v8/test/mjsunit/regress/regress-1591.js
index 69efd0bd87..dbf40dfdbe 100644
--- a/deps/v8/test/mjsunit/regress/regress-1591.js
+++ b/deps/v8/test/mjsunit/regress/regress-1591.js
@@ -45,4 +45,3 @@ var used_custom_lookup = false;
var expected_message = "ReferenceError: f is not defined";
assertTrue(stack.indexOf(expected_message) >= 0);
assertFalse(used_custom_lookup);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1647.js b/deps/v8/test/mjsunit/regress/regress-1647.js
index a6afcc0be2..ab6608c1ef 100644
--- a/deps/v8/test/mjsunit/regress/regress-1647.js
+++ b/deps/v8/test/mjsunit/regress/regress-1647.js
@@ -40,4 +40,3 @@ for (var i = 0; i < 10; i++) f();
%OptimizeFunctionOnNextCall(f);
t.number = 2;
f();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-165637.js b/deps/v8/test/mjsunit/regress/regress-165637.js
index 72af528a86..84c9041216 100644
--- a/deps/v8/test/mjsunit/regress/regress-165637.js
+++ b/deps/v8/test/mjsunit/regress/regress-165637.js
@@ -45,10 +45,6 @@ function do_slices() {
return Date.now() - start;
}
-// Reset the GC stress mode to be off. Needed so that the runtime of this test
-// stays within bounds even if we run in GC stress mode.
-%SetFlags("--gc-interval=-1 --noforce-marking-deque-overflows");
-
// Should never take more than 3 seconds (if the bug is fixed, the test takes
// considerably less time than 3 seconds).
assertTrue(do_slices() < (3 * 1000));
diff --git a/deps/v8/test/mjsunit/regress/regress-166379.js b/deps/v8/test/mjsunit/regress/regress-166379.js
index b19afbdde6..2cda61182b 100644
--- a/deps/v8/test/mjsunit/regress/regress-166379.js
+++ b/deps/v8/test/mjsunit/regress/regress-166379.js
@@ -36,4 +36,3 @@ assertEquals(1, mod(3, 2));
// Surprise mod with overflow.
assertEquals(-Infinity, 1/mod(-2147483648, -1));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1748.js b/deps/v8/test/mjsunit/regress/regress-1748.js
index e287e55496..41b7c70c01 100644
--- a/deps/v8/test/mjsunit/regress/regress-1748.js
+++ b/deps/v8/test/mjsunit/regress/regress-1748.js
@@ -32,4 +32,4 @@
var str = Array(10000).join("X");
str.replace(/^|X/g, function(m, i, s) {
if (i > 0) assertEquals("X", m, "at position 0x" + i.toString(16));
-}); \ No newline at end of file
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-1757.js b/deps/v8/test/mjsunit/regress/regress-1757.js
index f7a5516cac..35e7355c33 100644
--- a/deps/v8/test/mjsunit/regress/regress-1757.js
+++ b/deps/v8/test/mjsunit/regress/regress-1757.js
@@ -29,4 +29,4 @@
var a = "abcdefghijklmnopqrstuvqxy"+"z";
externalizeString(a, true);
-assertEquals('b', a.substring(1).charAt(0)); \ No newline at end of file
+assertEquals('b', a.substring(1).charAt(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-1853.js b/deps/v8/test/mjsunit/regress/regress-1853.js
index cfafe82fa3..7175688821 100644
--- a/deps/v8/test/mjsunit/regress/regress-1853.js
+++ b/deps/v8/test/mjsunit/regress/regress-1853.js
@@ -113,4 +113,3 @@ eval('function test2() { \n' +
test1();
test2();
assertEquals(3, break_count);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-186.js b/deps/v8/test/mjsunit/regress/regress-186.js
index 335869d6b0..0212855896 100644
--- a/deps/v8/test/mjsunit/regress/regress-186.js
+++ b/deps/v8/test/mjsunit/regress/regress-186.js
@@ -69,4 +69,3 @@ function testGlobal() {
runTest(testLocal);
runTest(testConstLocal);
runTest(testGlobal);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1919169.js b/deps/v8/test/mjsunit/regress/regress-1919169.js
index a73231289f..fbb82bae67 100644
--- a/deps/v8/test/mjsunit/regress/regress-1919169.js
+++ b/deps/v8/test/mjsunit/regress/regress-1919169.js
@@ -37,4 +37,3 @@ function test() {
// Crash does not occur when code is run at the top level.
test();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-193.js b/deps/v8/test/mjsunit/regress/regress-193.js
index f803483106..457c208153 100644
--- a/deps/v8/test/mjsunit/regress/regress-193.js
+++ b/deps/v8/test/mjsunit/regress/regress-193.js
@@ -40,5 +40,3 @@ f()();
// The call to f should get the constructor of the receiver which is
// the constructor of the global object.
assertEquals(constructor, f());
-
-
diff --git a/deps/v8/test/mjsunit/regress/regress-20070207.js b/deps/v8/test/mjsunit/regress/regress-20070207.js
index b7f7a5cc6f..701a079ade 100644
--- a/deps/v8/test/mjsunit/regress/regress-20070207.js
+++ b/deps/v8/test/mjsunit/regress/regress-20070207.js
@@ -39,4 +39,3 @@ assertFalse(f(-10));
assertFalse(f(-5));
assertFalse(f(0));
assertFalse(f(10));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2027.js b/deps/v8/test/mjsunit/regress/regress-2027.js
index 00ed03f650..3f2062b90c 100644
--- a/deps/v8/test/mjsunit/regress/regress-2027.js
+++ b/deps/v8/test/mjsunit/regress/regress-2027.js
@@ -45,4 +45,3 @@ Check(d.setUTCHours(10));
Check(d.setUTCDate(10));
Check(d.setUTCMonth(10));
Check(d.setUTCFullYear(2010));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2119.js b/deps/v8/test/mjsunit/regress/regress-2119.js
index 54840c238b..b735ddf3a6 100644
--- a/deps/v8/test/mjsunit/regress/regress-2119.js
+++ b/deps/v8/test/mjsunit/regress/regress-2119.js
@@ -33,4 +33,3 @@ function strict_function() {
}
assertThrows(strict_function);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2170.js b/deps/v8/test/mjsunit/regress/regress-2170.js
index 01cb1eaf8f..99f432dfcf 100644
--- a/deps/v8/test/mjsunit/regress/regress-2170.js
+++ b/deps/v8/test/mjsunit/regress/regress-2170.js
@@ -55,4 +55,3 @@ try {
} catch (e) {
assertUnreachable();
}
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2172.js b/deps/v8/test/mjsunit/regress/regress-2172.js
index 5d06f4eef4..6d938453d3 100644
--- a/deps/v8/test/mjsunit/regress/regress-2172.js
+++ b/deps/v8/test/mjsunit/regress/regress-2172.js
@@ -32,4 +32,3 @@ for (var i = 0; i < 10000; i++){
for (var i = 0; i < 10000; i++){
(i + "\u1234\0").split(/(.)\1/i);
}
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2250.js b/deps/v8/test/mjsunit/regress/regress-2250.js
index 9d2fd4412f..88d4dd434f 100644
--- a/deps/v8/test/mjsunit/regress/regress-2250.js
+++ b/deps/v8/test/mjsunit/regress/regress-2250.js
@@ -65,4 +65,3 @@ test();
// function should no longer deopt when called.
test();
assertOptimized(test);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2284.js b/deps/v8/test/mjsunit/regress/regress-2284.js
index 561401998a..24bf58e2db 100644
--- a/deps/v8/test/mjsunit/regress/regress-2284.js
+++ b/deps/v8/test/mjsunit/regress/regress-2284.js
@@ -27,6 +27,6 @@
// Flags: --allow-natives-syntax
-assertThrows("%foobar();", TypeError);
-assertThrows("%constructor();", TypeError);
-assertThrows("%constructor(23);", TypeError);
+assertThrows("%foobar();", Error);
+assertThrows("%constructor();", Error);
+assertThrows("%constructor(23);", Error);
diff --git a/deps/v8/test/mjsunit/regress/regress-2285.js b/deps/v8/test/mjsunit/regress/regress-2285.js
index efda4cde32..a0d628df93 100644
--- a/deps/v8/test/mjsunit/regress/regress-2285.js
+++ b/deps/v8/test/mjsunit/regress/regress-2285.js
@@ -29,4 +29,3 @@
assertThrows(function() { %_CallFunction(null, 0, ""); });
assertThrows(function() { %_CallFunction(null, 0, 1); });
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2286.js b/deps/v8/test/mjsunit/regress/regress-2286.js
index 372451ec44..0264900442 100644
--- a/deps/v8/test/mjsunit/regress/regress-2286.js
+++ b/deps/v8/test/mjsunit/regress/regress-2286.js
@@ -28,5 +28,5 @@
// Flags: --allow-natives-syntax
assertThrows("f()", ReferenceError);
-assertThrows("%f()", TypeError);
+assertThrows("%f()", Error);
assertThrows("%_f()", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-2289.js b/deps/v8/test/mjsunit/regress/regress-2289.js
index e89ec6e143..d860bbbf70 100644
--- a/deps/v8/test/mjsunit/regress/regress-2289.js
+++ b/deps/v8/test/mjsunit/regress/regress-2289.js
@@ -30,5 +30,3 @@ for (var i = 0; i < 12; i++) foo += foo;
foo = foo + 'b' + foo;
foo.replace(/b/, "a");
-
-
diff --git a/deps/v8/test/mjsunit/regress/regress-231.js b/deps/v8/test/mjsunit/regress/regress-231.js
index 0c6e5b3495..bce0500013 100644
--- a/deps/v8/test/mjsunit/regress/regress-231.js
+++ b/deps/v8/test/mjsunit/regress/regress-231.js
@@ -89,4 +89,4 @@ var str = 'GgcyDGgcy.saaaa.aDGaaa.aynaaaaaaaaacaaaaagcaaaaaaaancaDGgnayr' +
//Shouldn't crash.
var res = re.test(str);
-assertTrue(res); \ No newline at end of file
+assertTrue(res);
diff --git a/deps/v8/test/mjsunit/regress/regress-2318.js b/deps/v8/test/mjsunit/regress/regress-2318.js
index ca67ab2ca5..e31e0f904e 100644
--- a/deps/v8/test/mjsunit/regress/regress-2318.js
+++ b/deps/v8/test/mjsunit/regress/regress-2318.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --nostack-trace-on-abort
+// Flags: --expose-debug-as debug --nostack-trace-on-abort --stack-size=150
function f() {
var i = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-2374.js b/deps/v8/test/mjsunit/regress/regress-2374.js
index b333720ac0..f379cc55b7 100644
--- a/deps/v8/test/mjsunit/regress/regress-2374.js
+++ b/deps/v8/test/mjsunit/regress/regress-2374.js
@@ -31,4 +31,4 @@ var obj = JSON.parse(msg);
var obj2 = JSON.parse(msg);
assertEquals(JSON.stringify(obj), JSON.stringify(obj2));
-assertEquals(JSON.stringify(obj, null, 0), JSON.stringify(obj2)); \ No newline at end of file
+assertEquals(JSON.stringify(obj, null, 0), JSON.stringify(obj2));
diff --git a/deps/v8/test/mjsunit/regress/regress-237617.js b/deps/v8/test/mjsunit/regress/regress-237617.js
index dabf828ae8..7b7e50f247 100644
--- a/deps/v8/test/mjsunit/regress/regress-237617.js
+++ b/deps/v8/test/mjsunit/regress/regress-237617.js
@@ -40,4 +40,3 @@ try {
assertTrue(error_stack.indexOf("test stack") > 0);
assertTrue(error_stack.indexOf("illegal") < 0);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2419.js b/deps/v8/test/mjsunit/regress/regress-2419.js
index 9cd453a58e..4ffafbe6eb 100644
--- a/deps/v8/test/mjsunit/regress/regress-2419.js
+++ b/deps/v8/test/mjsunit/regress/regress-2419.js
@@ -34,4 +34,3 @@ var b = {0: 5, 1: 4, 2: 3, 3: 2, 4: 1, 5: 0, length: 6};
Object.freeze(b);
Array.prototype.sort.call(b);
assertPropertiesEqual({0: 5, 1: 4, 2: 3, 3: 2, 4: 1, 5: 0, length: 6}, b);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2438.js b/deps/v8/test/mjsunit/regress/regress-2438.js
index 3f4fd7df57..7be7e71687 100644
--- a/deps/v8/test/mjsunit/regress/regress-2438.js
+++ b/deps/v8/test/mjsunit/regress/regress-2438.js
@@ -49,4 +49,3 @@ testSideEffects("zzzz", /a/);
testSideEffects("zzzz", /a/g);
testSideEffects("xaxa", /a/);
testSideEffects("xaxa", /a/g);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2444.js b/deps/v8/test/mjsunit/regress/regress-2444.js
index 8fb8d8b52e..41b6a95e7a 100644
--- a/deps/v8/test/mjsunit/regress/regress-2444.js
+++ b/deps/v8/test/mjsunit/regress/regress-2444.js
@@ -116,5 +116,3 @@ assertEquals(0,
object_factory(1, 0, [1, 0, 0]),
object_factory(2, 1, [1, 1, 0])));
assertFlags([1, 1, 1]);
-
-
diff --git a/deps/v8/test/mjsunit/regress/regress-246.js b/deps/v8/test/mjsunit/regress/regress-246.js
index 09b746b7aa..22e29fdde0 100644
--- a/deps/v8/test/mjsunit/regress/regress-246.js
+++ b/deps/v8/test/mjsunit/regress/regress-246.js
@@ -28,4 +28,4 @@
// See: http://code.google.com/p/v8/issues/detail?id=246
assertTrue(/(?:text)/.test("text"));
-assertEquals(["text"], /(?:text)/.exec("text")); \ No newline at end of file
+assertEquals(["text"], /(?:text)/.exec("text"));
diff --git a/deps/v8/test/mjsunit/regress/regress-2570.js b/deps/v8/test/mjsunit/regress/regress-2570.js
index 4e32a21e4b..f1331e8805 100644
--- a/deps/v8/test/mjsunit/regress/regress-2570.js
+++ b/deps/v8/test/mjsunit/regress/regress-2570.js
@@ -29,4 +29,4 @@ var o = ["\u56e7", // Switch JSON stringifier to two-byte mode.
"\u00e6"]; // Latin-1 character.
assertEquals('["\u56e7","\u00e6"]', JSON.stringify(o));
-assertEquals('["\u56e7","\u00e6"]', JSON.stringify(o, null, 0)); \ No newline at end of file
+assertEquals('["\u56e7","\u00e6"]', JSON.stringify(o, null, 0));
diff --git a/deps/v8/test/mjsunit/regress/regress-2596.js b/deps/v8/test/mjsunit/regress/regress-2596.js
index 1d327fe0f8..e7006085a1 100644
--- a/deps/v8/test/mjsunit/regress/regress-2596.js
+++ b/deps/v8/test/mjsunit/regress/regress-2596.js
@@ -51,6 +51,3 @@ assertTrue(isNaN(boom(0)));
assertTrue(isNaN(boom(0)));
assertTrue(isNaN(boom(0)));
assertTrue(isNaN(boom(0)));
-
-
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index d1afa368dc..363557bff8 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr --allow-natives-syntax
+// Flags: --use-osr --allow-natives-syntax --no-concurrent-osr
function f() {
do {
diff --git a/deps/v8/test/mjsunit/regress/regress-2624.js b/deps/v8/test/mjsunit/regress/regress-2624.js
index 2bfd7b2585..fe94a10133 100644
--- a/deps/v8/test/mjsunit/regress/regress-2624.js
+++ b/deps/v8/test/mjsunit/regress/regress-2624.js
@@ -33,4 +33,3 @@ for(var i = 0; i < 800; i++) {
}
source += '"';
eval(source);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2671-1.js b/deps/v8/test/mjsunit/regress/regress-2671-1.js
index 042a501e5a..7937d6a336 100644
--- a/deps/v8/test/mjsunit/regress/regress-2671-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-2671-1.js
@@ -42,4 +42,3 @@ f();
f();
%OptimizeFunctionOnNextCall(f);
f();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2671.js b/deps/v8/test/mjsunit/regress/regress-2671.js
index 8da1b8f07f..73a3098d80 100644
--- a/deps/v8/test/mjsunit/regress/regress-2671.js
+++ b/deps/v8/test/mjsunit/regress/regress-2671.js
@@ -42,4 +42,3 @@ f();
f();
%OptimizeFunctionOnNextCall(f);
f();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2690.js b/deps/v8/test/mjsunit/regress/regress-2690.js
index 5fe7dc42dc..0ed4c5c679 100644
--- a/deps/v8/test/mjsunit/regress/regress-2690.js
+++ b/deps/v8/test/mjsunit/regress/regress-2690.js
@@ -26,4 +26,3 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
assertTrue(/\1[a]/.test("\1a"));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-270142.js b/deps/v8/test/mjsunit/regress/regress-270142.js
new file mode 100644
index 0000000000..6e0865c4f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-270142.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that "name"'s property descriptor for non-strict and strict
+// functions correspond.
+
+function f(x) {
+ return x;
+}
+
+function g(x) {
+ "use strict";
+ return x;
+}
+
+function checkNameDescriptor(f) {
+ var descriptor = Object.getOwnPropertyDescriptor(f, "name");
+ assertFalse(descriptor.configurable);
+ assertFalse(descriptor.enumerable);
+ assertFalse(descriptor.writable);
+}
+
+checkNameDescriptor(f);
+checkNameDescriptor(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-2711.js b/deps/v8/test/mjsunit/regress/regress-2711.js
index a58e789745..d5ac2baa33 100644
--- a/deps/v8/test/mjsunit/regress/regress-2711.js
+++ b/deps/v8/test/mjsunit/regress/regress-2711.js
@@ -27,7 +27,7 @@
// Test that frozen arrays don't let their length change
var a = Object.freeze([1]);
-a.push(2);
+assertThrows(function() { a.push(2); }, TypeError);
assertEquals(1, a.length);
-a.push(2);
+assertThrows(function() { a.push(2); }, TypeError);
assertEquals(1, a.length);
diff --git a/deps/v8/test/mjsunit/bugs/bug-2758.js b/deps/v8/test/mjsunit/regress/regress-2758.js
index ee78844400..ee78844400 100644
--- a/deps/v8/test/mjsunit/bugs/bug-2758.js
+++ b/deps/v8/test/mjsunit/regress/regress-2758.js
diff --git a/deps/v8/test/mjsunit/regress/regress-2790.js b/deps/v8/test/mjsunit/regress/regress-2790.js
index 86305b8867..927f2607cc 100644
--- a/deps/v8/test/mjsunit/regress/regress-2790.js
+++ b/deps/v8/test/mjsunit/regress/regress-2790.js
@@ -26,6 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test that we can create arrays of any size.
-for (var i = 1000; i < 1000000; i += 97) {
+for (var i = 1000; i < 1000000; i += 197) {
new Array(i);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-298269.js b/deps/v8/test/mjsunit/regress/regress-298269.js
index 329ff824df..f6604dfcb6 100644
--- a/deps/v8/test/mjsunit/regress/regress-298269.js
+++ b/deps/v8/test/mjsunit/regress/regress-298269.js
@@ -42,4 +42,4 @@ Cb(s1);
var s3 = "string for triggering osr in Cb";
for (var i = 0; i < 16; i++) s3 = s3 + s3;
Cb(s3);
-Cb(s1 + s2);
\ No newline at end of file
+Cb(s1 + s2);
diff --git a/deps/v8/test/mjsunit/regress/regress-2984.js b/deps/v8/test/mjsunit/regress/regress-2984.js
index de7895db0f..914409cd91 100644
--- a/deps/v8/test/mjsunit/regress/regress-2984.js
+++ b/deps/v8/test/mjsunit/regress/regress-2984.js
@@ -31,4 +31,3 @@ assertEquals("abcdefghijklmn\xffopq",
assertEquals("\xff", "\u0178".toLowerCase());
assertEquals("ABCDEFGHIJKLMN\u0178OPQ",
("abcdefghijk" + "lmn\xffopq").toUpperCase());
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2988.js b/deps/v8/test/mjsunit/regress/regress-2988.js
new file mode 100644
index 0000000000..0311d2b76d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2988.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --gc-global --throws
+
+var f = eval("(function f() { throw 'kaboom'; })");
+
+// Arrange for the next MessageHandler::MakeMessageObject call to result in
+// reclamation of the existing script wrapper while the weak handle is in use.
+%FunctionGetScript(f);
+%SetAllocationTimeout(1000, 2);
+
+// This call throws to the console but the --throws flag passed to this
+// test will make sure we don't count it as an actual failure.
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-2989.js b/deps/v8/test/mjsunit/regress/regress-2989.js
new file mode 100644
index 0000000000..49c4a1cb03
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2989.js
@@ -0,0 +1,35 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+(function ArgumentsObjectChange() {
+ function f(x) {
+ x = 42;
+ return f.arguments[0];
+ }
+
+ f(0);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(42, f(0));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-299979.js b/deps/v8/test/mjsunit/regress/regress-299979.js
new file mode 100644
index 0000000000..0afbcb3571
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-299979.js
@@ -0,0 +1,34 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+(function(){
+ "use strict";
+ var list = Object.freeze([1, 2, 3]);
+ assertThrows(function() { list.unshift(4); }, TypeError);
+ assertThrows(function() { list.shift(); }, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-3010.js b/deps/v8/test/mjsunit/regress/regress-3010.js
new file mode 100644
index 0000000000..7aeec64828
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3010.js
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+(function() {
+ function testOneSize(current_size) {
+ var eval_string = 'obj = {';
+ for (var current = 0; current <= current_size; ++current) {
+ eval_string += 'k' + current + ':' + current + ','
+ }
+ eval_string += '};';
+ eval(eval_string);
+ for (var i = 0; i <= current_size; i++) {
+ assertEquals(i, obj['k'+i]);
+ }
+ var current_number = 0;
+ for (var x in obj) {
+ assertEquals(current_number, obj[x]);
+ current_number++;
+ }
+ }
+
+ testOneSize(127);
+ testOneSize(128);
+ testOneSize(129);
+
+ testOneSize(255);
+ testOneSize(256);
+ testOneSize(257);
+
+ testOneSize(511);
+ testOneSize(512);
+ testOneSize(513);
+
+ testOneSize(1023);
+ testOneSize(1024);
+ testOneSize(1025);
+
+ testOneSize(2047);
+ testOneSize(2048);
+ testOneSize(2049);
+}())
diff --git a/deps/v8/test/mjsunit/regress/regress-3025.js b/deps/v8/test/mjsunit/regress/regress-3025.js
new file mode 100644
index 0000000000..ccb3830687
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3025.js
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var n = 0x8000000000000800;
+assertEquals(n, 9223372036854778000);
+var s = n.toString(5);
+var v = parseInt(s, 5);
+assertEquals(n, v);
diff --git a/deps/v8/test/mjsunit/regress/regress-3026.js b/deps/v8/test/mjsunit/regress/regress-3026.js
new file mode 100644
index 0000000000..d25c88d432
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3026.js
@@ -0,0 +1,28 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertEquals([], "abc".split(undefined, 0));
diff --git a/deps/v8/test/mjsunit/regress/regress-3027.js b/deps/v8/test/mjsunit/regress/regress-3027.js
index c7ebd539b6..6336594052 100644
--- a/deps/v8/test/mjsunit/regress/regress-3027.js
+++ b/deps/v8/test/mjsunit/regress/regress-3027.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test to exceed the Heap::MaxRegularSpaceAllocationSize with an array
+// Test to exceed the Page::MaxRegularHeapObjectSize with an array
// constructor call taking many arguments.
function boom() {
diff --git a/deps/v8/test/mjsunit/regress/regress-3029.js b/deps/v8/test/mjsunit/regress/regress-3029.js
new file mode 100644
index 0000000000..ae412dff2b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3029.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function c(x) {
+ undefined.boom();
+}
+
+function f() {
+ return new c();
+}
+
+function g() {
+ f();
+}
+
+assertThrows("g()", TypeError);
+assertThrows("g()", TypeError);
+%OptimizeFunctionOnNextCall(g);
+assertThrows("g()", TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-3032.js b/deps/v8/test/mjsunit/regress/regress-3032.js
new file mode 100755
index 0000000000..ae54543758
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3032.js
@@ -0,0 +1,30 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+for (var i = 0; i < 1000000; i++) { }
+var xl = 4096;
+var z = i % xl;
diff --git a/deps/v8/test/mjsunit/regress/regress-3039.js b/deps/v8/test/mjsunit/regress/regress-3039.js
new file mode 100644
index 0000000000..3c7f62c16e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3039.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function do_div(x, y) {
+ return (x / y) | 0;
+}
+
+// Preparation.
+assertEquals(17, do_div(51, 3));
+assertEquals(13, do_div(65, 5));
+%OptimizeFunctionOnNextCall(do_div);
+assertEquals(11, do_div(77, 7));
+
+// The actual test. We should not trigger a floating point exception.
+assertEquals(-2147483648, do_div(-2147483648, -1));
diff --git a/deps/v8/test/mjsunit/regress/regress-3138.js b/deps/v8/test/mjsunit/regress/regress-3138.js
new file mode 100644
index 0000000000..acb121d2bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3138.js
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function f(){
+ assertEquals("function", typeof f);
+})();
+
+(function f(){
+ var f; // Variable shadows function name.
+ assertEquals("undefined", typeof f);
+})();
+
+(function f(){
+ var f;
+ assertEquals("undefined", typeof f);
+ with ({}); // Force context allocation of both variable and function name.
+})();
+
+assertEquals("undefined", typeof f);
+
+// var initialization is intercepted by with scope.
+(function() {
+ var o = { a: 1 };
+ with (o) {
+ var a = 2;
+ }
+ assertEquals("undefined", typeof a);
+ assertEquals(2, o.a);
+})();
+
+// const initialization is not intercepted by with scope.
+(function() {
+ var o = { a: 1 };
+ with (o) {
+ const a = 2;
+ }
+ assertEquals(2, a);
+ assertEquals(1, o.a);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-3158.js b/deps/v8/test/mjsunit/regress/regress-3158.js
new file mode 100644
index 0000000000..c69127395e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3158.js
@@ -0,0 +1,24 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+Array.prototype[0] = 'a';
+delete Array.prototype[0];
+
+function foo(a, i) {
+ return a[i];
+}
+
+var a = new Array(100000);
+a[3] = 'x';
+
+foo(a, 3);
+foo(a, 3);
+foo(a, 3);
+%OptimizeFunctionOnNextCall(foo);
+foo(a, 3);
+Array.prototype[0] = 'a';
+var z = foo(a, 0);
+assertEquals('a', z);
diff --git a/deps/v8/test/mjsunit/regress/regress-3159.js b/deps/v8/test/mjsunit/regress/regress-3159.js
new file mode 100644
index 0000000000..cfc8a39b8d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3159.js
@@ -0,0 +1,10 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+try {
+ new Uint32Array(new ArrayBuffer(1), 2, 3);
+} catch (e) {
+ assertEquals("start offset of Uint32Array should be a multiple of 4",
+ e.message);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-317.js b/deps/v8/test/mjsunit/regress/regress-317.js
index b742fa1f86..85f408b841 100644
--- a/deps/v8/test/mjsunit/regress/regress-317.js
+++ b/deps/v8/test/mjsunit/regress/regress-317.js
@@ -28,4 +28,3 @@
// Ensure replacement with string allows $ in replacement string.
assertEquals("a$ec", "abc".replace("b", "$e"), "$e isn't meaningful");
-
diff --git a/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js b/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js
index c8aed9e38c..4a48a61ab3 100644
--- a/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js
+++ b/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js
@@ -25,9 +25,20 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --nostress-opt --allow-natives-syntax
+// Flags: --nostress-opt --allow-natives-syntax --mock-arraybuffer-allocator
var maxSize = %MaxSmi() + 1;
-var ab = new ArrayBuffer(maxSize);
+var ab;
+
+// Allocate the largest ArrayBuffer we can on this architecture.
+for (k = 8; k >= 1 && ab == null; k = k/2) {
+ try {
+ ab = new ArrayBuffer(maxSize * k);
+ } catch (e) {
+ ab = null;
+ }
+}
+
+assertTrue(ab != null);
function TestArray(constr) {
assertThrows(function() {
@@ -44,4 +55,3 @@ TestArray(Int32Array);
TestArray(Float32Array);
TestArray(Float64Array);
TestArray(Uint8ClampedArray);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-320532.js b/deps/v8/test/mjsunit/regress/regress-320532.js
new file mode 100644
index 0000000000..6ec4b97293
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-320532.js
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --noalways-opt
+// Flags: --stress-runs=8 --send-idle-notification --gc-global
+
+
+function bar() { return new Array(); }
+bar();
+bar();
+%OptimizeFunctionOnNextCall(bar);
+a = bar();
+function foo(len) { return new Array(len); }
+foo(0);
+foo(0);
+%OptimizeFunctionOnNextCall(bar);
+foo(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-323845.js b/deps/v8/test/mjsunit/regress/regress-323845.js
new file mode 100644
index 0000000000..4e81657917
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-323845.js
@@ -0,0 +1,47 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test that breaks escape analysis because objects escape over
+// the arguments object.
+
+// Flags: --allow-natives-syntax
+
+function h() {
+ g.arguments;
+}
+
+function g(x) {
+ h();
+}
+
+function f() {
+ g({});
+}
+
+f(); f(); f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-324028.js b/deps/v8/test/mjsunit/regress/regress-324028.js
new file mode 100644
index 0000000000..7fe0fcd81f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-324028.js
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var badObj = { length : 1e40 };
+
+assertThrows(function() { new Uint8Array(badObj); }, RangeError);
+assertThrows(function() { new Uint8ClampedArray(badObj); }, RangeError);
+assertThrows(function() { new Int8Array(badObj); }, RangeError);
+assertThrows(function() { new Uint16Array(badObj); }, RangeError);
+assertThrows(function() { new Int16Array(badObj); }, RangeError);
+assertThrows(function() { new Uint32Array(badObj); }, RangeError);
+assertThrows(function() { new Int32Array(badObj); }, RangeError);
+assertThrows(function() { new Float32Array(badObj); }, RangeError);
+assertThrows(function() { new Float64Array(badObj); }, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-325676.js b/deps/v8/test/mjsunit/regress/regress-325676.js
new file mode 100644
index 0000000000..427bbc38dc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-325676.js
@@ -0,0 +1,69 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// If a function parameter is forced to be context allocated,
+// debug evaluate needs to resolve it to a context slot instead of
+// a parameter slot on the stack.
+
+var Debug = debug.Debug;
+
+var expected;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals(expected, exec_state.frame(0).evaluate('arg').value());
+ exec_state.frame(0).evaluate('arg = "evaluated";');
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+function f(arg) {
+ expected = arg;
+ debugger;
+ assertEquals("evaluated", arg);
+
+ arg = "value";
+ expected = arg;
+ debugger;
+ assertEquals("evaluated", arg);
+
+ // Forces arg to be context allocated even though it is a parameter.
+ function g() { arg; }
+}
+
+f();
+f(1);
+f(1, 2);
+
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-331416.js b/deps/v8/test/mjsunit/regress/regress-331416.js
new file mode 100644
index 0000000000..0c60fced14
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-331416.js
@@ -0,0 +1,52 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function load(a, i) {
+ return a[i];
+}
+load([1, 2, 3], "length");
+load(3);
+load([1, 2, 3], 3);
+load(0, 0);
+%OptimizeFunctionOnNextCall(load);
+assertEquals(2, load([1, 2, 3], 1));
+assertEquals(undefined, load(0, 0));
+
+function store(a, i, x) {
+ a[i] = x;
+}
+store([1, 2, 3], "length", 3);
+store(3);
+store([1, 2, 3], 3, 3);
+store(0, 0, 1);
+%OptimizeFunctionOnNextCall(store);
+var a = [1, 2, 3];
+store(a, 1, 1);
+assertEquals(1, a[1]);
+store(0, 0, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-336820.js b/deps/v8/test/mjsunit/regress/regress-336820.js
new file mode 100644
index 0000000000..9b46629604
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-336820.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertThrows((function() {
+ s = "Hello World!\n";
+ while (true) {
+ x = new Array();
+ x[0] = s;
+ x[1000] = s;
+ x[1000000] = s;
+ s = x.join("::");
+ }}), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-340125.js b/deps/v8/test/mjsunit/regress/regress-340125.js
new file mode 100644
index 0000000000..a3e40a8d5a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-340125.js
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+var a = new Int8Array(2);
+var b = a.subarray(2, 4);
+assertThrows(function () { a.set(b, 1e10); }, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-45469.js b/deps/v8/test/mjsunit/regress/regress-45469.js
index 832a73f063..1abe0f9d77 100644
--- a/deps/v8/test/mjsunit/regress/regress-45469.js
+++ b/deps/v8/test/mjsunit/regress/regress-45469.js
@@ -42,5 +42,3 @@ for (var i = 0; i < 15; i++) {
var res = re.test("xx");
assertEquals(i % 3 != 2, res, "testres" + i);
}
-
-
diff --git a/deps/v8/test/mjsunit/regress/regress-483.js b/deps/v8/test/mjsunit/regress/regress-483.js
index db93f59d0c..7370134d6d 100644
--- a/deps/v8/test/mjsunit/regress/regress-483.js
+++ b/deps/v8/test/mjsunit/regress/regress-483.js
@@ -32,4 +32,3 @@ function X() {
X.prototype.x = {x:1}
new X()
-
diff --git a/deps/v8/test/mjsunit/regress/regress-490.js b/deps/v8/test/mjsunit/regress/regress-490.js
index 8dd8959171..6ce89db2f0 100644
--- a/deps/v8/test/mjsunit/regress/regress-490.js
+++ b/deps/v8/test/mjsunit/regress/regress-490.js
@@ -42,6 +42,9 @@ for (var i = 0; i < 10; i++) {
var b = '';
for (var j = 0; j < 10; j++) {
b += '$1';
+
+ // TODO(machenbach): Do we need all these replacements? Wouldn't corner
+ // cases like smallest and biggest suffice?
a.replace(/^(.*)/, b);
}
a += a;
diff --git a/deps/v8/test/mjsunit/regress/regress-588599.js b/deps/v8/test/mjsunit/regress/regress-588599.js
index a1c16e245a..eece4926e3 100644
--- a/deps/v8/test/mjsunit/regress/regress-588599.js
+++ b/deps/v8/test/mjsunit/regress/regress-588599.js
@@ -28,4 +28,3 @@
assertFalse(Infinity == -Infinity);
assertEquals(Infinity, 1 / 1e-9999);
assertEquals(-Infinity, 1 / -1e-9999);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-619.js b/deps/v8/test/mjsunit/regress/regress-619.js
index 4d3e66b298..c18a8ac964 100644
--- a/deps/v8/test/mjsunit/regress/regress-619.js
+++ b/deps/v8/test/mjsunit/regress/regress-619.js
@@ -58,4 +58,3 @@ for(var i = 0; i < 1024; i++) {
for(var i = 0; i < 1024; i++) {
assertEquals(i, obj[i]);
}
-
diff --git a/deps/v8/test/mjsunit/regress/regress-634-debug.js b/deps/v8/test/mjsunit/regress/regress-634-debug.js
new file mode 100644
index 0000000000..17ca828c68
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-634-debug.js
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ %SetAllocationTimeout(1, 0, false);
+ a = new Array(0);
+ assertEquals(0, a.length);
+ assertEquals(0, a.length);
+ %SetAllocationTimeout(-1, -1, true);
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-670147.js b/deps/v8/test/mjsunit/regress/regress-670147.js
index b5b00d071a..916c13db31 100644
--- a/deps/v8/test/mjsunit/regress/regress-670147.js
+++ b/deps/v8/test/mjsunit/regress/regress-670147.js
@@ -31,4 +31,3 @@ function XXX(x) {
}
assertFalse(XXX('Hello'));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-674753.js b/deps/v8/test/mjsunit/regress/regress-674753.js
index 361b457e47..b3704ea96a 100644
--- a/deps/v8/test/mjsunit/regress/regress-674753.js
+++ b/deps/v8/test/mjsunit/regress/regress-674753.js
@@ -84,4 +84,3 @@ assertFalse(typeof 'x' == 'object'); // bug #674753
assertFalse(typeof 'x' === 'object');
assertFalse(typeof Object == 'object');
assertFalse(typeof Object === 'object');
-
diff --git a/deps/v8/test/mjsunit/regress/regress-675.js b/deps/v8/test/mjsunit/regress/regress-675.js
index 19ca646f61..bef7b27a41 100644
--- a/deps/v8/test/mjsunit/regress/regress-675.js
+++ b/deps/v8/test/mjsunit/regress/regress-675.js
@@ -58,4 +58,3 @@ this.y = 42;
// Check that IC bails out.
assertEquals(42, g());
-
diff --git a/deps/v8/test/mjsunit/regress/regress-678525.js b/deps/v8/test/mjsunit/regress/regress-678525.js
index 11eaf74fc8..49059c6f14 100644
--- a/deps/v8/test/mjsunit/regress/regress-678525.js
+++ b/deps/v8/test/mjsunit/regress/regress-678525.js
@@ -55,5 +55,3 @@ assertEquals('\027', '\27');
assertEquals(73, '\111'.charCodeAt(0));
assertEquals(105, '\151'.charCodeAt(0));
-
-
diff --git a/deps/v8/test/mjsunit/regress/regress-734862.js b/deps/v8/test/mjsunit/regress/regress-734862.js
index 6239047310..40e5b1dcdb 100644
--- a/deps/v8/test/mjsunit/regress/regress-734862.js
+++ b/deps/v8/test/mjsunit/regress/regress-734862.js
@@ -34,4 +34,3 @@ assertTrue(catcher(null, 'foo') instanceof TypeError);
assertTrue(catcher(void 0, 'foo') instanceof TypeError);
assertTrue(catcher(null, 123) instanceof TypeError);
assertTrue(catcher(void 0, 123) instanceof TypeError);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-74.js b/deps/v8/test/mjsunit/regress/regress-74.js
index f22b33c155..bdc3b5cf79 100644
--- a/deps/v8/test/mjsunit/regress/regress-74.js
+++ b/deps/v8/test/mjsunit/regress/regress-74.js
@@ -38,4 +38,3 @@ function test() {
}
test();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-753.js b/deps/v8/test/mjsunit/regress/regress-753.js
index 4621de6ba4..0253962f50 100644
--- a/deps/v8/test/mjsunit/regress/regress-753.js
+++ b/deps/v8/test/mjsunit/regress/regress-753.js
@@ -33,4 +33,3 @@
var obj = {a1: {b1: [1,2,3,4], b2: {c1: 1, c2: 2}},a2: 'a2'};
assertEquals(JSON.stringify(obj, null, 5.99999), JSON.stringify(obj, null, 5));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-806473.js b/deps/v8/test/mjsunit/regress/regress-806473.js
index 6d6485d6e9..91c9a9f60b 100644
--- a/deps/v8/test/mjsunit/regress/regress-806473.js
+++ b/deps/v8/test/mjsunit/regress/regress-806473.js
@@ -56,5 +56,3 @@ for (var j = 0; j < 10; j++) {
assertEquals(10, i);
assertEquals(10, j);
-
-
diff --git a/deps/v8/test/mjsunit/regress/regress-85177.js b/deps/v8/test/mjsunit/regress/regress-85177.js
index 275bbe7a9b..aa938f5e24 100644
--- a/deps/v8/test/mjsunit/regress/regress-85177.js
+++ b/deps/v8/test/mjsunit/regress/regress-85177.js
@@ -62,4 +62,3 @@ function f(){
f();
%OptimizeFunctionOnNextCall(f);
f();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-892742.js b/deps/v8/test/mjsunit/regress/regress-892742.js
index 78a57b2ad5..fd79fc0fac 100644
--- a/deps/v8/test/mjsunit/regress/regress-892742.js
+++ b/deps/v8/test/mjsunit/regress/regress-892742.js
@@ -45,4 +45,3 @@ function h() {
assertEquals(1, f());
assertEquals(undefined, g());
assertEquals(undefined, h());
-
diff --git a/deps/v8/test/mjsunit/regress/regress-925537.js b/deps/v8/test/mjsunit/regress/regress-925537.js
index d50c5689a5..6d75504693 100644
--- a/deps/v8/test/mjsunit/regress/regress-925537.js
+++ b/deps/v8/test/mjsunit/regress/regress-925537.js
@@ -39,4 +39,3 @@ assertClose( Math.PI / 4, Math.atan2(pinf, pinf));
assertClose(-Math.PI / 4, Math.atan2(ninf, pinf));
assertClose( 3 * Math.PI / 4, Math.atan2(pinf, ninf));
assertClose(-3 * Math.PI / 4, Math.atan2(ninf, ninf));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-94873.js b/deps/v8/test/mjsunit/regress/regress-94873.js
index 41ca9921c6..b61bc0c15d 100644
--- a/deps/v8/test/mjsunit/regress/regress-94873.js
+++ b/deps/v8/test/mjsunit/regress/regress-94873.js
@@ -75,4 +75,4 @@ function a(x, y) {
}
Debug.setBreakPoint(a, 0, 0);
-new a(1, 2);
\ No newline at end of file
+new a(1, 2);
diff --git a/deps/v8/test/mjsunit/regress/regress-990205.js b/deps/v8/test/mjsunit/regress/regress-990205.js
index b3024c23fd..7eb66f0616 100644
--- a/deps/v8/test/mjsunit/regress/regress-990205.js
+++ b/deps/v8/test/mjsunit/regress/regress-990205.js
@@ -36,4 +36,3 @@ function f() {
};
assertThrows("f()");
-
diff --git a/deps/v8/test/mjsunit/regress/regress-995.js b/deps/v8/test/mjsunit/regress/regress-995.js
index e88121a20c..6f3dac1330 100644
--- a/deps/v8/test/mjsunit/regress/regress-995.js
+++ b/deps/v8/test/mjsunit/regress/regress-995.js
@@ -54,4 +54,3 @@ function h(value) {
}
}
h(undefined);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-998565.js b/deps/v8/test/mjsunit/regress/regress-998565.js
index 260b791186..840c045c0c 100644
--- a/deps/v8/test/mjsunit/regress/regress-998565.js
+++ b/deps/v8/test/mjsunit/regress/regress-998565.js
@@ -48,4 +48,4 @@ Debug.setBreakPoint(f, 0, 0);
f();
// Make sure that the debug event listener was invoked.
-assertTrue(listenerCalled);
\ No newline at end of file
+assertTrue(listenerCalled);
diff --git a/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js b/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js
index 0b4af75424..cb77cdfb61 100644
--- a/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js
+++ b/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js
@@ -35,4 +35,3 @@ assertEquals("Infinity", String(1/f()));
assertEquals("Infinity", String(1/f()));
%OptimizeFunctionOnNextCall(f);
assertEquals("Infinity", String(1/f()));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-array-pop-deopt.js b/deps/v8/test/mjsunit/regress/regress-array-pop-deopt.js
new file mode 100644
index 0000000000..9a0d35d3aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-array-pop-deopt.js
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var o = [6,7,8,9];
+
+function f(b) {
+ var v = o.pop() + b;
+ return v;
+}
+
+assertEquals(10, f(1));
+assertEquals(9, f(1));
+assertEquals(8, f(1));
+%OptimizeFunctionOnNextCall(f);
+assertEquals("61", f("1"));
diff --git a/deps/v8/test/mjsunit/regress/regress-array-pop-nonconfigurable.js b/deps/v8/test/mjsunit/regress/regress-array-pop-nonconfigurable.js
index 129e1980a4..823bb6b1a8 100644
--- a/deps/v8/test/mjsunit/regress/regress-array-pop-nonconfigurable.js
+++ b/deps/v8/test/mjsunit/regress/regress-array-pop-nonconfigurable.js
@@ -28,4 +28,3 @@
var a = [];
Object.defineProperty(a, 0, {});
assertThrows(function() { a.pop(); });
-
diff --git a/deps/v8/test/mjsunit/regress/regress-binop-nosse2.js b/deps/v8/test/mjsunit/regress/regress-binop-nosse2.js
index c6cbaf7ebf..29c8a048fc 100644
--- a/deps/v8/test/mjsunit/regress/regress-binop-nosse2.js
+++ b/deps/v8/test/mjsunit/regress/regress-binop-nosse2.js
@@ -165,4 +165,3 @@ assertEquals(t2(1.3,null), 1.3/0);
assertEquals(t2(undefined,2), NaN/2);
assertEquals(t2(1,1<<30), 1/(1<<30));
assertEquals(t2(1,2), 1/2);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-builtin-array-op.js b/deps/v8/test/mjsunit/regress/regress-builtin-array-op.js
index 1e37af3648..ea5891f7df 100644
--- a/deps/v8/test/mjsunit/regress/regress-builtin-array-op.js
+++ b/deps/v8/test/mjsunit/regress/regress-builtin-array-op.js
@@ -35,4 +35,3 @@ var a = [2,3,1];
a[100000] = 0;
a.join();
assertEquals("hest", foo);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js b/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js
new file mode 100644
index 0000000000..3791c35f71
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ var o = {x:1};
+ var y = {y:2.5, x:0};
+ var result;
+ for (var i = 0; i < 2; i++) {
+ result = o.x + 3;
+ o = y;
+ }
+ return result;
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(3, f());
diff --git a/deps/v8/test/mjsunit/regress/regress-clobbered-fp-regs.js b/deps/v8/test/mjsunit/regress/regress-clobbered-fp-regs.js
new file mode 100644
index 0000000000..7795ae9a30
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-clobbered-fp-regs.js
@@ -0,0 +1,54 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --allow-natives-syntax
+
+function store(a, x, y) {
+ var f1 = 0.1 * y;
+ var f2 = 0.2 * y;
+ var f3 = 0.3 * y;
+ var f4 = 0.4 * y;
+ var f5 = 0.5 * y;
+ var f6 = 0.6 * y;
+ var f7 = 0.7 * y;
+ var f8 = 0.8 * y;
+ a[0] = x;
+ var sum = (f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8);
+ assertEquals(1, y);
+ var expected = 3.6;
+ if (Math.abs(expected - sum) > 0.01) {
+ assertEquals(expected, sum);
+ }
+}
+
+// Generate TransitionElementsKindStub.
+store([1], 1, 1);
+store([1], 1.1, 1);
+store([1], 1.1, 1);
+%OptimizeFunctionOnNextCall(store);
+// This will trap on allocation site in TransitionElementsKindStub.
+store([1], 1, 1)
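The store above exercises an elements-kind transition: an array holding only smis is rewritten to hold doubles. A minimal editorial sketch of that transition, not part of the change itself and assuming the %HasFastSmiElements / %HasFastDoubleElements natives are available under --allow-natives-syntax:

  var arr = [1, 2, 3];                  // starts with fast smi elements
  assertTrue(%HasFastSmiElements(arr));
  arr[0] = 1.1;                         // storing a double transitions the kind
  assertTrue(%HasFastDoubleElements(arr));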
diff --git a/deps/v8/test/mjsunit/regress/regress-context-osr.js b/deps/v8/test/mjsunit/regress/regress-context-osr.js
new file mode 100644
index 0000000000..b74907de6a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-context-osr.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+function f() {
+ try { } catch (e) { }
+}
+
+for (this.x = 0; this.x < 1; ++this.x) {
+ for (this.y = 0; this.y < 1; ++this.y) {
+ for (this.ll = 0; this.ll < 70670; ++this.ll) {
+ f();
+ }
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-convert-hole2.js b/deps/v8/test/mjsunit/regress/regress-convert-hole2.js
index b434ed3996..02ef4dd1dd 100644
--- a/deps/v8/test/mjsunit/regress/regress-convert-hole2.js
+++ b/deps/v8/test/mjsunit/regress/regress-convert-hole2.js
@@ -24,7 +24,7 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --notrack-allocation-sites
+// Flags: --allow-natives-syntax
// Test adding undefined from hole in double-holey to string.
var a = [1.5, , 1.8];
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-158185.js b/deps/v8/test/mjsunit/regress/regress-crbug-158185.js
index 99f19c72fd..5cb5900c8a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-158185.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-158185.js
@@ -36,4 +36,3 @@ assertEquals("12A",
assertEquals(1, JSON.parse('{"0":1}')[0]);
assertEquals(undefined, JSON.parse('{"00":1}')[0]);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-178790.js b/deps/v8/test/mjsunit/regress/regress-crbug-178790.js
index 57071eaa08..25cc96b852 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-178790.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-178790.js
@@ -49,4 +49,3 @@ for (var i = 0; i < 1000; i++) {
r3 = "(" + r3 + ")a";
}
"test".match(RegExp(r3));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-18639.js b/deps/v8/test/mjsunit/regress/regress-crbug-18639.js
index 4f4bb7c796..6c8e6ad2c6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-18639.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-18639.js
@@ -35,4 +35,4 @@ try {
((0).toLocaleString)();
} catch (e) {
assertInstanceof(e, TypeError);
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-222893.js b/deps/v8/test/mjsunit/regress/regress-crbug-222893.js
index d5baa7b257..39363bc912 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-222893.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-222893.js
@@ -61,4 +61,3 @@ function g(a, b) {
g.apply(this, array);
g("a", "b", "c");
assertNull(error);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-233737.js b/deps/v8/test/mjsunit/regress/regress-crbug-233737.js
index 835726b224..38b44e056f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-233737.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-233737.js
@@ -39,4 +39,4 @@ function hole(i) {
assertEquals(1, hole(0));
assertEquals(1, hole(0));
%OptimizeFunctionOnNextCall(hole);
-assertEquals(0, hole(1)); \ No newline at end of file
+assertEquals(0, hole(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-244461.js b/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
index 9c7c2b6c43..7b465482e0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays
-// Flags: --track-allocation-sites
function foo(arg) {
var a = arg();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-245480.js b/deps/v8/test/mjsunit/regress/regress-crbug-245480.js
index 4769486403..ec8850905b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-245480.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-245480.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --track-allocation-sites --noalways-opt
+// Flags: --noalways-opt
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
@@ -87,4 +87,4 @@ function g(a) {
var b = [0];
g(b);
g(b);
-assertEquals(undefined, g(a)); \ No newline at end of file
+assertEquals(undefined, g(a));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-259300.js b/deps/v8/test/mjsunit/regress/regress-crbug-259300.js
index c57b0e6f91..eac494a0e6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-259300.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-259300.js
@@ -46,4 +46,3 @@ Debug.setListener(listener);
eval("debugger");
Debug.setListener(null);
assertTrue(listened);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-306220.js b/deps/v8/test/mjsunit/regress/regress-crbug-306220.js
new file mode 100644
index 0000000000..a481ee9dba
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-306220.js
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var CustomError = function(x) { this.x = x; };
+CustomError.prototype = new Error();
+CustomError.prototype.x = "prototype";
+
+Object.defineProperties(CustomError.prototype, {
+ 'message': {
+ 'get': function() { return this.x; }
+ }
+});
+
+assertEquals("Error: instance", String(new CustomError("instance")));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-315252.js b/deps/v8/test/mjsunit/regress/regress-crbug-315252.js
new file mode 100644
index 0000000000..51454bf285
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-315252.js
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(a, b, c) {
+ this.a = a;
+ this.b = b;
+ this.c = c;
+}
+var o3 = new f(1, 2, 3.5);
+var o4 = new f(1, 2.5, 3);
+var o1 = new f(1.5, 2, 3);
+var o2 = new f(1.5, 2, 3);
+function migrate(o) {
+ return o.a;
+}
+// Use migrate to stabilize o1, o2 and o4 in [double, double, smi].
+migrate(o4);
+migrate(o1);
+migrate(o2);
+function store_transition(o) {
+ o.d = 1;
+}
+// Optimize "store_transition" to transition from [double, double, smi] to
+// [double, double, smi, smi]. This adds a dependency on the
+// [double, double, smi] map.
+store_transition(o4);
+store_transition(o1);
+store_transition(o2);
+%OptimizeFunctionOnNextCall(store_transition);
+// Pass in a deprecated object of format [smi, smi, double]. This will migrate
+// the instance, forcing a merge with [double, double, smi], ending up with
+// [double, double, double], which deprecates [double, double, smi] and
+// deoptimizes all dependencies of [double, double, smi], including
+// store_transition itself.
+store_transition(o3);
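A minimal editorial sketch of the field-generalization and migration mechanism the comments above describe, not part of the change itself; it assumes %HaveSameMap is available in addition to the %TryMigrateInstance native used elsewhere in this patch, both under --allow-natives-syntax:

  function C(v) { this.v = v; }
  var stale = new C(1);         // field 'v' is tracked as smi
  var fresh = new C(1.5);       // the double store generalizes the field and
                                // deprecates the old [smi] map
  %TryMigrateInstance(stale);   // bring the stale instance up to date
  assertTrue(%HaveSameMap(stale, fresh));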
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-3184.js b/deps/v8/test/mjsunit/regress/regress-crbug-3184.js
index ed78183f78..054668ea55 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-3184.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-3184.js
@@ -80,4 +80,3 @@ Object.extend( Array.prototype,
});
assertEquals("Test1 test1", "test1 test1".cap());
assertEquals("Test2 Test2", "test2 test2".cap(true));
-
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-320922.js b/deps/v8/test/mjsunit/regress/regress-crbug-320922.js
new file mode 100644
index 0000000000..4a5b5813e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-320922.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var string = "hello world";
+var expected = "Hello " + "world";
+function Capitalize() {
+ %_OneByteSeqStringSetChar(string, 0, 0x48);
+}
+Capitalize();
+assertEquals(expected, string);
+Capitalize();
+assertEquals(expected, string);
+
+var twobyte = "\u20ACello world";
+
+function TwoByteCapitalize() {
+ %_TwoByteSeqStringSetChar(twobyte, 0, 0x48);
+}
+TwoByteCapitalize();
+assertEquals(expected, twobyte);
+TwoByteCapitalize();
+assertEquals(expected, twobyte);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-323942.js b/deps/v8/test/mjsunit/regress/regress-crbug-323942.js
new file mode 100644
index 0000000000..15af494b0f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-323942.js
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+"use strict";
+
+// Function is defined on the prototype chain.
+var holder = { f: function() { return 42; } };
+var receiver = { };
+receiver.__proto__ = { };
+receiver.__proto__.__proto__ = holder;
+
+// Inline two levels.
+function h(o) { return o.f.apply(this, arguments); }
+function g(o) { return h(o); }
+
+// Collect type information for apply call.
+assertEquals(42, g(receiver));
+assertEquals(42, g(receiver));
+
+// Sneakily remove the function from the prototype chain.
+// The receiver map does not change.
+receiver.__proto__.__proto__ = {};
+
+// Lookup of o.f during graph creation fails.
+%OptimizeFunctionOnNextCall(g);
+
+assertThrows(function() { g(receiver); });
+
+// Put function back.
+receiver.__proto__.__proto__ = holder;
+assertEquals(42, g(receiver));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-325225.js b/deps/v8/test/mjsunit/regress/regress-crbug-325225.js
new file mode 100644
index 0000000000..798d54839d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-325225.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function f1(a) {
+ a[0](0);
+}
+
+function do1() {
+ f1([f1]);
+}
+
+assertThrows(do1, TypeError);
+
+function f2(a) {
+ a[0](true);
+}
+
+function do2() {
+ f2([function(a) { return f2("undefined", typeof f2(42, 0)); }]);
+}
+
+assertThrows(do2, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-329709.js b/deps/v8/test/mjsunit/regress/regress-crbug-329709.js
new file mode 100644
index 0000000000..c5316f391c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-329709.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function boom(x) {
+ switch(x) {
+ case 1: return "one";
+ case 1500000000: return "non-smi int32";
+ default: return "default";
+ }
+}
+
+assertEquals("one", boom(1));
+assertEquals("one", boom(1));
+%OptimizeFunctionOnNextCall(boom)
+assertEquals("non-smi int32", boom(1500000000));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-336148.js b/deps/v8/test/mjsunit/regress/regress-crbug-336148.js
new file mode 100644
index 0000000000..8157c9fcc1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-336148.js
@@ -0,0 +1,56 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(o) {
+ var a = 1;
+ if (true) return o.v && a;
+}
+
+f({});
+f({});
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1, f({ v: 1 }));
+
+
+function f1() { return 1 && 2; };
+function f2() { return 1 || 2; };
+function f3() { return 0 && 2; };
+function f4() { return 0 || 2; };
+
+function test() {
+ assertEquals(2, f1());
+ assertEquals(1, f2());
+ assertEquals(0, f3());
+ assertEquals(2, f4());
+}
+
+test();
+test();
+[f1, f2, f3, f4].forEach(function(f) { %OptimizeFunctionOnNextCall(f); });
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-340064.js b/deps/v8/test/mjsunit/regress/regress-crbug-340064.js
new file mode 100644
index 0000000000..f2ab1d6675
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-340064.js
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(v) {
+ return v.length;
+}
+
+assertEquals(4, f("test"));
+assertEquals(4, f("test"));
+assertEquals(undefined, f(true));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(undefined, f(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-frame-details-null-receiver.js b/deps/v8/test/mjsunit/regress/regress-frame-details-null-receiver.js
index d15ed4d00a..ffe5fbb84c 100644
--- a/deps/v8/test/mjsunit/regress/regress-frame-details-null-receiver.js
+++ b/deps/v8/test/mjsunit/regress/regress-frame-details-null-receiver.js
@@ -49,4 +49,3 @@ Debug.clearBreakOnException();
Debug.setListener(null);
assertTrue(listened);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-is-contextual.js b/deps/v8/test/mjsunit/regress/regress-is-contextual.js
new file mode 100644
index 0000000000..844f4a2264
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-is-contextual.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CallIC accumulates feedback that the string index is out of bounds, then
+// it misses.
+function foo(index) {
+ return text.charAt(index);
+}
+
+var text = "hi there";
+foo(0);
+foo(0);
+foo(100); // Accumulate feedback that index is out of bounds.
+text = false;
+
+// Without the fix, this line hits an assertion in debug builds.
+assertThrows(function () { foo(); }, TypeError);
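For context, a small sketch that is not part of the change: an out-of-bounds charAt on a string yields the empty string rather than throwing, which is the feedback the comment above refers to; the TypeError only appears once text stops being a string.

  assertEquals("", "hi there".charAt(100));   // out of bounds: empty string
  assertEquals("h", "hi there".charAt(0));
  assertThrows(function() { false.charAt(0); }, TypeError);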
diff --git a/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js b/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js
index 4b355ae1ac..905ab40694 100644
--- a/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js
+++ b/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js
@@ -26,9 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var a = [];
-var new_space_string = "";
-for (var i = 0; i < 128; i++) {
- new_space_string += String.fromCharCode((Math.random() * 26 + 65) | 0);
+var new_space_string = "a";
+for (var i = 0; i < 8; i++) {
+ new_space_string += new_space_string;
}
for (var i = 0; i < 10000; i++) a.push(new_space_string);
@@ -40,12 +40,12 @@ json2 = JSON.stringify(a);
assertTrue(json1 == json2, "GC caused JSON.stringify to fail.");
// Check that the slow path of JSON.stringify works correctly wrt GC.
-for (var i = 0; i < 100000; i++) {
+for (var i = 0; i < 10000; i++) {
var s = i.toString();
assertEquals('"' + s + '"', JSON.stringify(s, null, 0));
}
-for (var i = 0; i < 100000; i++) {
+for (var i = 0; i < 10000; i++) {
var s = i.toString() + "\u2603";
assertEquals('"' + s + '"', JSON.stringify(s, null, 0));
}
diff --git a/deps/v8/test/mjsunit/regress/regress-keyed-access-string-length.js b/deps/v8/test/mjsunit/regress/regress-keyed-access-string-length.js
new file mode 100644
index 0000000000..f2ead686c6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-keyed-access-string-length.js
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(i) {
+ return "abc"[i];
+}
+
+f("length");
+f("length");
+%OptimizeFunctionOnNextCall(f);
+f("length");
diff --git a/deps/v8/test/mjsunit/regress/regress-lookup-transition.js b/deps/v8/test/mjsunit/regress/regress-lookup-transition.js
new file mode 100644
index 0000000000..9b32939306
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-lookup-transition.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --expose-gc
+
+var proxy = Proxy.create({ getPropertyDescriptor:function(key) {
+ gc();
+}});
+
+function f() { this.x = 23; }
+f.prototype = proxy;
+new f();
+new f();
diff --git a/deps/v8/test/mjsunit/regress/regress-map-invalidation-1.js b/deps/v8/test/mjsunit/regress/regress-map-invalidation-1.js
index bcc6bbb615..430972b75a 100644
--- a/deps/v8/test/mjsunit/regress/regress-map-invalidation-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-map-invalidation-1.js
@@ -30,7 +30,7 @@
var c = { x: 2, y: 1 };
function h() {
- %MigrateInstance(c);
+ %TryMigrateInstance(c);
return 2;
}
%NeverOptimizeFunction(h);
@@ -45,4 +45,3 @@ function f() {
}
f();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js b/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js
index 1f896a495f..7674e425cb 100644
--- a/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js
@@ -47,7 +47,9 @@ function g() {
var fun = g();
fun(false, c);
fun(false, c);
+%OptimizeFunctionOnNextCall(fun);
fun(false, c);
+%TryMigrateInstance(c);
%OptimizeFunctionOnNextCall(fun);
fun(false, c);
fun(true, c);
diff --git a/deps/v8/test/mjsunit/regress/regress-param-local-type.js b/deps/v8/test/mjsunit/regress/regress-param-local-type.js
new file mode 100644
index 0000000000..bf26090089
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-param-local-type.js
@@ -0,0 +1,58 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test that we do not confuse the first local and the first parameter
+// when gathering type information.
+
+function f(a) { // First parameter is tagged.
+ var s = ''; // First local has string type.
+ var n = 0;
+ var i = 1;
+ n = i + a;
+}
+
+f(1);
+f(1);
+%OptimizeFunctionOnNextCall(f);
+f(1);
+assertOptimized(f);
+
+
+function g() { // 0th parameter (receiver) is tagged.
+ var s = ''; // First local has string type.
+ var n = 0;
+ var i = 1;
+ n = i + this;
+}
+
+g.call(1);
+g.call(1);
+%OptimizeFunctionOnNextCall(g);
+g.call(1);
+assertOptimized(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
index be54be6740..c85cf56e0c 100644
--- a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
+++ b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
@@ -57,4 +57,3 @@ StringFromCharCode(0x7C, 0x7C);
%OptimizeFunctionOnNextCall(StringFromCharCode);
StringFromCharCode(0x7C, 0x7C);
StringFromCharCode(0xFFF, 0xFFF);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js
index 6acc2f285e..6a5ba9195c 100644
--- a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js
+++ b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex2.js
@@ -32,4 +32,3 @@ String.fromCharCode(0x7C, 0x7C);
%OptimizeFunctionOnNextCall(String.fromCharCode);
String.fromCharCode(0x7C, 0x7C);
String.fromCharCode(0xFFF, 0xFFF);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
index e2592a1615..43d2b08352 100644
--- a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
+++ b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
@@ -42,4 +42,3 @@ test();
test();
%OptimizeFunctionOnNextCall(test);
test();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-transcendental.js b/deps/v8/test/mjsunit/regress/regress-transcendental.js
index b5dbcb48af..4065f50bbe 100644
--- a/deps/v8/test/mjsunit/regress/regress-transcendental.js
+++ b/deps/v8/test/mjsunit/regress/regress-transcendental.js
@@ -46,4 +46,3 @@ function test(f, x, name) {
test(Math.tan, -1.57079632679489660000, "Math.tan");
test(Math.sin, 6.283185307179586, "Math.sin");
-
diff --git a/deps/v8/test/mjsunit/regress/setvalueof-deopt.js b/deps/v8/test/mjsunit/regress/setvalueof-deopt.js
new file mode 100644
index 0000000000..8c42c8a20b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/setvalueof-deopt.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function g(x, y) {
+ return y;
+}
+
+function f(deopt) {
+ return g(%_SetValueOf(1, 1), deopt + 0);
+}
+
+f(0);
+f(0);
+f(0);
+%OptimizeFunctionOnNextCall(f);
+assertEquals("result0", f("result"));
diff --git a/deps/v8/test/mjsunit/regress/string-set-char-deopt.js b/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
new file mode 100644
index 0000000000..9f6d434538
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
@@ -0,0 +1,85 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+(function OneByteSeqStringSetCharDeoptOsr() {
+ function deopt() {
+ %DeoptimizeFunction(f);
+ }
+
+ function f(string, osr) {
+ var world = " world";
+ %_OneByteSeqStringSetChar(string, 0, (deopt(), 0x48));
+
+ if (osr) while (%GetOptimizationStatus(f) == 2) {}
+
+ return string + world;
+ }
+
+ assertEquals("Hello " + "world", f("hello", false));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("Hello " + "world", f("hello", true));
+})();
+
+
+(function OneByteSeqStringSetCharDeopt() {
+ function deopt() {
+ %DeoptimizeFunction(f);
+ }
+
+ function g(x) {
+ }
+
+ function f(string) {
+ g(%_OneByteSeqStringSetChar(string, 0, (deopt(), 0x48)));
+ return string;
+ }
+
+ assertEquals("Hell" + "o", f("hello"));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("Hell" + "o", f("hello"));
+})();
+
+
+(function TwoByteSeqStringSetCharDeopt() {
+ function deopt() {
+ %DeoptimizeFunction(f);
+ }
+
+ function g(x) {
+ }
+
+ function f(string) {
+ g(%_TwoByteSeqStringSetChar(string, 0, (deopt(), 0x48)));
+ return string;
+ }
+
+ assertEquals("Hell" + "o", f("\u20ACello"));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("Hell" + "o", f("\u20ACello"));
+})();
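The (deopt(), 0x48) arguments above rely on the comma operator: the call deoptimizes f while the intrinsic's arguments are still being evaluated, and the value that flows on is still 0x48. A self-contained illustration of that operator, not part of the change:

  var side_effects = [];
  var value = (side_effects.push("first"), 0x48);
  assertEquals(0x48, value);              // comma yields its last operand
  assertEquals(1, side_effects.length);   // but the first operand still ran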
diff --git a/deps/v8/test/mjsunit/set-prototype-of.js b/deps/v8/test/mjsunit/set-prototype-of.js
new file mode 100644
index 0000000000..02bd5e2ee6
--- /dev/null
+++ b/deps/v8/test/mjsunit/set-prototype-of.js
@@ -0,0 +1,170 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-symbols
+
+
+function getObjects() {
+ function func() {}
+ return [
+ func,
+ new func(),
+ {x: 5},
+ /regexp/,
+ ['array'],
+ // new Error(),
+ new Date(),
+ new Number(1),
+ new Boolean(true),
+ new String('str'),
+ Object(Symbol())
+ ];
+}
+
+
+var coercibleValues = [
+ 1,
+ true,
+ 'string',
+ Symbol()
+];
+
+
+var nonCoercibleValues = [
+ undefined,
+ null
+];
+
+
+var valuesWithoutNull = coercibleValues.concat(undefined);
+
+
+function TestSetPrototypeOfCoercibleValues() {
+ for (var i = 0; i < coercibleValues.length; i++) {
+ var value = coercibleValues[i];
+ assertThrows(function() {
+ Object.getPrototypeOf(value);
+ }, TypeError);
+
+ assertEquals(Object.setPrototypeOf(value, {}), value);
+
+ assertThrows(function() {
+ Object.getPrototypeOf(value);
+ }, TypeError);
+ }
+}
+TestSetPrototypeOfCoercibleValues();
+
+
+function TestSetPrototypeOfNonCoercibleValues() {
+ for (var i = 0; i < nonCoercibleValues.length; i++) {
+ var value = nonCoercibleValues[i];
+ assertThrows(function() {
+ Object.setPrototypeOf(value, {});
+ }, TypeError);
+ }
+}
+TestSetPrototypeOfNonCoercibleValues();
+
+
+function TestSetPrototypeToNonObject(proto) {
+ var objects = getObjects();
+ for (var i = 0; i < objects.length; i++) {
+ var object = objects[i];
+ for (var j = 0; j < valuesWithoutNull.length; j++) {
+ var proto = valuesWithoutNull[j];
+ assertThrows(function() {
+ Object.setPrototypeOf(object, proto);
+ }, TypeError);
+ }
+ }
+}
+TestSetPrototypeToNonObject();
+
+
+function TestSetPrototypeOf(object, proto) {
+ assertEquals(Object.setPrototypeOf(object, proto), object);
+ assertEquals(Object.getPrototypeOf(object), proto);
+}
+
+
+function TestSetPrototypeOfForObjects() {
+ var objects1 = getObjects();
+ var objects2 = getObjects();
+ for (var i = 0; i < objects1.length; i++) {
+ for (var j = 0; j < objects2.length; j++) {
+ TestSetPrototypeOf(objects1[i], objects2[j]);
+ }
+ }
+}
+TestSetPrototypeOfForObjects();
+
+
+function TestSetPrototypeToNull() {
+ var objects = getObjects();
+ for (var i = 0; i < objects.length; i++) {
+ TestSetPrototypeOf(objects[i], null);
+ }
+}
+TestSetPrototypeToNull();
+
+
+function TestSetPrototypeOfNonExtensibleObject() {
+ var objects = getObjects();
+ var proto = {};
+ for (var i = 0; i < objects.length; i++) {
+ var object = objects[i];
+ Object.preventExtensions(object);
+ assertThrows(function() {
+ Object.setPrototypeOf(object, proto);
+ }, TypeError);
+ }
+}
+TestSetPrototypeOfNonExtensibleObject();
+
+
+function TestLookup() {
+ var object = {};
+ assertFalse('x' in object);
+ assertFalse('y' in object);
+
+ var oldProto = {
+ x: 'old x',
+ y: 'old y'
+ };
+ Object.setPrototypeOf(object, oldProto);
+ assertEquals(object.x, 'old x');
+ assertEquals(object.y, 'old y');
+
+ var newProto = {
+ x: 'new x'
+ };
+ Object.setPrototypeOf(object, newProto);
+ assertEquals(object.x, 'new x');
+ assertFalse('y' in object);
+}
+TestLookup();
diff --git a/deps/v8/test/mjsunit/setters-on-elements.js b/deps/v8/test/mjsunit/setters-on-elements.js
new file mode 100644
index 0000000000..dd3fabf309
--- /dev/null
+++ b/deps/v8/test/mjsunit/setters-on-elements.js
@@ -0,0 +1,199 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --max-opt-count=100 --noalways-opt
+
+// We specify max-opt-count because we opt/deopt the same function many
+// times.
+
+// It's nice to run this in other browsers too.
+var standalone = false;
+if (standalone) {
+ assertTrue = function(val) {
+ if (val != true) {
+ print("FAILURE");
+ }
+ }
+
+ assertFalse = function(val) {
+ if (val != false) {
+ print("FAILURE");
+ }
+ }
+
+ assertEquals = function(expected, val) {
+ if (expected !== val) {
+ print("FAILURE");
+ }
+ }
+
+ empty_func = function(name) { }
+ assertUnoptimized = empty_func;
+ assertOptimized = empty_func;
+
+ optimize = empty_func;
+ clearFunctionTypeFeedback = empty_func;
+ deoptimizeFunction = empty_func;
+} else {
+ optimize = function(name) {
+ %OptimizeFunctionOnNextCall(name);
+ }
+ clearFunctionTypeFeedback = function(name) {
+ %ClearFunctionTypeFeedback(name);
+ }
+ deoptimizeFunction = function(name) {
+ %DeoptimizeFunction(name);
+ }
+}
+
+function base_setter_test(create_func, index, store_value) {
+ var calls = 0;
+
+ // Testcase: setter in prototype chain
+ foo = function(a) { a[index] = store_value; }
+ var a = create_func();
+ var ap = [];
+ ap.__defineSetter__(index, function() { calls++; });
+
+ foo(a);
+ foo(a);
+ foo(a);
+ delete a[index];
+
+ assertEquals(0, calls);
+ a.__proto__ = ap;
+ foo(a);
+ assertEquals(1, calls);
+ optimize(foo);
+ foo(a);
+ assertEquals(2, calls);
+ assertOptimized(foo);
+
+ // Testcase: setter added on prototype chain object already in place.
+ clearFunctionTypeFeedback(foo);
+ deoptimizeFunction(foo);
+ clearFunctionTypeFeedback(foo);
+ calls = 0;
+ a = create_func();
+ var apap = [];
+ a.__proto__ = apap;
+ foo(a);
+ foo(a);
+ foo(a);
+ delete a[index];
+ apap.__defineSetter__(index, function() { calls++; });
+ foo(a);
+ foo(a);
+ foo(a);
+ assertEquals(3, calls);
+
+ // Testcase: setter "deep" in prototype chain.
+ clearFunctionTypeFeedback(foo);
+ deoptimizeFunction(foo);
+ clearFunctionTypeFeedback(foo);
+ calls = 0;
+
+ a = create_func();
+ var ap2 = [];
+ a.__proto__ = ap2;
+ foo(a);
+ foo(a);
+ foo(a);
+ delete a[index];
+
+ assertEquals(0, calls);
+
+ ap2.__proto__ = ap; // "sneak" in a callback.
+ // The sneak case should be caught by unoptimized code too.
+ assertUnoptimized(foo);
+ foo(a);
+ foo(a);
+ foo(a);
+ assertEquals(3, calls);
+
+ // Testcase: setter added after optimization (feedback is monomorphic)
+ clearFunctionTypeFeedback(foo);
+ deoptimizeFunction(foo);
+ clearFunctionTypeFeedback(foo);
+ calls = 0;
+
+ a = create_func();
+ ap2 = [];
+ a.__proto__ = ap2;
+ foo(a);
+ foo(a);
+ foo(a);
+ optimize(foo);
+ foo(a);
+ assertOptimized(foo);
+ delete a[index];
+ ap2.__proto__ = ap;
+ foo(a);
+ assertUnoptimized(foo); // map shape change should deopt foo.
+ assertEquals(1, calls);
+
+ // Testcase: adding additional setters to a prototype chain that already has
+// one shouldn't deopt anything. (i.e., we aren't changing the map shape).
+ clearFunctionTypeFeedback(foo);
+ calls = 0;
+
+ a = create_func();
+ a.__proto__ = ap2;
+ bar = function(a) { a[index+1] = store_value; }
+ bar(a);
+ bar(a);
+ bar(a); // store should be generic
+ optimize(bar);
+ bar(a);
+ assertOptimized(bar);
+ assertEquals(0, calls);
+ delete a[index+1];
+ ap2.__defineSetter__(index+1, function() { calls++; });
+ bar(a);
+ assertOptimized(bar);
+ assertEquals(1, calls);
+}
+
+// Verify that map transitions don't confuse us.
+create_func_smi = function() { return [,,,,,,5]; }
+create_func_double = function() { return [0,,3.2,,,,5.5]; }
+create_func_fast = function() { return [,,,,,,true]; }
+create_func_dictionary = function() { var a = []; a.length = 100000; return a; }
+
+var cf = [create_func_smi,
+ create_func_double,
+ create_func_fast,
+ create_func_dictionary];
+
+var values = [3, 3.5, true];
+
+for(var c = 0; c < 3; c++) {
+ for(var s = 0; s < 3; s++) {
+ base_setter_test(cf[c], 0, values[s]);
+ base_setter_test(cf[c], 1, values[s]);
+ }
+}
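A minimal sketch of the mechanism the testcases above exercise, not part of the change: an accessor defined at an element index on the prototype chain intercepts keyed stores on instances that do not shadow that index.

  var hits = 0;
  var proto = [];
  proto.__defineSetter__(0, function(v) { hits++; });
  var arr = [];
  arr.__proto__ = proto;
  arr[0] = 42;                          // routed to the prototype setter
  assertEquals(1, hits);
  assertFalse(arr.hasOwnProperty(0));   // no own element was created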
diff --git a/deps/v8/test/mjsunit/shift-for-integer-div.js b/deps/v8/test/mjsunit/shift-for-integer-div.js
index aaa67e97fe..884202d313 100644
--- a/deps/v8/test/mjsunit/shift-for-integer-div.js
+++ b/deps/v8/test/mjsunit/shift-for-integer-div.js
@@ -60,7 +60,7 @@ divn1(2);
divn1(2);
%OptimizeFunctionOnNextCall(divn1);
assertEquals(-2, divn1(2));
-assertEquals(two_31, divn1(-two_31));
+assertEquals(-two_31, divn1(two_31));
//Check for truncating to int32 case
@@ -85,3 +85,14 @@ divn4t(8);
assertEquals(1, divn4t(-5));
assertEquals(-1, divn4t(5));
assertOptimized(divn4t);
+
+// Check kMinInt case.
+function div_by_two(x) {
+ return (x / 2) | 0;
+}
+
+div_by_two(12);
+div_by_two(34);
+%OptimizeFunctionOnNextCall(div_by_two);
+div_by_two(56);
+assertEquals(-(1 << 30), div_by_two(1 << 31));
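A brief editorial note on the arithmetic in the kMinInt check above, not part of the change: the shift already produces the int32 minimum, so the last assertion feeds exactly kMinInt into the shift-based division path.

  assertEquals(-2147483648, 1 << 31);               // 1 << 31 wraps to kMinInt
  assertEquals(-1073741824, (-2147483648 / 2) | 0); // kMinInt / 2, truncated
  assertEquals(-(1 << 30), -1073741824);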
diff --git a/deps/v8/test/mjsunit/simple-constructor.js b/deps/v8/test/mjsunit/simple-constructor.js
index e9ae921009..391ef3d6d1 100755
--- a/deps/v8/test/mjsunit/simple-constructor.js
+++ b/deps/v8/test/mjsunit/simple-constructor.js
@@ -137,4 +137,3 @@ assertEquals(2, o.y);
o = new g();
assertEquals(1, o.x);
assertEquals(2, o.y);
-
diff --git a/deps/v8/test/mjsunit/sin-cos.js b/deps/v8/test/mjsunit/sin-cos.js
index e38dfdf814..02ae57ba27 100644
--- a/deps/v8/test/mjsunit/sin-cos.js
+++ b/deps/v8/test/mjsunit/sin-cos.js
@@ -27,6 +27,24 @@
// Test Math.sin and Math.cos.
+// Flags: --allow-natives-syntax
+
+assertEquals("-Infinity", String(1/Math.sin(-0)));
+assertEquals(1, Math.cos(-0));
+assertEquals("-Infinity", String(1/Math.tan(-0)));
+
+// Assert that minus zero does not cause deopt.
+function no_deopt_on_minus_zero(x) {
+ return Math.sin(x) + Math.cos(x) + Math.tan(x);
+}
+
+no_deopt_on_minus_zero(1);
+no_deopt_on_minus_zero(1);
+%OptimizeFunctionOnNextCall(no_deopt_on_minus_zero);
+no_deopt_on_minus_zero(-0);
+assertOptimized(no_deopt_on_minus_zero);
+
+
function sinTest() {
assertEquals(0, Math.sin(0));
assertEquals(1, Math.sin(Math.PI / 2));
@@ -42,9 +60,128 @@ cosTest();
// By accident, the slow case for sine and cosine were both sine at
// some point. This is a regression test for that issue.
-var x = Math.pow(2, 70);
+var x = Math.pow(2, 30);
assertTrue(Math.sin(x) != Math.cos(x));
// Ensure that sine and log are not the same.
x = 0.5;
assertTrue(Math.sin(x) != Math.log(x));
+
+// Test against approximation by series.
+var factorial = [1];
+var accuracy = 50;
+for (var i = 1; i < accuracy; i++) {
+ factorial[i] = factorial[i-1] * i;
+}
+
+// We sum up in the reverse order for higher precision, as we expect the terms
+// to grow smaller for x reasonably close to 0.
+function precision_sum(array) {
+ var result = 0;
+ while (array.length > 0) {
+ result += array.pop();
+ }
+ return result;
+}
+
+function sin(x) {
+ var sign = 1;
+ var x2 = x*x;
+ var terms = [];
+ for (var i = 1; i < accuracy; i += 2) {
+ terms.push(sign * x / factorial[i]);
+ x *= x2;
+ sign *= -1;
+ }
+ return precision_sum(terms);
+}
+
+function cos(x) {
+ var sign = -1;
+ var x2 = x*x;
+ x = x2;
+ var terms = [1];
+ for (var i = 2; i < accuracy; i += 2) {
+ terms.push(sign * x / factorial[i]);
+ x *= x2;
+ sign *= -1;
+ }
+ return precision_sum(terms);
+}
+
+function abs_error(fun, ref, x) {
+ return Math.abs(ref(x) - fun(x));
+}
+
+var test_inputs = [];
+for (var i = -10000; i < 10000; i += 177) test_inputs.push(i/1257);
+var epsilon = 0.0000001;
+
+test_inputs.push(0);
+test_inputs.push(0 + epsilon);
+test_inputs.push(0 - epsilon);
+test_inputs.push(Math.PI/2);
+test_inputs.push(Math.PI/2 + epsilon);
+test_inputs.push(Math.PI/2 - epsilon);
+test_inputs.push(Math.PI);
+test_inputs.push(Math.PI + epsilon);
+test_inputs.push(Math.PI - epsilon);
+test_inputs.push(- 2*Math.PI);
+test_inputs.push(- 2*Math.PI + epsilon);
+test_inputs.push(- 2*Math.PI - epsilon);
+
+var squares = [];
+for (var i = 0; i < test_inputs.length; i++) {
+ var x = test_inputs[i];
+ var err_sin = abs_error(Math.sin, sin, x);
+ var err_cos = abs_error(Math.cos, cos, x)
+ assertEqualsDelta(0, err_sin, 1E-13);
+ assertEqualsDelta(0, err_cos, 1E-13);
+ squares.push(err_sin*err_sin + err_cos*err_cos);
+}
+
+// Sum squares up by adding them pairwise, to avoid losing precision.
+while (squares.length > 1) {
+ var reduced = [];
+ if (squares.length % 2 == 1) reduced.push(squares.pop());
+ // Remaining number of elements is even.
+ while(squares.length > 1) reduced.push(squares.pop() + squares.pop());
+ squares = reduced;
+}
+
+var err_rms = Math.sqrt(squares[0] / test_inputs.length / 2);
+assertEqualsDelta(0, err_rms, 1E-14);
+
+assertEquals(-1, Math.cos({ valueOf: function() { return Math.PI; } }));
+assertEquals(0, Math.sin("0x00000"));
+assertEquals(1, Math.cos("0x00000"));
+assertTrue(isNaN(Math.sin(Infinity)));
+assertTrue(isNaN(Math.cos("-Infinity")));
+assertEquals("Infinity", String(Math.tan(Math.PI/2)));
+assertEquals("-Infinity", String(Math.tan(-Math.PI/2)));
+assertEquals("-Infinity", String(1/Math.sin("-0")));
+
+// Assert that the remainder after division by pi is reasonably precise.
+function assertError(expected, x, epsilon) {
+ assertTrue(Math.abs(x - expected) < epsilon);
+}
+
+assertEqualsDelta(0.9367521275331447, Math.cos(1e06), 1e-15);
+assertEqualsDelta(0.8731196226768560, Math.cos(1e10), 1e-08);
+assertEqualsDelta(0.9367521275331447, Math.cos(-1e06), 1e-15);
+assertEqualsDelta(0.8731196226768560, Math.cos(-1e10), 1e-08);
+assertEqualsDelta(-0.3499935021712929, Math.sin(1e06), 1e-15);
+assertEqualsDelta(-0.4875060250875106, Math.sin(1e10), 1e-08);
+assertEqualsDelta(0.3499935021712929, Math.sin(-1e06), 1e-15);
+assertEqualsDelta(0.4875060250875106, Math.sin(-1e10), 1e-08);
+assertEqualsDelta(0.7796880066069787, Math.sin(1e16), 1e-05);
+assertEqualsDelta(-0.6261681981330861, Math.cos(1e16), 1e-05);
+
+// Assert that remainder calculation terminates.
+for (var i = -1024; i < 1024; i++) {
+ assertFalse(isNaN(Math.sin(Math.pow(2, i))));
+}
+
+assertFalse(isNaN(Math.cos(1.57079632679489700)));
+assertFalse(isNaN(Math.cos(-1e-100)));
+assertFalse(isNaN(Math.cos(-1e-323)));
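The squared-error reduction in sin-cos.js uses pairwise summation, which keeps the accumulated rounding error near O(log n) instead of the O(n) of a simple left-to-right loop. A standalone sketch of the same idea (function name is illustrative, not taken from the test):

function pairwiseSum(values) {
  // Repeatedly add neighbouring elements until one value remains.
  while (values.length > 1) {
    var reduced = [];
    if (values.length % 2 == 1) reduced.push(values.pop());
    while (values.length > 1) reduced.push(values.pop() + values.pop());
    values = reduced;
  }
  return values[0];
}

var terms = [1e8];
for (var i = 0; i < 1000; i++) terms.push(1e-8);
// pairwiseSum(terms) is expected to land noticeably closer to the exact
// 100000000.00001 than naive forward accumulation, where each tiny addition
// is individually rounded against the large running total.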
diff --git a/deps/v8/test/mjsunit/smi-mul-const.js b/deps/v8/test/mjsunit/smi-mul-const.js
new file mode 100644
index 0000000000..ca627fc27e
--- /dev/null
+++ b/deps/v8/test/mjsunit/smi-mul-const.js
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+function check(func, input, expected) {
+ func(-1);
+ func(-1);
+ %OptimizeFunctionOnNextCall(func);
+ assertEquals(expected, func(input));
+ assertOptimized(func);
+}
+
+function mul_by_neg_1(a) { return a * -1; }
+function mul_by_0(a) { return a * 0; }
+function mul_by_1(a) { return a * 1; }
+function mul_by_2(a) { return a * 2; }
+
+check(mul_by_neg_1, 2, -2);
+check(mul_by_0, 2, 0);
+check(mul_by_1, 2, 2);
+check(mul_by_2, 2, 4);
+
+function limit_range(a) {
+ // Limit the range of 'a' to enable no-overflow optimizations.
+ return Math.max(Math.min(a | 0, 10), -10);
+}
+
+function mul_by_neg_127(a) { return limit_range(a) * -127; }
+function mul_by_neg_128(a) { return limit_range(a) * -128; }
+function mul_by_neg_129(a) { return limit_range(a) * -129; }
+function mul_by_1023(a) { return limit_range(a) * 1023; }
+function mul_by_1024(a) { return limit_range(a) * 1024; }
+function mul_by_1025(a) { return limit_range(a) * 1025; }
+
+check(mul_by_neg_127, 2, -254);
+check(mul_by_neg_128, 2, -256);
+check(mul_by_neg_129, 2, -258);
+check(mul_by_1023, 2, 2046);
+check(mul_by_1024, 2, 2048);
+check(mul_by_1025, 2, 2050);
+
+// Deopt on minus zero.
+assertEquals(-0, mul_by_neg_128(0));
+assertUnoptimized(mul_by_neg_128);
+assertEquals(-0, mul_by_2(-0));
+assertUnoptimized(mul_by_2);
+
+// Deopt on overflow.
+
+// 2^30 is a smi boundary on arm and ia32.
+var two_30 = 1 << 30;
+// 2^31 is a smi boundary on arm64 and x64.
+var two_31 = 2 * two_30;
+
+// TODO(rmcilroy): replace after r16361 with: if (%IsValidSmi(two_31)) {
+if (true) {
+ assertEquals(two_31, mul_by_neg_1(-two_31));
+ assertUnoptimized(mul_by_neg_1);
+} else {
+ assertEquals(two_30, mul_by_neg_1(-two_30));
+ assertUnoptimized(mul_by_neg_1);
+}
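The overflow expectations depend on the platform-specific smi width: smis are 31-bit signed integers on ia32/arm (range -2^30 .. 2^30 - 1) and 32-bit signed integers on x64/a64 (range -2^31 .. 2^31 - 1). A short worked check of the boundary values (plain JavaScript, illustrative only):

var two_30 = 1 << 30;        // 1073741824 == 2^30, already outside the 31-bit smi range
var two_31 = 2 * two_30;     // 2147483648 == 2^31, already outside the 32-bit smi range
// mul_by_neg_1(-two_31) == two_31 therefore overflows the smi result on 64-bit
// targets and triggers the deopt asserted above; -two_30 plays the same role on 32-bit.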
diff --git a/deps/v8/test/mjsunit/sparse-array.js b/deps/v8/test/mjsunit/sparse-array.js
index 0952f2caaa..114ab0aafd 100644
--- a/deps/v8/test/mjsunit/sparse-array.js
+++ b/deps/v8/test/mjsunit/sparse-array.js
@@ -37,5 +37,3 @@ for (var repetitions = 0; repetitions < 20; repetitions++) {
array[i * stride] = i;
}
}
-
-
diff --git a/deps/v8/test/mjsunit/stack-traces-2.js b/deps/v8/test/mjsunit/stack-traces-2.js
index 165c4dfcec..3bec963701 100644
--- a/deps/v8/test/mjsunit/stack-traces-2.js
+++ b/deps/v8/test/mjsunit/stack-traces-2.js
@@ -84,4 +84,4 @@ testNotOmittedBuiltin(function(){ [thrower, 2].sort(function (a,b) {
}, "QuickSort");
// Not omitted even though ADD from runtime.js is a non-native builtin.
-testNotOmittedBuiltin(function(){ thrower + 2; }, "ADD"); \ No newline at end of file
+testNotOmittedBuiltin(function(){ thrower + 2; }, "ADD");
diff --git a/deps/v8/test/mjsunit/stack-traces-custom-lazy.js b/deps/v8/test/mjsunit/stack-traces-custom-lazy.js
index 91d97f3739..02a465e658 100644
--- a/deps/v8/test/mjsunit/stack-traces-custom-lazy.js
+++ b/deps/v8/test/mjsunit/stack-traces-custom-lazy.js
@@ -46,4 +46,3 @@ function testPrepareStackTrace(closure) {
testPrepareStackTrace(function() { throw new Error("foo"); });
testPrepareStackTrace(function f() { f(); });
-
diff --git a/deps/v8/test/mjsunit/store-dictionary.js b/deps/v8/test/mjsunit/store-dictionary.js
index 45e254bd2b..377352f39e 100644
--- a/deps/v8/test/mjsunit/store-dictionary.js
+++ b/deps/v8/test/mjsunit/store-dictionary.js
@@ -62,4 +62,3 @@ assertEquals(42, o.x);
delete o.x;
store(o);
assertEquals(42, o.x);
-
diff --git a/deps/v8/test/mjsunit/str-to-num.js b/deps/v8/test/mjsunit/str-to-num.js
index cbec87fab9..8c9a307001 100644
--- a/deps/v8/test/mjsunit/str-to-num.js
+++ b/deps/v8/test/mjsunit/str-to-num.js
@@ -211,4 +211,4 @@ assertTrue(isNaN(toNumber("+0xFF")));
assertTrue(isNaN(toNumber("+0x012")));
assertTrue(isNaN(toNumber("-0x0")));
assertTrue(isNaN(toNumber("-0xFF")));
-assertTrue(isNaN(toNumber("-0x012"))); \ No newline at end of file
+assertTrue(isNaN(toNumber("-0x012")));
diff --git a/deps/v8/test/mjsunit/strict-equals.js b/deps/v8/test/mjsunit/strict-equals.js
index d080ce8c4c..d5577c7a35 100644
--- a/deps/v8/test/mjsunit/strict-equals.js
+++ b/deps/v8/test/mjsunit/strict-equals.js
@@ -84,7 +84,3 @@ assertFalse(0 === new Number(0));
assertFalse(1 === new Number(1));
assertTrue(4.2 === 4.2);
assertTrue(4.2 === Number(4.2));
-
-
-
-
diff --git a/deps/v8/test/mjsunit/string-natives.js b/deps/v8/test/mjsunit/string-natives.js
index b1ec875420..7a9009bfd1 100644
--- a/deps/v8/test/mjsunit/string-natives.js
+++ b/deps/v8/test/mjsunit/string-natives.js
@@ -29,15 +29,23 @@
function test() {
var s1 = %NewString(26, true);
+ for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(s1, i, 65);
+ assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAA", s1);
+ %_OneByteSeqStringSetChar(s1, 25, 66);
+ assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAB", s1);
for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(s1, i, i+65);
assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s1);
s1 = %TruncateString(s1, 13);
assertEquals("ABCDEFGHIJKLM", s1);
var s2 = %NewString(26, false);
+ for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s2, i, 65);
+ assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAA", s2);
+ %_TwoByteSeqStringSetChar(s2, 25, 66);
+ assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAB", s2);
for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s2, i, i+65);
assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s2);
- s2 = %TruncateString(s1, 13);
+ s2 = %TruncateString(s2, 13);
assertEquals("ABCDEFGHIJKLM", s2);
var s3 = %NewString(26, false);
@@ -69,4 +77,3 @@ test();
test();
%OptimizeFunctionOnNextCall(test);
test();
-
diff --git a/deps/v8/test/mjsunit/string-replace-gc.js b/deps/v8/test/mjsunit/string-replace-gc.js
index 73b310f9c0..2f1efd8813 100644
--- a/deps/v8/test/mjsunit/string-replace-gc.js
+++ b/deps/v8/test/mjsunit/string-replace-gc.js
@@ -53,4 +53,3 @@ var moving_string = "b" + "c";
var bar = foo.replace(/[a]/g, moving_string);
print(bar.length);
-
diff --git a/deps/v8/test/mjsunit/string-search.js b/deps/v8/test/mjsunit/string-search.js
index 4de17bca23..037725b95e 100644
--- a/deps/v8/test/mjsunit/string-search.js
+++ b/deps/v8/test/mjsunit/string-search.js
@@ -37,4 +37,3 @@ for (var i = 0; i < 100; i++) {
var r = s.search(s);
assertEquals(0, r);
}
-
diff --git a/deps/v8/test/mjsunit/string-slices.js b/deps/v8/test/mjsunit/string-slices.js
index 5b1dc360ab..2fec04b0b0 100755
--- a/deps/v8/test/mjsunit/string-slices.js
+++ b/deps/v8/test/mjsunit/string-slices.js
@@ -222,4 +222,15 @@ function test_crankshaft() {
test_crankshaft();
%OptimizeFunctionOnNextCall(test_crankshaft);
-test_crankshaft(); \ No newline at end of file
+test_crankshaft();
+
+var s1 = "12345678901234567890";
+var s2 = "abcdefghijklmnopqrstuvwxyz";
+var c1 = s1 + s2;
+var c2 = s1 + c1 + s2;
+assertEquals("234567890123456789", c1.substring(1, 19));
+assertEquals("bcdefghijklmno", c1.substring(21, 35));
+assertEquals("2345678901234567890abcdefghijklmno", c1.substring(1, 35));
+assertEquals("234567890123456789", c2.substring(1, 19));
+assertEquals("bcdefghijklmno", c2.substring(41, 55));
+assertEquals("2345678901234567890abcdefghijklmno", c2.substring(21, 55));
diff --git a/deps/v8/test/mjsunit/string-split.js b/deps/v8/test/mjsunit/string-split.js
index 1308244cab..efd0ef3eae 100644
--- a/deps/v8/test/mjsunit/string-split.js
+++ b/deps/v8/test/mjsunit/string-split.js
@@ -145,3 +145,22 @@ for (var i = 0; i < 128; i++) {
assertEquals(1, split_chars[i].length);
assertEquals(i, split_chars[i].charCodeAt(0));
}
+
+// Check that the separator is converted to string before returning due to
+// limit == 0.
+var counter = 0;
+var separator = { toString: function() { counter++; return "b"; }};
+assertEquals([], "abc".split(separator, 0));
+assertEquals(1, counter);
+
+// Check that the subject is converted to string before the separator.
+counter = 0;
+var subject = { toString: function() { assertEquals(0, counter);
+ counter++;
+ return "abc"; }};
+separator = { toString: function() { assertEquals(1, counter);
+ counter++;
+ return "b"; }};
+
+assertEquals(["a", "c"], String.prototype.split.call(subject, separator));
+assertEquals(2, counter);
diff --git a/deps/v8/test/mjsunit/switch-opt.js b/deps/v8/test/mjsunit/switch-opt.js
new file mode 100644
index 0000000000..c1d5d38374
--- /dev/null
+++ b/deps/v8/test/mjsunit/switch-opt.js
@@ -0,0 +1,221 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ var result = [];
+ var x = 0;
+
+ function branch(b) {
+ if (b == "deopt") {
+ %DeoptimizeFunction(f);
+ return "c";
+ }
+
+ return b ? "a" : "b";
+ }
+
+ function f(label, b1, b2, b3) {
+ switch (label) {
+ case "string":
+ result.push(1);
+ break;
+ case branch(b1) + branch(b2):
+ result.push(2);
+ break;
+ case 10:
+ result.push(3);
+ break;
+ default:
+ branch(b3);
+ result.push(4);
+ break;
+ case x++:
+ branch(b3);
+ result.push(5);
+ break;
+ }
+ }
+
+ function assertResult(r, label, b1, b2, b3) {
+ f(label, b1, b2, b3);
+ assertEquals(result, r);
+ result = [];
+ }
+
+ // Warmup.
+ assertResult([2], "aa", true, true);
+ assertResult([2], "ab", true, false);
+ assertResult([2], "ba", false, true);
+ assertResult([2], "bb", false, false);
+ assertEquals(0, x);
+ assertResult([4], "other");
+ assertEquals(1, x);
+ assertResult([5], 1, true, true);
+ assertResult([4], 1, true, true);
+ assertResult([5], 3, true, true);
+ assertResult([4], 3, true, true);
+ assertResult([5], 5, true, true);
+ assertResult([4], 5, true, true);
+ assertEquals(7, x);
+
+ // Test regular behavior.
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([2], "aa", true, true);
+ assertResult([1], "string");
+ assertResult([4], "other");
+ assertEquals(8, x);
+ assertResult([5], 8);
+ assertEquals(9, x);
+
+ // Test deopt at the beginning of the case label evaluation.
+ assertResult([2], "ca", "deopt", true);
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([4], "ca", "deopt", false);
+ assertEquals(10, x);
+ %OptimizeFunctionOnNextCall(f);
+
+ // Test deopt in the middle of the case label evaluation.
+ assertResult([2], "ac", true, "deopt");
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([4], "ac", false, "deopt");
+ assertEquals(11, x);
+
+ // Test deopt in the default case.
+ %OptimizeFunctionOnNextCall(f);
+ print("here");
+ assertResult([4], 10000, false, false, "deopt");
+ assertEquals(12, x);
+
+ // Test deopt in the default case.
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([4], 10000, false, false, "deopt");
+ assertEquals(13, x);
+
+ // Test deopt in x++ case.
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([5], 13, false, false, "deopt");
+ assertEquals(14, x);
+})();
+
+
+(function() {
+ var result = [];
+ var x = 0;
+
+ function branch(b) {
+ if (b == "deopt") {
+ %DeoptimizeFunction(f);
+ return "c";
+ }
+
+ return b ? "a" : "b";
+ }
+
+ function f(label, b1, b2, b3) {
+ switch (label) {
+ case "string":
+ result.push(1);
+ break;
+ case branch(b1) + branch(b2):
+ result.push(2);
+ // Fall through.
+ case 10:
+ result.push(3);
+ break;
+ default:
+ branch(b3);
+ result.push(4);
+ // Fall through.
+ case x++:
+ branch(b3);
+ result.push(5);
+ break;
+ }
+ }
+
+ function assertResult(r, label, b1, b2, b3) {
+ f(label, b1, b2, b3);
+ assertEquals(r, result);
+ result = [];
+ }
+
+ // Warmup.
+ assertResult([2,3], "aa", true, true);
+ assertResult([2,3], "ab", true, false);
+ assertResult([2,3], "ba", false, true);
+ assertResult([2,3], "bb", false, false);
+ assertEquals(0, x);
+ assertResult([4,5], "other");
+ assertEquals(1, x);
+ assertResult([5], 1, true, true);
+ assertResult([4,5], 1, true, true);
+ assertResult([5], 3, true, true);
+ assertResult([4,5], 3, true, true);
+ assertResult([5], 5, true, true);
+ assertResult([4,5], 5, true, true);
+ assertEquals(7, x);
+
+ // Test regular behavior.
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([2,3], "aa", true, true);
+ assertResult([1], "string");
+ assertResult([4,5], "other");
+ assertEquals(8, x);
+ assertResult([5], 8);
+ assertEquals(9, x);
+
+ // Test deopt at the beginning of the case label evaluation.
+ assertResult([2,3], "ca", "deopt", true);
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([4,5], "ca", "deopt", false);
+ assertEquals(10, x);
+ %OptimizeFunctionOnNextCall(f);
+
+ // Test deopt in the middle of the case label evaluation.
+ assertResult([2,3], "ac", true, "deopt");
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([4,5], "ac", false, "deopt");
+ assertEquals(11, x);
+
+ // Test deopt in the default case.
+ %OptimizeFunctionOnNextCall(f);
+ print("here");
+ assertResult([4,5], 10000, false, false, "deopt");
+ assertEquals(12, x);
+
+ // Test deopt in the default case.
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([4,5], 10000, false, false, "deopt");
+ assertEquals(13, x);
+
+ // Test deopt in x++ case.
+ %OptimizeFunctionOnNextCall(f);
+ assertResult([5], 13, false, false, "deopt");
+ assertEquals(14, x);
+})();
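These switch tests lean on the evaluation order of non-constant case labels: labels are evaluated top to bottom with strict equality until one matches, so a side-effecting label such as x++ runs only after every earlier label has failed. A tiny standalone illustration (names are made up, not from the test):

var x = 0;
function pick(label) {
  switch (label) {
    case "string": return 1;
    case x++:      return 5;   // evaluated, and x incremented, only if "string" did not match
    default:       return 4;
  }
}
// pick("string") returns 1 and x stays 0;
// pick(0) returns 5 and x becomes 1;
// pick(123) returns 4, yet x still advances to 2 because the label was evaluated and missed.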
diff --git a/deps/v8/test/mjsunit/third_party/array-isarray.js b/deps/v8/test/mjsunit/third_party/array-isarray.js
deleted file mode 100644
index 0fc42a3f27..0000000000
--- a/deps/v8/test/mjsunit/third_party/array-isarray.js
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2009 Apple Computer, Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder(s) nor the names of any
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Based on LayoutTests/fast/js/resources/Array-isArray.js
-
-assertTrue(Array.isArray([]));
-assertTrue(Array.isArray(new Array));
-assertTrue(Array.isArray(Array()));
-assertTrue(Array.isArray('abc'.match(/(a)*/g)));
-assertFalse((function(){ return Array.isArray(arguments); })());
-assertFalse(Array.isArray());
-assertFalse(Array.isArray(null));
-assertFalse(Array.isArray(undefined));
-assertFalse(Array.isArray(true));
-assertFalse(Array.isArray(false));
-assertFalse(Array.isArray('a string'));
-assertFalse(Array.isArray({}));
-assertFalse(Array.isArray({length: 5}));
-assertFalse(Array.isArray({__proto__: Array.prototype, length:1, 0:1, 1:2}));
-
diff --git a/deps/v8/test/mjsunit/third_party/array-splice-webkit.js b/deps/v8/test/mjsunit/third_party/array-splice-webkit.js
deleted file mode 100644
index 974ac55e6b..0000000000
--- a/deps/v8/test/mjsunit/third_party/array-splice-webkit.js
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder(s) nor the names of any
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Simple splice tests based on webkit layout tests.
-var arr = ['a','b','c','d'];
-assertArrayEquals(['a','b','c','d'], arr);
-assertArrayEquals(['c','d'], arr.splice(2));
-assertArrayEquals(['a','b'], arr);
-assertArrayEquals(['a','b'], arr.splice(0));
-assertArrayEquals([], arr)
-
-arr = ['a','b','c','d'];
-assertEquals([], arr.splice())
-assertArrayEquals(['a','b','c','d'], arr);
-assertArrayEquals(['a','b','c','d'], arr.splice(undefined))
-assertArrayEquals([], arr);
-
-arr = ['a','b','c','d'];
-assertArrayEquals(['a','b','c','d'], arr.splice(null))
-assertArrayEquals([], arr);
-
-arr = ['a','b','c','d'];
-assertArrayEquals([], arr.splice(100))
-assertArrayEquals(['a','b','c','d'], arr);
-assertArrayEquals(['d'], arr.splice(-1))
-assertArrayEquals(['a','b','c'], arr);
-
-assertArrayEquals([], arr.splice(2, undefined))
-assertArrayEquals([], arr.splice(2, null))
-assertArrayEquals([], arr.splice(2, -1))
-assertArrayEquals([], arr.splice(2, 0))
-assertArrayEquals(['a','b','c'], arr);
-assertArrayEquals(['c'], arr.splice(2, 100))
-assertArrayEquals(['a','b'], arr);
diff --git a/deps/v8/test/mjsunit/third_party/string-trim.js b/deps/v8/test/mjsunit/third_party/string-trim.js
deleted file mode 100644
index 234dff6dcd..0000000000
--- a/deps/v8/test/mjsunit/third_party/string-trim.js
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (c) 2009 Apple Computer, Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder(s) nor the names of any
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Based on LayoutTests/fast/js/script-tests/string-trim.js
-
-// References to trim(), trimLeft() and trimRight() functions for
-// testing Function's *.call() and *.apply() methods.
-
-var trim = String.prototype.trim;
-var trimLeft = String.prototype.trimLeft;
-var trimRight = String.prototype.trimRight;
-
-var testString = 'foo bar';
-var trimString = '';
-var leftTrimString = '';
-var rightTrimString = '';
-var wsString = '';
-
-var whitespace = [
- {s : '\u0009', t : 'HORIZONTAL TAB'},
- {s : '\u000A', t : 'LINE FEED OR NEW LINE'},
- {s : '\u000B', t : 'VERTICAL TAB'},
- {s : '\u000C', t : 'FORMFEED'},
- {s : '\u000D', t : 'CARRIAGE RETURN'},
- {s : '\u0020', t : 'SPACE'},
- {s : '\u00A0', t : 'NO-BREAK SPACE'},
- {s : '\u2000', t : 'EN QUAD'},
- {s : '\u2001', t : 'EM QUAD'},
- {s : '\u2002', t : 'EN SPACE'},
- {s : '\u2003', t : 'EM SPACE'},
- {s : '\u2004', t : 'THREE-PER-EM SPACE'},
- {s : '\u2005', t : 'FOUR-PER-EM SPACE'},
- {s : '\u2006', t : 'SIX-PER-EM SPACE'},
- {s : '\u2007', t : 'FIGURE SPACE'},
- {s : '\u2008', t : 'PUNCTUATION SPACE'},
- {s : '\u2009', t : 'THIN SPACE'},
- {s : '\u200A', t : 'HAIR SPACE'},
- {s : '\u3000', t : 'IDEOGRAPHIC SPACE'},
- {s : '\u2028', t : 'LINE SEPARATOR'},
- {s : '\u2029', t : 'PARAGRAPH SEPARATOR'},
- {s : '\u200B', t : 'ZERO WIDTH SPACE (category Cf)'}
-];
-
-for (var i = 0; i < whitespace.length; i++) {
- assertEquals(whitespace[i].s.trim(), '');
- assertEquals(whitespace[i].s.trimLeft(), '');
- assertEquals(whitespace[i].s.trimRight(), '');
- wsString += whitespace[i].s;
-}
-
-trimString = wsString + testString + wsString;
-leftTrimString = testString + wsString; // Trimmed from the left.
-rightTrimString = wsString + testString; // Trimmed from the right.
-
-assertEquals(wsString.trim(), '');
-assertEquals(wsString.trimLeft(), '');
-assertEquals(wsString.trimRight(), '');
-
-assertEquals(trimString.trim(), testString);
-assertEquals(trimString.trimLeft(), leftTrimString);
-assertEquals(trimString.trimRight(), rightTrimString);
-
-assertEquals(leftTrimString.trim(), testString);
-assertEquals(leftTrimString.trimLeft(), leftTrimString);
-assertEquals(leftTrimString.trimRight(), testString);
-
-assertEquals(rightTrimString.trim(), testString);
-assertEquals(rightTrimString.trimLeft(), testString);
-assertEquals(rightTrimString.trimRight(), rightTrimString);
-
-var testValues = [0, Infinity, NaN, true, false, ({}), ['an','array'],
- ({toString:function(){return 'wibble'}})
-];
-
-for (var i = 0; i < testValues.length; i++) {
- assertEquals(trim.call(testValues[i]), String(testValues[i]));
- assertEquals(trimLeft.call(testValues[i]), String(testValues[i]));
- assertEquals(trimRight.call(testValues[i]), String(testValues[i]));
-}
diff --git a/deps/v8/test/mjsunit/to-precision.js b/deps/v8/test/mjsunit/to-precision.js
index 04c7d7614e..ded71853b4 100644
--- a/deps/v8/test/mjsunit/to-precision.js
+++ b/deps/v8/test/mjsunit/to-precision.js
@@ -79,4 +79,3 @@ assertEquals("-91.123", (-91.1234).toPrecision(5));
assertEquals("-91.1234", (-91.1234).toPrecision(6));
assertEquals("-91.12340", (-91.1234).toPrecision(7));
assertEquals("-91.123400", (-91.1234).toPrecision(8));
-
diff --git a/deps/v8/test/mjsunit/tobool.js b/deps/v8/test/mjsunit/tobool.js
index 65bffb624f..44a5a3cd40 100644
--- a/deps/v8/test/mjsunit/tobool.js
+++ b/deps/v8/test/mjsunit/tobool.js
@@ -32,5 +32,3 @@ assertTrue(!!new Boolean(false), "new Boolean(false)");
assertTrue(!!new Number(-1), "new Number(-1)");
assertTrue(!!new Number(0), "new Number(0)");
assertTrue(!!new Number(1), "new Number(1)");
-
-
diff --git a/deps/v8/test/mjsunit/tools/profile.js b/deps/v8/test/mjsunit/tools/profile.js
index 4df1a08f92..0f79aeaa83 100644
--- a/deps/v8/test/mjsunit/tools/profile.js
+++ b/deps/v8/test/mjsunit/tools/profile.js
@@ -345,4 +345,3 @@ function assertNodeWeights(root, path, selfTicks, totalTicks) {
}
})();
-
diff --git a/deps/v8/test/mjsunit/touint32.js b/deps/v8/test/mjsunit/touint32.js
index f06bddfcf5..9c4cc36016 100644
--- a/deps/v8/test/mjsunit/touint32.js
+++ b/deps/v8/test/mjsunit/touint32.js
@@ -68,5 +68,3 @@ assertEquals(2147483649, ToUInt32('2147483649'), "'2147483649'");
assertEquals(4294967295, ToUInt32('4294967295'), "'4294967295'");
assertEquals(0, ToUInt32('4294967296'), "'4294967296'");
assertEquals(1, ToUInt32('4294967297'), "'4294967297'");
-
-
diff --git a/deps/v8/test/mjsunit/transcendentals.js b/deps/v8/test/mjsunit/transcendentals.js
index 78e6c48072..2a75d6da82 100644
--- a/deps/v8/test/mjsunit/transcendentals.js
+++ b/deps/v8/test/mjsunit/transcendentals.js
@@ -46,4 +46,3 @@ assertTrue(Math.acos(x) != Math.acos(y));
assertTrue(Math.atan(x) != Math.atan(y));
assertTrue(Math.exp(x) != Math.exp(y));
-
diff --git a/deps/v8/test/mjsunit/transition-elements-kind.js b/deps/v8/test/mjsunit/transition-elements-kind.js
index 9fac78036e..9acf52c0d2 100644
--- a/deps/v8/test/mjsunit/transition-elements-kind.js
+++ b/deps/v8/test/mjsunit/transition-elements-kind.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --track-allocation-sites
+// Flags: --allow-natives-syntax
// Allocation site for empty double arrays.
function foo() {
diff --git a/deps/v8/test/mjsunit/try-finally-continue.js b/deps/v8/test/mjsunit/try-finally-continue.js
index b55e7acc78..5e04930c5d 100644
--- a/deps/v8/test/mjsunit/try-finally-continue.js
+++ b/deps/v8/test/mjsunit/try-finally-continue.js
@@ -69,4 +69,4 @@ for (x in a) {
}
}
}
-assertEquals(9, f); \ No newline at end of file
+assertEquals(9, f);
diff --git a/deps/v8/test/mjsunit/unicode-string-to-number.js b/deps/v8/test/mjsunit/unicode-string-to-number.js
index 13a7acfbc2..2376a60cac 100644
--- a/deps/v8/test/mjsunit/unicode-string-to-number.js
+++ b/deps/v8/test/mjsunit/unicode-string-to-number.js
@@ -43,4 +43,3 @@ for (var i = 0; i < 7; i++) {
s += s;
}
assertTrue(isNaN(Number(s)), "long-string");
-
diff --git a/deps/v8/test/mjsunit/unicode-test.js b/deps/v8/test/mjsunit/unicode-test.js
index 66a029a7ef..5be1b41562 100644
--- a/deps/v8/test/mjsunit/unicode-test.js
+++ b/deps/v8/test/mjsunit/unicode-test.js
@@ -9165,5 +9165,3 @@ var thai_l_regexp = new RegExp(thai_l_thingy);
var thai_l_regexp2 = new RegExp("[" + thai_l_thingy + "]");
assertTrue(thai_l_regexp.test(thai_l_thingy));
assertTrue(thai_l_regexp2.test(thai_l_thingy));
-
-
diff --git a/deps/v8/test/mjsunit/value-wrapper-accessor.js b/deps/v8/test/mjsunit/value-wrapper-accessor.js
new file mode 100644
index 0000000000..2a51fee991
--- /dev/null
+++ b/deps/v8/test/mjsunit/value-wrapper-accessor.js
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// When calling user-defined accessors on strings, booleans or
+// numbers, we should create a wrapper object in classic-mode.
+
+// Flags: --allow-natives-syntax
+
+function test(object, prototype) {
+ var result;
+ Object.defineProperty(prototype, "nonstrict", {
+ get: function() { result = this; },
+ set: function(v) { result = this; }
+ });
+ Object.defineProperty(prototype, "strict", {
+ get: function() { "use strict"; result = this; },
+ set: function(v) { "use strict"; result = this; }
+ });
+
+ (function() {
+ function nonstrict(s) {
+ return s.nonstrict;
+ }
+ function strict(s) {
+ return s.strict;
+ }
+
+ nonstrict(object);
+ nonstrict(object);
+ %OptimizeFunctionOnNextCall(nonstrict);
+ result = undefined;
+ nonstrict(object);
+ assertEquals("object", typeof result);
+
+ strict(object);
+ strict(object);
+ %OptimizeFunctionOnNextCall(strict);
+ result = undefined;
+ strict(object);
+ assertEquals(typeof object, typeof result);
+ })();
+
+ (function() {
+ function nonstrict(s) {
+ return s.nonstrict = 10;
+ }
+ function strict(s) {
+ return s.strict = 10;
+ }
+
+ nonstrict(object);
+ nonstrict(object);
+ %OptimizeFunctionOnNextCall(nonstrict);
+ result = undefined;
+ nonstrict(object);
+ // TODO(1475): Support storing to primitive values.
+ // This should return "object" once storing to primitive values is
+ // supported.
+ assertEquals("undefined", typeof result);
+
+ strict(object);
+ strict(object);
+ %OptimizeFunctionOnNextCall(strict);
+ result = undefined;
+ strict(object);
+ // TODO(1475): Support storing to primitive values.
+ // This should return "object" once storing to primitive values is
+ // supported.
+ assertEquals("undefined", typeof result);
+ })();
+}
+
+test(1, Number.prototype);
+test("string", String.prototype);
+test(true, Boolean.prototype);
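The behaviour pinned down here is the ToObject wrapping of the receiver when a sloppy-mode accessor fires on a primitive, versus the unwrapped primitive seen by a strict-mode accessor. A minimal standalone illustration (getter only; property names invented for the example):

Object.defineProperty(Number.prototype, "whoAmI", {
  get: function() { return typeof this; }               // sloppy: this is a wrapper object
});
Object.defineProperty(Number.prototype, "whoAmIStrict", {
  get: function() { "use strict"; return typeof this; } // strict: this stays a primitive
});
// (42).whoAmI       evaluates to "object"
// (42).whoAmIStrict evaluates to "number"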
diff --git a/deps/v8/test/mjsunit/value-wrapper.js b/deps/v8/test/mjsunit/value-wrapper.js
index 76e200f36e..cc81f95559 100644
--- a/deps/v8/test/mjsunit/value-wrapper.js
+++ b/deps/v8/test/mjsunit/value-wrapper.js
@@ -31,6 +31,7 @@
// When running the tests use loops to ensure that the call site moves through
// the different IC states and that both the runtime system and the generated
// IC code is tested.
+
function RunTests() {
for (var i = 0; i < 10; i++) {
assertEquals('object', 'xxx'.TypeOfThis());
diff --git a/deps/v8/test/mjsunit/verify-assert-false.js b/deps/v8/test/mjsunit/verify-assert-false.js
new file mode 100644
index 0000000000..8bea7df399
--- /dev/null
+++ b/deps/v8/test/mjsunit/verify-assert-false.js
@@ -0,0 +1,30 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-trigger-failure
+
+triggerAssertFalse();
diff --git a/deps/v8/test/mjsunit/verify-check-false.js b/deps/v8/test/mjsunit/verify-check-false.js
new file mode 100644
index 0000000000..426bf846a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/verify-check-false.js
@@ -0,0 +1,30 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-trigger-failure
+
+triggerCheckFalse();
diff --git a/deps/v8/test/mjsunit/whitespaces.js b/deps/v8/test/mjsunit/whitespaces.js
new file mode 100644
index 0000000000..78e4ad5d36
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces.js
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var whitespaces = [
+ // WhiteSpace defined in ECMA-262 5.1, 7.2
+ 0x0009, // Tab TAB
+ 0x000B, // Vertical Tab VT
+ 0x000C, // Form Feed FF
+ 0x0020, // Space SP
+ 0x00A0, // No-break space NBSP
+ 0xFEFF, // Byte Order Mark BOM
+
+ // LineTerminator defined in ECMA-262 5.1, 7.3
+ 0x000A, // Line Feed LF
+ 0x000D, // Carriage Return CR
+ 0x2028, // Line Separator LS
+ 0x2029, // Paragraph Separator PS
+
+ // Unicode 6.3.0 whitespaces (category 'Zs')
+ 0x1680, // Ogham Space Mark
+ 0x180E, // Mongolian Vowel Separator
+ 0x2000, // EN QUAD
+ 0x2001, // EM QUAD
+ 0x2002, // EN SPACE
+ 0x2003, // EM SPACE
+ 0x2004, // THREE-PER-EM SPACE
+ 0x2005, // FOUR-PER-EM SPACE
+ 0x2006, // SIX-PER-EM SPACE
+ 0x2007, // FIGURE SPACE
+ 0x2008, // PUNCTUATION SPACE
+ 0x2009, // THIN SPACE
+ 0x200A, // HAIR SPACE
+ 0x2028, // LINE SEPARATOR
+ 0x2029, // PARAGRAPH SEPARATOR
+ 0x202F, // NARROW NO-BREAK SPACE
+ 0x205F, // MEDIUM MATHEMATICAL SPACE
+ 0x3000, // IDEOGRAPHIC SPACE
+];
+
+// Add single twobyte char to force twobyte representation.
+// Interestingly, snowman is not "white" space :)
+var twobyte = "\u2603";
+var onebyte = "\u007E";
+var twobytespace = "\u2000";
+var onebytespace = "\u0020";
+
+function is_whitespace(c) {
+ return whitespaces.indexOf(c.charCodeAt(0)) > -1;
+}
+
+function test_regexp(str) {
+ var pos_match = str.match(/\s/);
+ var neg_match = str.match(/\S/);
+ var test_char = str[0];
+ var postfix = str[1];
+ if (is_whitespace(test_char)) {
+ assertEquals(test_char, pos_match[0]);
+ assertEquals(postfix, neg_match[0]);
+ } else {
+ assertEquals(test_char, neg_match[0]);
+ assertNull(pos_match);
+ }
+}
+
+function test_trim(c, infix) {
+ var str = c + c + c + infix + c;
+ if (is_whitespace(c)) {
+ assertEquals(infix, str.trim());
+ } else {
+ assertEquals(str, str.trim());
+ }
+}
+
+function test_parseInt(c, postfix) {
+ // Skip if prefix is a digit.
+ if (c >= "0" && c <= "9") return;
+ var str = c + c + "123" + postfix;
+ if (is_whitespace(c)) {
+ assertEquals(123, parseInt(str));
+ } else {
+ assertEquals(NaN, parseInt(str));
+ }
+}
+
+function test_eval(c, content) {
+ if (!is_whitespace(c)) return;
+ var str = c + c + "'" + content + "'" + c + c;
+ assertEquals(content, eval(str));
+}
+
+function test_stringtonumber(c, postfix) {
+ // Skip if prefix is a digit.
+ if (c >= "0" && c <= "9") return;
+ var result = 1 + Number(c + "123" + c + postfix);
+ if (is_whitespace(c)) {
+ assertEquals(124, result);
+ } else {
+ assertEquals(NaN, result);
+ }
+}
+
+for (var i = 0; i < 0x10000; i++) {
+ c = String.fromCharCode(i);
+ test_regexp(c + onebyte);
+ test_regexp(c + twobyte);
+ test_trim(c, onebyte + "trim");
+ test_trim(c, twobyte + "trim");
+ test_parseInt(c, onebyte);
+ test_parseInt(c, twobyte);
+ test_eval(c, onebyte);
+ test_eval(c, twobyte);
+ test_stringtonumber(c, onebytespace);
+ test_stringtonumber(c, twobytespace);
+}
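The final loop cross-checks five independent consumers of the whitespace table (regexp \s, String.prototype.trim, parseInt, ToNumber via the Number constructor, and the scanner via eval) against the same list of code points. For one listed and one unlisted character the encoded expectations look like this (illustrative only):

var nbsp = "\u00A0";    // in the list: every path treats it as whitespace
var tilde = "\u007E";   // not in the list: no path does
// /\s/.test(nbsp) is true,  /\s/.test(tilde) is false
// (nbsp + "x" + nbsp).trim() === "x"
// parseInt(nbsp + "123") === 123,       isNaN(parseInt(tilde + "123"))
// Number(nbsp + "123" + nbsp) === 123,  isNaN(Number(tilde + "123"))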
diff --git a/deps/v8/test/mjsunit/with-value.js b/deps/v8/test/mjsunit/with-value.js
index a4da1fad9c..f7a89f3fb6 100644
--- a/deps/v8/test/mjsunit/with-value.js
+++ b/deps/v8/test/mjsunit/with-value.js
@@ -35,4 +35,4 @@
/* This should be fairly easy again. May need some work in the
compiler's VisitWith() function, or perhaps the runtime routine's
PushContextForWith().
-*/ \ No newline at end of file
+*/
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index b27e991b98..197e65137c 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -75,39 +75,39 @@
'js1_2/function/Number': [SKIP],
# TODO(2018): Temporarily allow timeout in debug mode.
- 'js1_5/GC/regress-203278-2': [PASS, ['mode == debug', TIMEOUT, FAIL]],
+ 'js1_5/GC/regress-203278-2': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS, FAIL]],
##################### SLOW TESTS #####################
# This takes a long time to run (~100 seconds). It should only be run
# by the really patient.
- 'js1_5/GC/regress-324278': [SLOW],
+ 'js1_5/GC/regress-324278': [SKIP],
# This takes a long time to run because our indexOf operation is
# pretty slow - it causes a lot of GCs; see issue
# #926379. We could consider marking this SKIP because it takes a
# while to run to completion.
- 'js1_5/GC/regress-338653': [SLOW],
+ 'js1_5/GC/regress-338653': [SKIP],
# This test is designed to run until it runs out of memory. This takes
# a very long time because it builds strings character by character
# and compiles a lot of regular expressions. We could consider marking
# this SKIP because it takes a while to run to completion.
- 'js1_5/GC/regress-346794': [SLOW],
+ 'js1_5/GC/regress-346794': [SKIP],
# Runs out of memory while trying to build huge string of 'x'
# characters. This takes a long time to run (~32 seconds).
- 'js1_5/GC/regress-348532': [SLOW],
+ 'js1_5/GC/regress-348532': [SKIP],
##################### FLAKY TESTS #####################
# These tests time out in debug mode but pass in product mode
- 'js1_5/Regress/regress-360969-03': [PASS, ['mode == debug', TIMEOUT]],
- 'js1_5/Regress/regress-360969-04': [PASS, ['mode == debug', TIMEOUT]],
- 'js1_5/Regress/regress-360969-05': [PASS, ['mode == debug', TIMEOUT]],
- 'js1_5/Regress/regress-360969-06': [PASS, ['mode == debug', TIMEOUT]],
- 'js1_5/extensions/regress-365527': [PASS, ['mode == debug', TIMEOUT]],
+ 'js1_5/Regress/regress-360969-03': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+ 'js1_5/Regress/regress-360969-04': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+ 'js1_5/Regress/regress-360969-05': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+ 'js1_5/Regress/regress-360969-06': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+ 'js1_5/extensions/regress-365527': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS]],
'js1_5/Regress/regress-280769-3': [PASS, ['mode == debug', FAIL]],
'js1_5/Regress/regress-203278-1': [PASS, ['mode == debug', FAIL]],
@@ -116,7 +116,7 @@
'js1_5/GC/regress-278725': [PASS, ['mode == debug', FAIL]],
# http://b/issue?id=1206983
'js1_5/Regress/regress-367561-03': [PASS, ['mode == debug', FAIL]],
- 'ecma/Date/15.9.5.10-2': [PASS, FAIL, ['mode == debug', TIMEOUT]],
+ 'ecma/Date/15.9.5.10-2': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
# These tests create two Date objects just after each other and
# expects them to match. Sometimes this happens on the border
@@ -166,7 +166,7 @@
'js1_5/Array/regress-99120-01': [PASS, FAIL],
'js1_5/Array/regress-99120-02': [PASS, FAIL],
'js1_5/Regress/regress-347306-01': [PASS, FAIL],
- 'js1_5/Regress/regress-416628': [PASS, FAIL, ['mode == debug', TIMEOUT]],
+ 'js1_5/Regress/regress-416628': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
# The following two tests assume that daylight savings time starts first
@@ -187,7 +187,7 @@
# Tests that sorting arrays of ints is less than 3 times as fast
# as sorting arrays of strings.
- 'js1_5/extensions/regress-371636': [PASS, FAIL, ['mode == debug', TIMEOUT]],
+ 'js1_5/extensions/regress-371636': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
# Tests depend on GC timings. Inherently flaky.
@@ -273,14 +273,18 @@
# PCRE's match limit is reached. SpiderMonkey hangs on the first one,
# JSC returns true somehow. Maybe they up the match limit? There is
# an open V8 bug 676063 about this.
- 'ecma_3/RegExp/regress-330684': [TIMEOUT],
+ # TODO(yangguo): Hangs on complex regexp. Please investigate if this gives
+ # any useful coverage.
+ 'ecma_3/RegExp/regress-330684': [SKIP],
# This test contains a regexp that runs exponentially long. Spidermonkey
# standalone will hang, though apparently inside Firefox it will trigger a
# long-running-script timeout. JSCRE passes by hitting the matchLimit and
# just pretending that an exhaustive search found no match.
- 'ecma_3/RegExp/regress-307456': [PASS, TIMEOUT],
+ # TODO(yangguo): Hangs on complex regexp. Please investigate if this gives
+ # any useful coverage.
+ 'ecma_3/RegExp/regress-307456': [SKIP],
# We do not detect overflow in bounds for back references and {}
@@ -599,7 +603,6 @@
# Negative hexadecimal literals are parsed as NaN. This test is outdated.
'ecma/TypeConversion/9.3.1-3': [FAIL_OK],
-
##################### FAILING TESTS #####################
# This section is for tests that fail in V8 and pass in JSC.
@@ -614,7 +617,7 @@
# This test seems designed to fail (it produces a 700Mbyte string).
# We fail on out of memory. The important thing is not to crash.
- 'js1_5/Regress/regress-303213': [FAIL, ['mode == debug', TIMEOUT]],
+ 'js1_5/Regress/regress-303213': [FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
# This test fails since we now throw in String.prototype.match when apply
# is given null or undefined as this argument (and so does firefox nightly).
@@ -748,12 +751,15 @@
'js1_5/extensions/toLocaleFormat-01': [FAIL_OK],
'js1_5/extensions/toLocaleFormat-02': [FAIL_OK],
- 'js1_5/extensions/regress-330569': [TIMEOUT],
- 'js1_5/extensions/regress-351448': [TIMEOUT],
+ # TODO(yangguo): Both tests have complex regular expressions (nested (.*)*
+ # and the like). Please investigate if these tests provide any coverage.
+ # Furthermore, an exception is expected which is not thrown by v8.
+ 'js1_5/extensions/regress-330569': [SKIP],
+ 'js1_5/extensions/regress-351448': [SKIP],
# In the 64-bit version, this test takes longer to run out of memory
# than it does in the 32-bit version when attempting to generate a huge
# error message in debug mode.
- 'js1_5/extensions/regress-336410-1': [FAIL_OK, ['mode == debug and arch == x64', TIMEOUT]],
+ 'js1_5/extensions/regress-336410-1': [FAIL_OK, ['mode == debug and arch == x64', TIMEOUT, NO_VARIANTS]],
##################### DECOMPILATION TESTS #####################
@@ -816,7 +822,7 @@
}], # ALWAYS
-['arch == arm', {
+['arch == arm or arch == a64', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
@@ -832,8 +838,14 @@
# BUG(1040): Allow this test to timeout.
- 'js1_5/GC/regress-203278-2': [PASS, TIMEOUT],
-}], # 'arch == arm'
+ 'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
+}], # 'arch == arm or arch == a64'
+
+
+['arch == a64', {
+ # BUG(v8:3152): Runs out of stack in debug mode.
+ 'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
+}], # 'arch == a64'
['arch == mipsel', {
@@ -852,6 +864,27 @@
# BUG(1040): Allow this test to timeout.
- 'js1_5/GC/regress-203278-2': [PASS, TIMEOUT],
+ 'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mipsel'
+
+['arch == a64 and simulator_run == True', {
+
+ 'js1_5/GC/regress-203278-2': [SKIP],
+
+ # These tests time out in debug mode but pass in product mode
+ 'js1_5/Regress/regress-360969-03': [SKIP],
+ 'js1_5/Regress/regress-360969-04': [SKIP],
+ 'js1_5/Regress/regress-360969-05': [SKIP],
+ 'js1_5/Regress/regress-360969-06': [SKIP],
+ 'js1_5/extensions/regress-365527': [SKIP],
+ 'ecma/Date/15.9.5.10-2': [SKIP],
+ 'js1_5/Regress/regress-416628': [SKIP],
+ 'js1_5/extensions/regress-371636': [SKIP],
+ 'ecma_3/RegExp/regress-330684': [SKIP],
+ 'ecma_3/RegExp/regress-307456': [SKIP],
+ 'js1_5/Regress/regress-303213': [SKIP],
+ 'js1_5/extensions/regress-330569': [SKIP],
+ 'js1_5/extensions/regress-351448': [SKIP],
+ 'js1_5/extensions/regress-336410-1': [SKIP],
+}], # 'arch == a64 and simulator_run == True'
]
diff --git a/deps/v8/test/preparser/non-use-strict-hex-escape.js b/deps/v8/test/preparser/non-use-strict-hex-escape.js
index bf28923160..44db66e42f 100644
--- a/deps/v8/test/preparser/non-use-strict-hex-escape.js
+++ b/deps/v8/test/preparser/non-use-strict-hex-escape.js
@@ -32,4 +32,4 @@ function foo() {
"use\x20strict";
var x = "hello\040world";
return x;
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/preparser/non-use-strict-octal-escape.js b/deps/v8/test/preparser/non-use-strict-octal-escape.js
index 9e00742c21..55f035a7e0 100644
--- a/deps/v8/test/preparser/non-use-strict-octal-escape.js
+++ b/deps/v8/test/preparser/non-use-strict-octal-escape.js
@@ -32,4 +32,4 @@ function foo() {
"use\040strict";
var x = "hello\040world";
return x;
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/preparser/non-use-strict-uhex-escape.js b/deps/v8/test/preparser/non-use-strict-uhex-escape.js
index 5fba673163..c7df2cb97e 100644
--- a/deps/v8/test/preparser/non-use-strict-uhex-escape.js
+++ b/deps/v8/test/preparser/non-use-strict-uhex-escape.js
@@ -32,4 +32,4 @@ function foo() {
"use\u0020strict";
var x = "hello\040world";
return x;
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/preparser/nonstrict-arguments.js b/deps/v8/test/preparser/nonstrict-arguments.js
index 890f62e739..43c7e2e940 100644
--- a/deps/v8/test/preparser/nonstrict-arguments.js
+++ b/deps/v8/test/preparser/nonstrict-arguments.js
@@ -48,4 +48,3 @@ arguments -= ++arguments;
arguments *= arguments--;
function arguments(arguments) {};
try {} catch (arguments) {}
-
diff --git a/deps/v8/test/preparser/nonstrict-eval.js b/deps/v8/test/preparser/nonstrict-eval.js
index ad994ab438..db6feda3e3 100644
--- a/deps/v8/test/preparser/nonstrict-eval.js
+++ b/deps/v8/test/preparser/nonstrict-eval.js
@@ -48,4 +48,3 @@ eval -= ++eval;
eval *= eval--;
function eval(eval) {};
try {} catch (eval) {}
-
diff --git a/deps/v8/test/preparser/nonstrict-with.js b/deps/v8/test/preparser/nonstrict-with.js
index 12d05a0536..17f0c930a6 100644
--- a/deps/v8/test/preparser/nonstrict-with.js
+++ b/deps/v8/test/preparser/nonstrict-with.js
@@ -38,6 +38,3 @@ with ({x : 42}) {
}
with ({}) {}
-
-
-
diff --git a/deps/v8/test/preparser/preparser.status b/deps/v8/test/preparser/preparser.status
index 9d69988f71..babf35d5d8 100644
--- a/deps/v8/test/preparser/preparser.status
+++ b/deps/v8/test/preparser/preparser.status
@@ -25,6 +25,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# We don't parse RegExps at scanning time, so we can't fail on octal
+# escapes (we need to parse to distinguish octal escapes from valid
+# back-references).
[
[ALWAYS, {
# TODO(mstarzinger): This script parses but throws a TypeError when run.
diff --git a/deps/v8/test/preparser/strict-const.js b/deps/v8/test/preparser/strict-const.js
index 91e9e39e7c..2b9230c053 100644
--- a/deps/v8/test/preparser/strict-const.js
+++ b/deps/v8/test/preparser/strict-const.js
@@ -26,4 +26,4 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"use strict";
-const x = 42; \ No newline at end of file
+const x = 42;
diff --git a/deps/v8/test/preparser/strict-octal-indirect-regexp.js b/deps/v8/test/preparser/strict-octal-indirect-regexp.js
index 122bd3de06..ea5c5e3348 100644
--- a/deps/v8/test/preparser/strict-octal-indirect-regexp.js
+++ b/deps/v8/test/preparser/strict-octal-indirect-regexp.js
@@ -31,4 +31,4 @@ function foo() {
"use strict";
var re = RegExp("Hello\\040World");
return re;
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/preparser/strict-octal-number.js b/deps/v8/test/preparser/strict-octal-number.js
index d387d6a32e..3e991279fa 100644
--- a/deps/v8/test/preparser/strict-octal-number.js
+++ b/deps/v8/test/preparser/strict-octal-number.js
@@ -31,4 +31,4 @@ function foo() {
"use strict";
var x = 012;
return x;
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/preparser/strict-octal-regexp.js b/deps/v8/test/preparser/strict-octal-regexp.js
index fded9bf86c..b39d0b27c7 100644
--- a/deps/v8/test/preparser/strict-octal-regexp.js
+++ b/deps/v8/test/preparser/strict-octal-regexp.js
@@ -31,4 +31,4 @@ function foo() {
"use strict";
var re = /hello\040world/;
return re;
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/preparser/strict-octal-string.js b/deps/v8/test/preparser/strict-octal-string.js
index 40408e6fe6..87c0e99fb1 100644
--- a/deps/v8/test/preparser/strict-octal-string.js
+++ b/deps/v8/test/preparser/strict-octal-string.js
@@ -31,4 +31,4 @@ function foo() {
"use strict";
var x = "hello\040world";
return x;
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/preparser/strict-octal-use-strict-after.js b/deps/v8/test/preparser/strict-octal-use-strict-after.js
index 1af078a8f6..57d0f20151 100644
--- a/deps/v8/test/preparser/strict-octal-use-strict-after.js
+++ b/deps/v8/test/preparser/strict-octal-use-strict-after.js
@@ -32,4 +32,4 @@ function foo() {
"use strict";
"use\040strict";
return true;
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/preparser/strict-octal-use-strict-before.js b/deps/v8/test/preparser/strict-octal-use-strict-before.js
index 1dbb57128d..bfc380f950 100644
--- a/deps/v8/test/preparser/strict-octal-use-strict-before.js
+++ b/deps/v8/test/preparser/strict-octal-use-strict-before.js
@@ -32,4 +32,4 @@ function foo() {
"use\040strict";
"use strict";
return true;
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/preparser/strict-with.js b/deps/v8/test/preparser/strict-with.js
index a19355eec1..411fc2926c 100644
--- a/deps/v8/test/preparser/strict-with.js
+++ b/deps/v8/test/preparser/strict-with.js
@@ -30,4 +30,4 @@
function foo() {
"use strict";
with ({}) {}
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index e546266f3a..fdbdc65118 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -29,12 +29,6 @@
[ALWAYS, {
############################### BUGS ###################################
- # Sequencing of getter side effects on receiver and argument properties
- # is wrong. The receiver callback should be called before any arguments
- # are evaluated.
- # V8 Bug: http://code.google.com/p/v8/issues/detail?id=691
- '11.2.3-3_3': [FAIL],
-
'15.5.4.9_CE': [['no_i18n', SKIP]],
######################## NEEDS INVESTIGATION ###########################
@@ -73,6 +67,7 @@
# trigonometric functions are platform/compiler dependent. Furthermore, the
# expectation values by far deviates from the actual result given by an
# arbitrary-precision calculator, making those tests partly bogus.
+ 'S15.8.2.7_A7': [PASS, FAIL_OK], # Math.cos
'S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
'S15.8.2.16_A7': [PASS, FAIL_OK], # Math.sin
'S15.8.2.18_A7': [PASS, FAIL_OK], # Math.tan
@@ -104,7 +99,7 @@
'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
-['arch == arm or arch == mipsel', {
+['arch == arm or arch == mipsel or arch == a64', {
# TODO(mstarzinger): Causes stack overflow on simulators due to eager
# compilation of parenthesized function literals. Needs investigation.
@@ -117,5 +112,5 @@
'S15.1.3.2_A2.5_T1': [SKIP],
'S15.1.3.3_A2.3_T1': [SKIP],
'S15.1.3.4_A2.3_T1': [SKIP],
-}], # 'arch == arm or arch == mipsel'
+}], # 'arch == arm or arch == mipsel or arch == a64'
]
diff --git a/deps/v8/test/webkit/Object-create-expected.txt b/deps/v8/test/webkit/Object-create-expected.txt
index 519cfb4436..ff31544028 100644
--- a/deps/v8/test/webkit/Object-create-expected.txt
+++ b/deps/v8/test/webkit/Object-create-expected.txt
@@ -26,8 +26,8 @@ Test to ensure correct behaviour of Object.defineProperties
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS Object.create() threw exception TypeError: Object prototype may only be an Object or null.
-PASS Object.create('a string') threw exception TypeError: Object prototype may only be an Object or null.
+PASS Object.create() threw exception TypeError: Object prototype may only be an Object or null: undefined.
+PASS Object.create('a string') threw exception TypeError: Object prototype may only be an Object or null: a string.
PASS Object.create({}, 'a string') threw exception TypeError: Property description must be an object: a.
PASS Object.create(null, 'a string') threw exception TypeError: Property description must be an object: a.
PASS JSON.stringify(Object.create(null,{property:{value:'foo', enumerable:true}, property2:{value:'foo', enumerable:true}})) is '{"property":"foo","property2":"foo"}'
diff --git a/deps/v8/test/webkit/arguments-bad-index.js b/deps/v8/test/webkit/arguments-bad-index.js
index a4dd4d53b9..28a4fd67f5 100644
--- a/deps/v8/test/webkit/arguments-bad-index.js
+++ b/deps/v8/test/webkit/arguments-bad-index.js
@@ -34,4 +34,3 @@ shouldBe('indexArguments(1, "a")', '"a"');
shouldBe('indexArguments("1 ", "a")', 'undefined');
shouldBe('indexArguments(0xDEADBEEF)', 'undefined');
shouldBe('indexArguments(0xFFFFFFFF)', 'undefined');
-
diff --git a/deps/v8/test/webkit/array-splice-expected.txt b/deps/v8/test/webkit/array-splice-expected.txt
new file mode 100644
index 0000000000..800f2cdfe3
--- /dev/null
+++ b/deps/v8/test/webkit/array-splice-expected.txt
@@ -0,0 +1,53 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This tests array.splice behavior.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+
+PASS arr is ['a','b','c','d']
+PASS arr.splice(2) is ['c','d']
+PASS arr is ['a','b']
+PASS arr.splice(0) is ['a','b']
+PASS arr is []
+PASS arr.splice() is []
+PASS arr is ['a','b','c','d']
+PASS arr.splice(undefined) is ['a','b','c','d']
+PASS arr is []
+PASS arr.splice(null) is ['a','b','c','d']
+PASS arr is []
+PASS arr.splice(100) is []
+PASS arr is ['a','b','c','d']
+PASS arr.splice(-1) is ['d']
+PASS arr is ['a','b','c']
+PASS arr.splice(2, undefined) is []
+PASS arr.splice(2, null) is []
+PASS arr.splice(2, -1) is []
+PASS arr is ['a','b','c']
+PASS arr.splice(2, 100) is ['c']
+PASS arr is ['a','b']
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/array-splice.js b/deps/v8/test/webkit/array-splice.js
new file mode 100644
index 0000000000..045e39e379
--- /dev/null
+++ b/deps/v8/test/webkit/array-splice.js
@@ -0,0 +1,61 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+description(
+"This tests array.splice behavior."
+);
+
+var arr = ['a','b','c','d'];
+shouldBe("arr", "['a','b','c','d']");
+shouldBe("arr.splice(2)", "['c','d']");
+shouldBe("arr", "['a','b']");
+shouldBe("arr.splice(0)", "['a','b']");
+shouldBe("arr", "[]")
+
+arr = ['a','b','c','d'];
+shouldBe("arr.splice()", "[]")
+shouldBe("arr", "['a','b','c','d']");
+shouldBe("arr.splice(undefined)", "['a','b','c','d']")
+shouldBe("arr", "[]");
+
+arr = ['a','b','c','d'];
+shouldBe("arr.splice(null)", "['a','b','c','d']")
+shouldBe("arr", "[]");
+
+arr = ['a','b','c','d'];
+shouldBe("arr.splice(100)", "[]")
+shouldBe("arr", "['a','b','c','d']");
+shouldBe("arr.splice(-1)", "['d']")
+shouldBe("arr", "['a','b','c']");
+
+shouldBe("arr.splice(2, undefined)", "[]")
+shouldBe("arr.splice(2, null)", "[]")
+shouldBe("arr.splice(2, -1)", "[]")
+shouldBe("arr", "['a','b','c']");
+shouldBe("arr.splice(2, 100)", "['c']")
+shouldBe("arr", "['a','b']");
+
+// Check this doesn't crash.
+try {
+ String(Array(0xFFFFFFFD).splice(0));
+} catch (e) { }
diff --git a/deps/v8/test/webkit/concat-while-having-a-bad-time.js b/deps/v8/test/webkit/concat-while-having-a-bad-time.js
index dfda1e08a0..2c29917d5b 100644
--- a/deps/v8/test/webkit/concat-while-having-a-bad-time.js
+++ b/deps/v8/test/webkit/concat-while-having-a-bad-time.js
@@ -27,5 +27,3 @@ description(
Object.defineProperty(Array.prototype, 0, { writable: false });
shouldBe("[42].concat()", "[42]");
-
-
diff --git a/deps/v8/test/webkit/dfg-abs-backwards-propagation.js b/deps/v8/test/webkit/dfg-abs-backwards-propagation.js
index 828e2d620e..1685ba08e2 100644
--- a/deps/v8/test/webkit/dfg-abs-backwards-propagation.js
+++ b/deps/v8/test/webkit/dfg-abs-backwards-propagation.js
@@ -46,4 +46,3 @@ for (var i = 0; i < 200; ++i) {
}
shouldBe("foo(" + f + ", " + array + ", 0)", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-add-not-number.js b/deps/v8/test/webkit/dfg-add-not-number.js
index df6dcdf2ef..1d3764662e 100644
--- a/deps/v8/test/webkit/dfg-add-not-number.js
+++ b/deps/v8/test/webkit/dfg-add-not-number.js
@@ -32,4 +32,3 @@ function foo(a) {
for (var i = 0; i < 100; ++i)
shouldBe("foo(\"foo\" + i)", "NaN");
-
diff --git a/deps/v8/test/webkit/dfg-arguments-alias-escape.js b/deps/v8/test/webkit/dfg-arguments-alias-escape.js
index f7a012eebd..dce0c3dfd6 100644
--- a/deps/v8/test/webkit/dfg-arguments-alias-escape.js
+++ b/deps/v8/test/webkit/dfg-arguments-alias-escape.js
@@ -40,4 +40,3 @@ function bar(x) {
for (var i = 0; i < 200; ++i)
shouldBe("bar(42)", "1764");
-
diff --git a/deps/v8/test/webkit/dfg-arguments-alias-one-block-overwrite.js b/deps/v8/test/webkit/dfg-arguments-alias-one-block-overwrite.js
index e94ad8b011..33154e99ff 100644
--- a/deps/v8/test/webkit/dfg-arguments-alias-one-block-overwrite.js
+++ b/deps/v8/test/webkit/dfg-arguments-alias-one-block-overwrite.js
@@ -42,4 +42,3 @@ for (var i = 0; i < 300; ++i) {
var c = i + 3;
shouldBe("foo(a, b, c)", "6");
}
-
diff --git a/deps/v8/test/webkit/dfg-arguments-alias.js b/deps/v8/test/webkit/dfg-arguments-alias.js
index c30fe89eb7..6fc7195e8c 100644
--- a/deps/v8/test/webkit/dfg-arguments-alias.js
+++ b/deps/v8/test/webkit/dfg-arguments-alias.js
@@ -39,4 +39,3 @@ function bar(x) {
for (var i = 0; i < 200; ++i)
shouldBe("bar(42)", "42");
-
diff --git a/deps/v8/test/webkit/dfg-arguments-mixed-alias.js b/deps/v8/test/webkit/dfg-arguments-mixed-alias.js
index 477eac3d5f..7d52ffb757 100644
--- a/deps/v8/test/webkit/dfg-arguments-mixed-alias.js
+++ b/deps/v8/test/webkit/dfg-arguments-mixed-alias.js
@@ -39,4 +39,3 @@ function bar(x) {
for (var i = 0; i < 200; ++i)
shouldBe("bar(42)", "42");
-
diff --git a/deps/v8/test/webkit/dfg-arguments-out-of-bounds.js b/deps/v8/test/webkit/dfg-arguments-out-of-bounds.js
index 8774d2cc09..e6e9a017ef 100644
--- a/deps/v8/test/webkit/dfg-arguments-out-of-bounds.js
+++ b/deps/v8/test/webkit/dfg-arguments-out-of-bounds.js
@@ -49,4 +49,3 @@ for (var i = 0; i < 3000; ++i) {
result = "" +foo.apply(void 0, args);
shouldBe("result", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-predicted-but-not-proven-int.js b/deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-predicted-but-not-proven-int.js
index 54c049f1c2..ecd2ed2ffa 100644
--- a/deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-predicted-but-not-proven-int.js
+++ b/deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-predicted-but-not-proven-int.js
@@ -64,4 +64,3 @@ for (var i = 0; i < warmup + badCases.length; ++i) {
}
shouldBe("bar(" + a + ", " + b + ", {f:" + c + "})", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-tower-of-large-numbers.js b/deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-tower-of-large-numbers.js
index 292926eaba..718fa5ee5d 100644
--- a/deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-tower-of-large-numbers.js
+++ b/deps/v8/test/webkit/dfg-arith-add-overflow-check-elimination-tower-of-large-numbers.js
@@ -57,4 +57,3 @@ for (var i = 0; i < warmup + 1; ++i) {
}
shouldBe("bar(" + a + ", " + b + ")", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-array-dead.js b/deps/v8/test/webkit/dfg-array-dead.js
index a6276178f3..c40a37c119 100644
--- a/deps/v8/test/webkit/dfg-array-dead.js
+++ b/deps/v8/test/webkit/dfg-array-dead.js
@@ -49,4 +49,3 @@ function bar(x) {
for (var i = 0; i < 1000; ++i) {
shouldBe("foo()", "2");
}
-
diff --git a/deps/v8/test/webkit/dfg-array-length-dead.js b/deps/v8/test/webkit/dfg-array-length-dead.js
index 5256fe1eba..9191524edf 100644
--- a/deps/v8/test/webkit/dfg-array-length-dead.js
+++ b/deps/v8/test/webkit/dfg-array-length-dead.js
@@ -33,4 +33,3 @@ function foo(x) {
for (var i = 0; i < 1000; ++i) {
shouldBe("foo({f:[]})", "42");
}
-
diff --git a/deps/v8/test/webkit/dfg-branch-logical-not-peephole-around-osr-exit.js b/deps/v8/test/webkit/dfg-branch-logical-not-peephole-around-osr-exit.js
index ab0215e7b5..9e92c24e8c 100644
--- a/deps/v8/test/webkit/dfg-branch-logical-not-peephole-around-osr-exit.js
+++ b/deps/v8/test/webkit/dfg-branch-logical-not-peephole-around-osr-exit.js
@@ -39,4 +39,3 @@ for (var i = 0; i < 200; ++i) {
var expected = i + (i == 199 ? 2147483647 : 5) + (i !== 5 ? 1 : -1)
shouldBe("foo(i, 5, i == 199 ? 2147483647 : 5)", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-branch-not-fail.js b/deps/v8/test/webkit/dfg-branch-not-fail.js
index af7ba97052..31de5afc47 100644
--- a/deps/v8/test/webkit/dfg-branch-not-fail.js
+++ b/deps/v8/test/webkit/dfg-branch-not-fail.js
@@ -51,4 +51,3 @@ for (var i = 0; i < 200; ++i) {
shouldBe("bar({f:True})", "0");
shouldBe("bar({f:False})", "1");
}
-
diff --git a/deps/v8/test/webkit/dfg-call-function-hit-watchpoint.js b/deps/v8/test/webkit/dfg-call-function-hit-watchpoint.js
index 5a332c19f8..6ee1f687cd 100644
--- a/deps/v8/test/webkit/dfg-call-function-hit-watchpoint.js
+++ b/deps/v8/test/webkit/dfg-call-function-hit-watchpoint.js
@@ -43,4 +43,3 @@ for (var i = 0; i < 200; ++i) {
expected = -1;
shouldBe("bar(i, i + 1)", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-call-method-hit-watchpoint.js b/deps/v8/test/webkit/dfg-call-method-hit-watchpoint.js
index 382f899fee..8c486c786e 100644
--- a/deps/v8/test/webkit/dfg-call-method-hit-watchpoint.js
+++ b/deps/v8/test/webkit/dfg-call-method-hit-watchpoint.js
@@ -50,4 +50,3 @@ for (var i = 0; i < 200; ++i) {
expected = 56;
shouldBe("callFoo(o)", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-cfa-prove-put-by-id-simple-when-storing-to-specialized-function.js b/deps/v8/test/webkit/dfg-cfa-prove-put-by-id-simple-when-storing-to-specialized-function.js
index 6953ffa466..870b73d52a 100644
--- a/deps/v8/test/webkit/dfg-cfa-prove-put-by-id-simple-when-storing-to-specialized-function.js
+++ b/deps/v8/test/webkit/dfg-cfa-prove-put-by-id-simple-when-storing-to-specialized-function.js
@@ -52,4 +52,3 @@ function fuzz() {
}
shouldBe("bar.call({f:baz}, fuzz)", "\"fuzz\"");
-
diff --git a/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null-and-decrement.js b/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null-and-decrement.js
index cac2d10241..aea028da7e 100644
--- a/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null-and-decrement.js
+++ b/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null-and-decrement.js
@@ -51,5 +51,3 @@ for (var i = 0; i < 500; ++i) {
}
shouldBe("foo(o)", expected);
}
-
-
diff --git a/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null.js b/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null.js
index 7226424426..f2312bd962 100644
--- a/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null.js
+++ b/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-branch-not-null.js
@@ -49,4 +49,3 @@ for (var i = 0; i < 500; ++i) {
}
shouldBe("foo(o)", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-typeof.js b/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-typeof.js
index bb4fee9261..c422d31c86 100644
--- a/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-typeof.js
+++ b/deps/v8/test/webkit/dfg-cfg-simplify-eliminate-set-local-type-check-then-typeof.js
@@ -51,4 +51,3 @@ for (var i = 0; i < 500; ++i) {
shouldBe("result[0]", expectedFirst);
shouldBe("result[1]", expectedSecond);
}
-
diff --git a/deps/v8/test/webkit/dfg-cfg-simplify-phantom-get-local-on-same-block-set-local.js b/deps/v8/test/webkit/dfg-cfg-simplify-phantom-get-local-on-same-block-set-local.js
index 65be965ccf..73daacd62a 100644
--- a/deps/v8/test/webkit/dfg-cfg-simplify-phantom-get-local-on-same-block-set-local.js
+++ b/deps/v8/test/webkit/dfg-cfg-simplify-phantom-get-local-on-same-block-set-local.js
@@ -60,4 +60,3 @@ function thingy(o) {
for (var i = 0; i < 200; ++i)
shouldBe("thingy(o)", "42");
-
diff --git a/deps/v8/test/webkit/dfg-check-structure-elimination-for-non-cell-expected.txt b/deps/v8/test/webkit/dfg-check-structure-elimination-for-non-cell-expected.txt
index 63b6ba2bc3..5a0941dd39 100644
--- a/deps/v8/test/webkit/dfg-check-structure-elimination-for-non-cell-expected.txt
+++ b/deps/v8/test/webkit/dfg-check-structure-elimination-for-non-cell-expected.txt
@@ -216,7 +216,7 @@ PASS baz(i) is 66
PASS baz(i) is 66
PASS baz(i) is 66
PASS baz(i) is 66
-Caught exception: TypeError: Cannot call method 'g' of null
+Caught exception: TypeError: Cannot read property 'g' of null
PASS baz(i) is "ERROR"
PASS baz(i) is 66
PASS baz(i) is 66
diff --git a/deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js b/deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js
index 8b986a173d..491dda159e 100644
--- a/deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js
+++ b/deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js
@@ -36,4 +36,3 @@ for (var i = 0; i < 100; ++i) {
} else
shouldThrow("foo({f:42}, null)");
}
-
diff --git a/deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-proven-final-object.js b/deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-proven-final-object.js
index f49eed9e80..4b1694ba94 100644
--- a/deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-proven-final-object.js
+++ b/deps/v8/test/webkit/dfg-compare-final-object-to-final-object-or-other-when-proven-final-object.js
@@ -36,4 +36,3 @@ for (var i = 0; i < 100; ++i) {
} else
shouldThrow("foo({f:42}, null)");
}
-
diff --git a/deps/v8/test/webkit/dfg-constant-fold-first-local-read-after-block-merge.js b/deps/v8/test/webkit/dfg-constant-fold-first-local-read-after-block-merge.js
index 396924240d..85ae201b85 100644
--- a/deps/v8/test/webkit/dfg-constant-fold-first-local-read-after-block-merge.js
+++ b/deps/v8/test/webkit/dfg-constant-fold-first-local-read-after-block-merge.js
@@ -44,4 +44,3 @@ function bar(x) {
for (var i = 0; i < 200; ++i)
shouldBe("bar(5)", "42");
-
diff --git a/deps/v8/test/webkit/dfg-constant-fold-logical-not-branch.js b/deps/v8/test/webkit/dfg-constant-fold-logical-not-branch.js
index 6a91b14cba..bb48f63f21 100644
--- a/deps/v8/test/webkit/dfg-constant-fold-logical-not-branch.js
+++ b/deps/v8/test/webkit/dfg-constant-fold-logical-not-branch.js
@@ -51,5 +51,3 @@ for (var i = 0; i < 1000; ++i) {
shouldBe("foo1(new Stuff())", "43");
shouldBe("foo2(new Stuff())", "58");
}
-
-
diff --git a/deps/v8/test/webkit/dfg-constant-fold-misprediction.js b/deps/v8/test/webkit/dfg-constant-fold-misprediction.js
index cd73f705a2..9760e4f56a 100644
--- a/deps/v8/test/webkit/dfg-constant-fold-misprediction.js
+++ b/deps/v8/test/webkit/dfg-constant-fold-misprediction.js
@@ -56,5 +56,3 @@ function foo(x) {
// Call foo() enough times to make totally sure that we optimize.
for (var i = 0; i < 5; ++i)
shouldBe("foo(0.5)", "1000.50025");
-
-
diff --git a/deps/v8/test/webkit/dfg-cse-cfa-discrepancy.js b/deps/v8/test/webkit/dfg-cse-cfa-discrepancy.js
index a40e7e6931..561776542e 100644
--- a/deps/v8/test/webkit/dfg-cse-cfa-discrepancy.js
+++ b/deps/v8/test/webkit/dfg-cse-cfa-discrepancy.js
@@ -51,4 +51,3 @@ function foo(a, b) {
for (var i = 0; i < 200; ++i)
shouldBe("foo(i, i + 1)", "162");
-
diff --git a/deps/v8/test/webkit/dfg-cse-dead-get-scoped-var.js b/deps/v8/test/webkit/dfg-cse-dead-get-scoped-var.js
index aec600b6f9..de181913c0 100644
--- a/deps/v8/test/webkit/dfg-cse-dead-get-scoped-var.js
+++ b/deps/v8/test/webkit/dfg-cse-dead-get-scoped-var.js
@@ -38,4 +38,3 @@ function foo(a) {
for (var i = 0; i < 1000; ++i)
shouldBe("foo(i)(false)", "42");
-
diff --git a/deps/v8/test/webkit/dfg-dead-min-one-arg.js b/deps/v8/test/webkit/dfg-dead-min-one-arg.js
index 48293be9d8..3195f5b3ce 100644
--- a/deps/v8/test/webkit/dfg-dead-min-one-arg.js
+++ b/deps/v8/test/webkit/dfg-dead-min-one-arg.js
@@ -44,4 +44,3 @@ for (var i = 0; i < 200; ++i) {
shouldBe("ok", "" + i);
shouldBe("result", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-dead-min-two-args.js b/deps/v8/test/webkit/dfg-dead-min-two-args.js
index f723f43b3a..d5b2a50aaa 100644
--- a/deps/v8/test/webkit/dfg-dead-min-two-args.js
+++ b/deps/v8/test/webkit/dfg-dead-min-two-args.js
@@ -67,4 +67,3 @@ for (var i = 0; i < 200; ++i) {
shouldBe("ok", "" + i);
shouldBe("result", "100");
}
-
diff --git a/deps/v8/test/webkit/dfg-dead-speculation.js b/deps/v8/test/webkit/dfg-dead-speculation.js
index 7778f5a48e..71cdd0f7c0 100644
--- a/deps/v8/test/webkit/dfg-dead-speculation.js
+++ b/deps/v8/test/webkit/dfg-dead-speculation.js
@@ -42,4 +42,3 @@ for (var i = 0; i < 200; ++i) {
shouldBe("foo(thingy, i)", "10");
shouldBe("variable", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-dead-variable-on-exit.js b/deps/v8/test/webkit/dfg-dead-variable-on-exit.js
index 0f9f48c8cd..bc4c9ea793 100644
--- a/deps/v8/test/webkit/dfg-dead-variable-on-exit.js
+++ b/deps/v8/test/webkit/dfg-dead-variable-on-exit.js
@@ -84,4 +84,3 @@ for (var i = 0; i < 300; ++i) {
code += "); foo(firstArg, secondArg)";
shouldBe(code, "1");
}
-
diff --git a/deps/v8/test/webkit/dfg-double-use-of-post-simplification-double-prediction.js b/deps/v8/test/webkit/dfg-double-use-of-post-simplification-double-prediction.js
index cf385791c0..2c11eb154e 100644
--- a/deps/v8/test/webkit/dfg-double-use-of-post-simplification-double-prediction.js
+++ b/deps/v8/test/webkit/dfg-double-use-of-post-simplification-double-prediction.js
@@ -37,4 +37,3 @@ function foo(a) {
for (var i = 0; i < 200; ++i)
shouldBe("foo(0.5)", "42.5");
-
diff --git a/deps/v8/test/webkit/dfg-double-vote-fuzz.js b/deps/v8/test/webkit/dfg-double-vote-fuzz.js
index 266bc155bb..722341a142 100644
--- a/deps/v8/test/webkit/dfg-double-vote-fuzz.js
+++ b/deps/v8/test/webkit/dfg-double-vote-fuzz.js
@@ -87,5 +87,3 @@ for (var i = 0; i < 256; ++i) {
}
shouldBe("grandResult", "14578304");
-
-
diff --git a/deps/v8/test/webkit/dfg-ensure-non-array-array-storage-on-window.js b/deps/v8/test/webkit/dfg-ensure-non-array-array-storage-on-window.js
index 3ae47c5dc7..11e9fbdafc 100644
--- a/deps/v8/test/webkit/dfg-ensure-non-array-array-storage-on-window.js
+++ b/deps/v8/test/webkit/dfg-ensure-non-array-array-storage-on-window.js
@@ -85,4 +85,3 @@ shouldBe("thingy", "42");
shouldBe("foo(w)", "1");
w.length = 2;
shouldBe("foo(w)", "0/0");
-
diff --git a/deps/v8/test/webkit/dfg-get-by-val-clobber.js b/deps/v8/test/webkit/dfg-get-by-val-clobber.js
index c6c57fe3de..19d0091c2c 100644
--- a/deps/v8/test/webkit/dfg-get-by-val-clobber.js
+++ b/deps/v8/test/webkit/dfg-get-by-val-clobber.js
@@ -49,4 +49,3 @@ for (var i = 0; i < 1000; ++i) {
// Undo.
array1[0] = 1;
}
-
diff --git a/deps/v8/test/webkit/dfg-getter-throw.js b/deps/v8/test/webkit/dfg-getter-throw.js
index c77b33d060..2632242bd3 100644
--- a/deps/v8/test/webkit/dfg-getter-throw.js
+++ b/deps/v8/test/webkit/dfg-getter-throw.js
@@ -47,5 +47,3 @@ for (var i = 0; i < 200; ++i) {
});
shouldBe("bar(o)", i < 100 ? "\"Returned result: " + i + "\"" : "\"Threw exception: Oh hi, I'm an exception!\"");
}
-
-
diff --git a/deps/v8/test/webkit/dfg-getter.js b/deps/v8/test/webkit/dfg-getter.js
index dafd9f9c8c..d0f566444e 100644
--- a/deps/v8/test/webkit/dfg-getter.js
+++ b/deps/v8/test/webkit/dfg-getter.js
@@ -34,5 +34,3 @@ for (var i = 0; i < 200; ++i) {
o.__defineGetter__("f", function(){ return i; });
shouldBe("foo(o)", "" + i);
}
-
-
diff --git a/deps/v8/test/webkit/dfg-holy-put-by-val-interferes-with-get-array-length.js b/deps/v8/test/webkit/dfg-holy-put-by-val-interferes-with-get-array-length.js
index 684ebe8194..4b83711a48 100644
--- a/deps/v8/test/webkit/dfg-holy-put-by-val-interferes-with-get-array-length.js
+++ b/deps/v8/test/webkit/dfg-holy-put-by-val-interferes-with-get-array-length.js
@@ -33,5 +33,3 @@ function foo(array) {
for (var i = 0; i < 100; ++i)
shouldBe("foo([75])", "[1,2]");
-
-
diff --git a/deps/v8/test/webkit/dfg-inline-arguments-osr-exit-and-capture.js b/deps/v8/test/webkit/dfg-inline-arguments-osr-exit-and-capture.js
index 82a49faa68..4d254d8e7d 100644
--- a/deps/v8/test/webkit/dfg-inline-arguments-osr-exit-and-capture.js
+++ b/deps/v8/test/webkit/dfg-inline-arguments-osr-exit-and-capture.js
@@ -53,4 +53,3 @@ for (var i = 0; i < 300; ++i) {
}
shouldBe("baz(arg1, arg2)", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-inline-arguments-out-of-bounds.js b/deps/v8/test/webkit/dfg-inline-arguments-out-of-bounds.js
index 23b9184cac..9cd6ec0f59 100644
--- a/deps/v8/test/webkit/dfg-inline-arguments-out-of-bounds.js
+++ b/deps/v8/test/webkit/dfg-inline-arguments-out-of-bounds.js
@@ -50,4 +50,3 @@ for (var i = 0; i < 3000; ++i) {
result = "" + bar();
shouldBe("result", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-inline-arguments-use-directly-from-inlined-code.js b/deps/v8/test/webkit/dfg-inline-arguments-use-directly-from-inlined-code.js
index a1c5ca2576..95a98bc3cb 100644
--- a/deps/v8/test/webkit/dfg-inline-arguments-use-directly-from-inlined-code.js
+++ b/deps/v8/test/webkit/dfg-inline-arguments-use-directly-from-inlined-code.js
@@ -45,4 +45,3 @@ function argsToStr(args) {
for (var __i = 0; __i < 200; ++__i)
shouldBe("argsToStr(bar(\"a\" + __i, \"b\" + __i, \"c\" + __i))", "\"[object Arguments]: a" + __i + ", b" + __i + ", c" + __i + "\"");
-
diff --git a/deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places-broken.js b/deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places-broken.js
index b4fa6c1883..cc22ec4e4d 100644
--- a/deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places-broken.js
+++ b/deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places-broken.js
@@ -58,4 +58,3 @@ function argsToStr(args) {
for (var __i = 0; __i < 200; ++__i)
shouldThrow("argsToStr(bar(\"a\" + __i, \"b\" + __i, \"c\" + __i))");
-
diff --git a/deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places.js b/deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places.js
index 4470d9cc66..fce5430d07 100644
--- a/deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places.js
+++ b/deps/v8/test/webkit/dfg-inline-arguments-use-from-all-the-places.js
@@ -62,4 +62,3 @@ for (var __i = 0; __i < 200; ++__i) {
var text3 = "[[object Arguments]: 42, 56]";
shouldBe("argsToStr(bar(o, \"b\" + __i, \"c\" + __i))", "\"[[object Arguments],[object Arguments],[object Arguments],[object Arguments],[object Arguments],[object Arguments],[object Arguments],[object Arguments],[object Arguments],[object Arguments]: " + text1 + ", " + text1 + ", " + text1 + ", " + text1 + ", " + text1 + ", " + text2 + ", " + text1 + ", " + text1 + ", " + text2 + ", " + text3 + "]\"");
}
-
diff --git a/deps/v8/test/webkit/dfg-inline-arguments-use-from-getter.js b/deps/v8/test/webkit/dfg-inline-arguments-use-from-getter.js
index f532d35ce8..bece496020 100644
--- a/deps/v8/test/webkit/dfg-inline-arguments-use-from-getter.js
+++ b/deps/v8/test/webkit/dfg-inline-arguments-use-from-getter.js
@@ -48,4 +48,3 @@ function argsToStr(args) {
for (var __i = 0; __i < 200; ++__i)
shouldBe("argsToStr(bar(o, \"b\" + __i, \"c\" + __i))", "\"[object Arguments]: [object Object], b" + __i + ", c" + __i + "\"");
-
diff --git a/deps/v8/test/webkit/dfg-inline-function-dot-caller.js b/deps/v8/test/webkit/dfg-inline-function-dot-caller.js
index c6dd2b249f..dbbefb8ae2 100644
--- a/deps/v8/test/webkit/dfg-inline-function-dot-caller.js
+++ b/deps/v8/test/webkit/dfg-inline-function-dot-caller.js
@@ -66,4 +66,3 @@ shouldBe("resultArray[3]", "\"g\"");
shouldBe("resultArray[2]", "\"makeInlinableCall\"");
shouldBe("resultArray[1]", "\"inlineable\"");
shouldBe("resultArray[0]", "\"nonInlineable\"");
-
diff --git a/deps/v8/test/webkit/dfg-inline-unused-this-method-check.js b/deps/v8/test/webkit/dfg-inline-unused-this-method-check.js
index 929592eef9..13ad448b9e 100644
--- a/deps/v8/test/webkit/dfg-inline-unused-this-method-check.js
+++ b/deps/v8/test/webkit/dfg-inline-unused-this-method-check.js
@@ -51,4 +51,3 @@ for (var i = 0; i < 1000; ++i) {
shouldBe("baz(object, " + i + ", " + (i * 2) + ")", "" + (offset + i + i * 2));
}
-
diff --git a/deps/v8/test/webkit/dfg-inline-unused-this.js b/deps/v8/test/webkit/dfg-inline-unused-this.js
index 5983d72444..826008326b 100644
--- a/deps/v8/test/webkit/dfg-inline-unused-this.js
+++ b/deps/v8/test/webkit/dfg-inline-unused-this.js
@@ -54,4 +54,3 @@ for (var i = 0; i < 1000; ++i) {
shouldBe("baz(object, " + i + ", " + (i * 2) + ")", "" + (offset + i + i * 2));
}
-
diff --git a/deps/v8/test/webkit/dfg-int-overflow-in-loop.js b/deps/v8/test/webkit/dfg-int-overflow-in-loop.js
index 8752a12cbd..07b8e3442a 100644
--- a/deps/v8/test/webkit/dfg-int-overflow-in-loop.js
+++ b/deps/v8/test/webkit/dfg-int-overflow-in-loop.js
@@ -37,4 +37,3 @@ function foo(a) {
}
shouldBe("foo(0)", "-4094336");
-
diff --git a/deps/v8/test/webkit/dfg-int-overflow-large-constants-in-a-line.js b/deps/v8/test/webkit/dfg-int-overflow-large-constants-in-a-line.js
index 4e2f970dd4..dc6ff09fa2 100644
--- a/deps/v8/test/webkit/dfg-int-overflow-large-constants-in-a-line.js
+++ b/deps/v8/test/webkit/dfg-int-overflow-large-constants-in-a-line.js
@@ -72,5 +72,3 @@ function foo(a) {
for (var i = 0; i < 200; ++i)
shouldBe("foo(2147483647)", "2147483552");
-
-
diff --git a/deps/v8/test/webkit/dfg-integer-optimization.js b/deps/v8/test/webkit/dfg-integer-optimization.js
index 31ec8fce4f..0041aec8d1 100644
--- a/deps/v8/test/webkit/dfg-integer-optimization.js
+++ b/deps/v8/test/webkit/dfg-integer-optimization.js
@@ -43,4 +43,3 @@ for (var i = 0; i < 1000; ++i) {
shouldBe("result.a", "" + eval("((" + a + " + " + b + ") | 0)"))
shouldBe("result.b", "" + eval(a + " + " + b))
}
-
diff --git a/deps/v8/test/webkit/dfg-intrinsic-side-effect-assignment-osr-exit.js b/deps/v8/test/webkit/dfg-intrinsic-side-effect-assignment-osr-exit.js
index 4a32738159..f8316af724 100644
--- a/deps/v8/test/webkit/dfg-intrinsic-side-effect-assignment-osr-exit.js
+++ b/deps/v8/test/webkit/dfg-intrinsic-side-effect-assignment-osr-exit.js
@@ -46,4 +46,3 @@ for (var i = 0; i < 500; ++i) {
}
shouldBe("foo(array)", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-intrinsic-unused-this-method-check.js b/deps/v8/test/webkit/dfg-intrinsic-unused-this-method-check.js
index 4424400fb2..445cb0b34e 100644
--- a/deps/v8/test/webkit/dfg-intrinsic-unused-this-method-check.js
+++ b/deps/v8/test/webkit/dfg-intrinsic-unused-this-method-check.js
@@ -47,4 +47,3 @@ for (var i = 0; i < 1000; ++i) {
shouldBe("baz(object, " + i + ", " + (i * 2) + ")", "" + (offset + Math.max(i, i * 2)));
}
-
diff --git a/deps/v8/test/webkit/dfg-intrinsic-unused-this.js b/deps/v8/test/webkit/dfg-intrinsic-unused-this.js
index f7d3aa5b27..fa2086873b 100644
--- a/deps/v8/test/webkit/dfg-intrinsic-unused-this.js
+++ b/deps/v8/test/webkit/dfg-intrinsic-unused-this.js
@@ -50,4 +50,3 @@ for (var i = 0; i < 1000; ++i) {
shouldBe("baz(object, " + i + ", " + (i * 2) + ")", "" + (offset + Math.max(i, i * 2)));
}
-
diff --git a/deps/v8/test/webkit/dfg-max-backwards-propagation.js b/deps/v8/test/webkit/dfg-max-backwards-propagation.js
index b4041e1f63..0fd18fbb1c 100644
--- a/deps/v8/test/webkit/dfg-max-backwards-propagation.js
+++ b/deps/v8/test/webkit/dfg-max-backwards-propagation.js
@@ -46,4 +46,3 @@ for (var i = 0; i < 200; ++i) {
}
shouldBe("foo(" + f + ", " + array + ", 0)", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-min-backwards-propagation.js b/deps/v8/test/webkit/dfg-min-backwards-propagation.js
index 835aefa217..b423a8f357 100644
--- a/deps/v8/test/webkit/dfg-min-backwards-propagation.js
+++ b/deps/v8/test/webkit/dfg-min-backwards-propagation.js
@@ -46,4 +46,3 @@ for (var i = 0; i < 200; ++i) {
}
shouldBe("foo(" + f + ", " + array + ", 0)", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-mispredict-variable-but-prove-int.js b/deps/v8/test/webkit/dfg-mispredict-variable-but-prove-int.js
index c312670317..47530c6b54 100644
--- a/deps/v8/test/webkit/dfg-mispredict-variable-but-prove-int.js
+++ b/deps/v8/test/webkit/dfg-mispredict-variable-but-prove-int.js
@@ -41,4 +41,3 @@ shouldBe("foo(0, 1, 0)", "0.5");
for (var i = 0; i < 200; ++i)
shouldBe("foo(i, i + 1, i + 2)", "1");
-
diff --git a/deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-bitor.js b/deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-bitor.js
index 51401ea87e..bdb4ecf5a6 100644
--- a/deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-bitor.js
+++ b/deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-bitor.js
@@ -31,6 +31,3 @@ function foo(a) {
for (var i = 0; i < 100; ++i)
shouldBe("foo(2147483647)", "-65536");
-
-
-
diff --git a/deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-detect-overflow.js b/deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-detect-overflow.js
index 65ed8b761c..b891794db6 100644
--- a/deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-detect-overflow.js
+++ b/deps/v8/test/webkit/dfg-mul-big-integer-with-small-integer-and-detect-overflow.js
@@ -31,5 +31,3 @@ function foo(a) {
for (var i = 0; i < 1000; ++i)
shouldBe("foo(2147483647)", "140737488289792");
-
-
diff --git a/deps/v8/test/webkit/dfg-mul-big-integers.js b/deps/v8/test/webkit/dfg-mul-big-integers.js
index 0176a25855..2dbd30d9f9 100644
--- a/deps/v8/test/webkit/dfg-mul-big-integers.js
+++ b/deps/v8/test/webkit/dfg-mul-big-integers.js
@@ -31,5 +31,3 @@ function foo(a, b) {
for (var i = 0; i < 100; ++i)
shouldBe("foo(2147483647, 2147483646)", "-2147483648");
-
-
diff --git a/deps/v8/test/webkit/dfg-multi-basic-block-structure-clobber.js b/deps/v8/test/webkit/dfg-multi-basic-block-structure-clobber.js
index a4d8bdb99a..741a60871f 100644
--- a/deps/v8/test/webkit/dfg-multi-basic-block-structure-clobber.js
+++ b/deps/v8/test/webkit/dfg-multi-basic-block-structure-clobber.js
@@ -53,4 +53,3 @@ for (var i = 0; i < 200; ++i) {
}
shouldBe("foo(predicate, {f:object})", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-multiply.js b/deps/v8/test/webkit/dfg-multiply.js
index 95f278fece..e52c5bd61c 100644
--- a/deps/v8/test/webkit/dfg-multiply.js
+++ b/deps/v8/test/webkit/dfg-multiply.js
@@ -65,4 +65,3 @@ for (var i = 0; i < 10; ++i) {
shouldBe("doMultiplyConstant4(-1073741824)", "-4294967296");
shouldBe("doMultiplyConstant4(-2147483648)", "-8589934592");
}
-
diff --git a/deps/v8/test/webkit/dfg-negative-array-index.js b/deps/v8/test/webkit/dfg-negative-array-index.js
index f23c7fb8a9..8627ea5ed5 100644
--- a/deps/v8/test/webkit/dfg-negative-array-index.js
+++ b/deps/v8/test/webkit/dfg-negative-array-index.js
@@ -34,4 +34,3 @@ for (var i = 0; i < 100; ++i) {
foo(array, -1);
shouldBe("array[-1]", "42");
}
-
diff --git a/deps/v8/test/webkit/dfg-patchable-get-by-id-after-watchpoint.js b/deps/v8/test/webkit/dfg-patchable-get-by-id-after-watchpoint.js
index 9866126ef0..f4d0035aac 100644
--- a/deps/v8/test/webkit/dfg-patchable-get-by-id-after-watchpoint.js
+++ b/deps/v8/test/webkit/dfg-patchable-get-by-id-after-watchpoint.js
@@ -67,4 +67,3 @@ for (var i = 0; i < 200; ++i) {
}
shouldBe("foo(o, p)", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js b/deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js
index f8a67946a2..cbe4343f0a 100644
--- a/deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js
+++ b/deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-both-proven-final-object.js
@@ -43,4 +43,3 @@ for (var i = 0; i < 100; ++i) {
} else
shouldThrow("foo({f:42}, null)");
}
-
diff --git a/deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-proven-final-object.js b/deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-proven-final-object.js
index 5e27539b17..7fbc38619a 100644
--- a/deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-proven-final-object.js
+++ b/deps/v8/test/webkit/dfg-peephole-compare-final-object-to-final-object-or-other-when-proven-final-object.js
@@ -42,4 +42,3 @@ for (var i = 0; i < 100; ++i) {
} else
shouldThrow("foo({f:42}, null)");
}
-
diff --git a/deps/v8/test/webkit/dfg-phantom-base.js b/deps/v8/test/webkit/dfg-phantom-base.js
index f881b899d1..9b32195754 100644
--- a/deps/v8/test/webkit/dfg-phantom-base.js
+++ b/deps/v8/test/webkit/dfg-phantom-base.js
@@ -39,4 +39,3 @@ function foo(o) {
for (var i = 0; i < 200; ++i)
shouldBe("foo(i < 190 ? {f:42, g:{h:3}} : {f:42, g:{}})", "false");
-
diff --git a/deps/v8/test/webkit/dfg-phantom-get-local.js b/deps/v8/test/webkit/dfg-phantom-get-local.js
index d813436d28..c29dcf6931 100644
--- a/deps/v8/test/webkit/dfg-phantom-get-local.js
+++ b/deps/v8/test/webkit/dfg-phantom-get-local.js
@@ -39,4 +39,3 @@ function foo(o) {
for (var i = 0; i < 200; ++i)
shouldBe("foo(i < 190 ? {f:42, g:{h:3}} : {f:42, g:{}})", "false");
-
diff --git a/deps/v8/test/webkit/dfg-proto-access-inline-osr-exit.js b/deps/v8/test/webkit/dfg-proto-access-inline-osr-exit.js
index 35299806da..19cd657819 100644
--- a/deps/v8/test/webkit/dfg-proto-access-inline-osr-exit.js
+++ b/deps/v8/test/webkit/dfg-proto-access-inline-osr-exit.js
@@ -41,4 +41,3 @@ for (var i = 0; i < 200; ++i) {
myProto.g = 67;
shouldBe("foo({g:new Thingy()})", "42");
}
-
diff --git a/deps/v8/test/webkit/dfg-proven-sqrt-backwards-propagation.js b/deps/v8/test/webkit/dfg-proven-sqrt-backwards-propagation.js
index 5bc66d945d..bc2eef16b7 100644
--- a/deps/v8/test/webkit/dfg-proven-sqrt-backwards-propagation.js
+++ b/deps/v8/test/webkit/dfg-proven-sqrt-backwards-propagation.js
@@ -47,4 +47,3 @@ for (var i = 0; i < 200; ++i) {
}
shouldBe("foo(array, 0)", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-put-by-id-allocate-storage-polymorphic.js b/deps/v8/test/webkit/dfg-put-by-id-allocate-storage-polymorphic.js
index 5a8e2c05a0..710acb7b41 100644
--- a/deps/v8/test/webkit/dfg-put-by-id-allocate-storage-polymorphic.js
+++ b/deps/v8/test/webkit/dfg-put-by-id-allocate-storage-polymorphic.js
@@ -54,5 +54,3 @@ for (var i = 0; i < 150; ++i) {
else
shouldBe("o.foo", "void 0");
}
-
-
diff --git a/deps/v8/test/webkit/dfg-put-by-id-allocate-storage.js b/deps/v8/test/webkit/dfg-put-by-id-allocate-storage.js
index 4bd9d45363..e803f935f7 100644
--- a/deps/v8/test/webkit/dfg-put-by-id-allocate-storage.js
+++ b/deps/v8/test/webkit/dfg-put-by-id-allocate-storage.js
@@ -47,5 +47,3 @@ for (var i = 0; i < 150; ++i) {
shouldBe("o.f", "6");
shouldBe("o.g", "7");
}
-
-
diff --git a/deps/v8/test/webkit/dfg-put-by-id-prototype-check.js b/deps/v8/test/webkit/dfg-put-by-id-prototype-check.js
index a8b61374ef..035bcb0f21 100644
--- a/deps/v8/test/webkit/dfg-put-by-id-prototype-check.js
+++ b/deps/v8/test/webkit/dfg-put-by-id-prototype-check.js
@@ -49,4 +49,3 @@ for (var i = 0; i < 1000; ++i) {
} else
shouldBe("o.f", "" + i);
}
-
diff --git a/deps/v8/test/webkit/dfg-put-by-id-reallocate-storage-polymorphic.js b/deps/v8/test/webkit/dfg-put-by-id-reallocate-storage-polymorphic.js
index c3fe0fd1e3..5498bb4601 100644
--- a/deps/v8/test/webkit/dfg-put-by-id-reallocate-storage-polymorphic.js
+++ b/deps/v8/test/webkit/dfg-put-by-id-reallocate-storage-polymorphic.js
@@ -68,5 +68,3 @@ for (var i = 0; i < 150; ++i) {
else
shouldBe("o.foo", "void 0");
}
-
-
diff --git a/deps/v8/test/webkit/dfg-put-by-id-reallocate-storage.js b/deps/v8/test/webkit/dfg-put-by-id-reallocate-storage.js
index e6c19eeeff..50948a2e47 100644
--- a/deps/v8/test/webkit/dfg-put-by-id-reallocate-storage.js
+++ b/deps/v8/test/webkit/dfg-put-by-id-reallocate-storage.js
@@ -61,5 +61,3 @@ for (var i = 0; i < 150; ++i) {
shouldBe("o.m", "13");
shouldBe("o.n", "14");
}
-
-
diff --git a/deps/v8/test/webkit/dfg-put-by-val-setter-then-get-by-val.js b/deps/v8/test/webkit/dfg-put-by-val-setter-then-get-by-val.js
index 40d79e03aa..930fbe5cca 100644
--- a/deps/v8/test/webkit/dfg-put-by-val-setter-then-get-by-val.js
+++ b/deps/v8/test/webkit/dfg-put-by-val-setter-then-get-by-val.js
@@ -43,4 +43,3 @@ for (var i = 0; i < 200; ++i) {
shouldBe("foo(array, -1, i)", "42");
shouldBe("thingy", "" + i);
}
-
diff --git a/deps/v8/test/webkit/dfg-put-scoped-var-backward-flow.js b/deps/v8/test/webkit/dfg-put-scoped-var-backward-flow.js
index 9f3bfd36b9..32df63b01c 100644
--- a/deps/v8/test/webkit/dfg-put-scoped-var-backward-flow.js
+++ b/deps/v8/test/webkit/dfg-put-scoped-var-backward-flow.js
@@ -35,4 +35,3 @@ function sum(nums) {
for (var i = 0; i < 200; ++i)
shouldBe("sum([2147483646, 2147483644])", "4294967290");
-
diff --git a/deps/v8/test/webkit/dfg-sqrt-backwards-propagation.js b/deps/v8/test/webkit/dfg-sqrt-backwards-propagation.js
index 9954e3a5bf..aec4b4bb67 100644
--- a/deps/v8/test/webkit/dfg-sqrt-backwards-propagation.js
+++ b/deps/v8/test/webkit/dfg-sqrt-backwards-propagation.js
@@ -46,4 +46,3 @@ for (var i = 0; i < 200; ++i) {
}
shouldBe("foo(" + f + ", " + array + ", 0)", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-store-unexpected-value-into-argument-and-osr-exit.js b/deps/v8/test/webkit/dfg-store-unexpected-value-into-argument-and-osr-exit.js
index 1006c2840a..77a02c326a 100644
--- a/deps/v8/test/webkit/dfg-store-unexpected-value-into-argument-and-osr-exit.js
+++ b/deps/v8/test/webkit/dfg-store-unexpected-value-into-argument-and-osr-exit.js
@@ -61,4 +61,3 @@ for (var i = 0; i < 200; ++i) {
}
shouldBe("foo(3, {f:f, g:g}, p)", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-string-stricteq.js b/deps/v8/test/webkit/dfg-string-stricteq.js
index 27a809a850..510709ad27 100644
--- a/deps/v8/test/webkit/dfg-string-stricteq.js
+++ b/deps/v8/test/webkit/dfg-string-stricteq.js
@@ -49,4 +49,3 @@ for (var i = 0; i < 1000; ++i) {
shouldBe("foo({f:\"" + a + "\"}, {f:\"" + b + "\"})", "[" + (a.length + b.length) + ", " + areEqual + "]");
shouldBe("bar({f:\"" + a + "\"}, {f:\"" + b + "\"})", "" + (areEqual ? a.length + b.length + 1 : a.length + b.length - 1));
}
-
diff --git a/deps/v8/test/webkit/dfg-tear-off-arguments-not-activation.js b/deps/v8/test/webkit/dfg-tear-off-arguments-not-activation.js
index d3967ccfd0..e18a086a20 100644
--- a/deps/v8/test/webkit/dfg-tear-off-arguments-not-activation.js
+++ b/deps/v8/test/webkit/dfg-tear-off-arguments-not-activation.js
@@ -42,4 +42,3 @@ for (var i = 0; i < 200; ++i) {
shouldBe("thingy.length", "1");
shouldBe("thingy[0]", "false");
}
-
diff --git a/deps/v8/test/webkit/dfg-tear-off-function-dot-arguments.js b/deps/v8/test/webkit/dfg-tear-off-function-dot-arguments.js
index a8582bf335..cdbf4bde6e 100644
--- a/deps/v8/test/webkit/dfg-tear-off-function-dot-arguments.js
+++ b/deps/v8/test/webkit/dfg-tear-off-function-dot-arguments.js
@@ -42,4 +42,3 @@ for (var i = 0; i < 200; ++i) {
shouldBe("thingy.length", "1");
shouldBe("thingy[0]", "false");
}
-
diff --git a/deps/v8/test/webkit/dfg-to-string-on-cell.js b/deps/v8/test/webkit/dfg-to-string-on-cell.js
index 8ae7388c5f..75af16d723 100644
--- a/deps/v8/test/webkit/dfg-to-string-on-cell.js
+++ b/deps/v8/test/webkit/dfg-to-string-on-cell.js
@@ -29,4 +29,3 @@ function foo(a, o) {
for (var i = 0; i < 100; ++i)
shouldBe("\"\" + foo(\"foo\", i % 2 ? \"hello\" : [1, 2, 3])", i % 2 ? "\"foo,hello\"" : "\"foo,1,2,3\"");
-
diff --git a/deps/v8/test/webkit/dfg-to-string-on-value.js b/deps/v8/test/webkit/dfg-to-string-on-value.js
index 0c89f3b89c..a10767bff2 100644
--- a/deps/v8/test/webkit/dfg-to-string-on-value.js
+++ b/deps/v8/test/webkit/dfg-to-string-on-value.js
@@ -29,4 +29,3 @@ function foo(a, o) {
for (var i = 0; i < 100; ++i)
shouldBe("\"\" + foo(\"foo\", i % 2 ? \"hello\" : 42)", i % 2 ? "\"foo,hello\"" : "\"foo,42\"");
-
diff --git a/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-check-structure.js b/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-check-structure.js
index e37fa99b5a..254b5f24a8 100644
--- a/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-check-structure.js
+++ b/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-check-structure.js
@@ -34,4 +34,3 @@ for (var i = 0; i < 100; ++i) {
String.prototype.toString = function() { return 42; }
shouldBe("foo.call(new String(\"foo\"))", i >= 99 ? "\"42\"" : "\"foo\"");
}
-
diff --git a/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-dictionary-string-prototype.js b/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-dictionary-string-prototype.js
index 6db3467a49..faee44372f 100644
--- a/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-dictionary-string-prototype.js
+++ b/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad-with-dictionary-string-prototype.js
@@ -46,4 +46,3 @@ for (var i = 0; i < 150; ++i) {
}
shouldBe("\"\" + foo(\"hello\")", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad.js b/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad.js
index d15917a17b..433475d844 100644
--- a/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad.js
+++ b/deps/v8/test/webkit/dfg-to-string-toString-becomes-bad.js
@@ -39,4 +39,3 @@ for (var i = 0; i < 150; ++i) {
}
shouldBe("\"\" + foo(\"hello\")", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-to-string-toString-in-string.js b/deps/v8/test/webkit/dfg-to-string-toString-in-string.js
index 17f5794c25..f7f8099c55 100644
--- a/deps/v8/test/webkit/dfg-to-string-toString-in-string.js
+++ b/deps/v8/test/webkit/dfg-to-string-toString-in-string.js
@@ -41,4 +41,3 @@ for (var i = 0; i < 150; ++i) {
}
shouldBe("\"\" + foo(argument)", expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-to-string-valueOf-in-string.js b/deps/v8/test/webkit/dfg-to-string-valueOf-in-string.js
index e591bf7fe7..e5c4c6be35 100644
--- a/deps/v8/test/webkit/dfg-to-string-valueOf-in-string.js
+++ b/deps/v8/test/webkit/dfg-to-string-valueOf-in-string.js
@@ -39,4 +39,3 @@ for (var i = 0; i < 150; ++i) {
}
shouldBe("\"\" + foo(argument)", "\"hello\"");
}
-
diff --git a/deps/v8/test/webkit/dfg-uint32-to-number-in-middle-of-copy-propagation.js b/deps/v8/test/webkit/dfg-uint32-to-number-in-middle-of-copy-propagation.js
index 7705f03df2..dc02237928 100644
--- a/deps/v8/test/webkit/dfg-uint32-to-number-in-middle-of-copy-propagation.js
+++ b/deps/v8/test/webkit/dfg-uint32-to-number-in-middle-of-copy-propagation.js
@@ -35,4 +35,3 @@ function foo(b) {
for (var i = 0; i < 100; ++i)
shouldBe("foo(-1)", "[-1, 4294967295]");
-
diff --git a/deps/v8/test/webkit/dfg-uint32-to-number-on-captured-variable.js b/deps/v8/test/webkit/dfg-uint32-to-number-on-captured-variable.js
index ec2be1d74b..123655453f 100644
--- a/deps/v8/test/webkit/dfg-uint32-to-number-on-captured-variable.js
+++ b/deps/v8/test/webkit/dfg-uint32-to-number-on-captured-variable.js
@@ -34,4 +34,3 @@ function foo(a) {
for (var i = 0; i < 100; ++i)
shouldBe("foo(" + i + ")()", "" + i);
-
diff --git a/deps/v8/test/webkit/dfg-uint32-to-number-skip-then-exit.js b/deps/v8/test/webkit/dfg-uint32-to-number-skip-then-exit.js
index 13029d2f35..40d3a13697 100644
--- a/deps/v8/test/webkit/dfg-uint32-to-number-skip-then-exit.js
+++ b/deps/v8/test/webkit/dfg-uint32-to-number-skip-then-exit.js
@@ -42,4 +42,3 @@ for (var i = 0; i < 200; ++i) {
}
shouldBe("foo(i, 1, o)", "" + expected);
}
-
diff --git a/deps/v8/test/webkit/dfg-uint32-to-number.js b/deps/v8/test/webkit/dfg-uint32-to-number.js
index f7c11d1e23..7fc98da8c3 100644
--- a/deps/v8/test/webkit/dfg-uint32-to-number.js
+++ b/deps/v8/test/webkit/dfg-uint32-to-number.js
@@ -39,4 +39,3 @@ shouldBe("result", "124500");
shouldBe("foo({f:2147483648}, {f:32})", "2147483648");
shouldBe("foo({f:2147483648}, {f:31})", "1");
shouldBe("foo({f:2147483648}, {f:30})", "2");
-
diff --git a/deps/v8/test/webkit/dfg-uint32array-overflow-constant.js b/deps/v8/test/webkit/dfg-uint32array-overflow-constant.js
index 1de13a3b5d..6d7f06ae58 100644
--- a/deps/v8/test/webkit/dfg-uint32array-overflow-constant.js
+++ b/deps/v8/test/webkit/dfg-uint32array-overflow-constant.js
@@ -35,4 +35,3 @@ for (var i = 0; i < 200; ++i) {
foo(array);
shouldBe("array[0]", "0x8005465c");
}
-
diff --git a/deps/v8/test/webkit/dfg-weak-js-constant-silent-fill.js b/deps/v8/test/webkit/dfg-weak-js-constant-silent-fill.js
index fa06a905bc..84f3f4bcb1 100644
--- a/deps/v8/test/webkit/dfg-weak-js-constant-silent-fill.js
+++ b/deps/v8/test/webkit/dfg-weak-js-constant-silent-fill.js
@@ -45,4 +45,3 @@ for (var i = 0; i < 1000; ++i) {
shouldBe("o2.f", "false");
shouldBe("o3.f", "false");
}
-
diff --git a/deps/v8/test/webkit/exception-for-nonobject-expected.txt b/deps/v8/test/webkit/exception-for-nonobject-expected.txt
index 86068b102c..e6003986b9 100644
--- a/deps/v8/test/webkit/exception-for-nonobject-expected.txt
+++ b/deps/v8/test/webkit/exception-for-nonobject-expected.txt
@@ -27,7 +27,7 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS new {}.undefined threw exception TypeError: undefined is not a function.
-PASS 1 instanceof {}.undefined threw exception TypeError: Expecting a function in instanceof check, but got 1.
+PASS 1 instanceof {}.undefined threw exception TypeError: Expecting a function in instanceof check, but got undefined.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/fast/js/JSON-parse-reviver.js b/deps/v8/test/webkit/fast/js/JSON-parse-reviver.js
index b02a2fc468..1f04602ee9 100644
--- a/deps/v8/test/webkit/fast/js/JSON-parse-reviver.js
+++ b/deps/v8/test/webkit/fast/js/JSON-parse-reviver.js
@@ -201,5 +201,3 @@ function reviveIntroducesNewArrayLikeObject(i, v) {
}
shouldBe('JSON.stringify(JSON.parse("[0,1]", reviveIntroducesNewArrayLikeObject))', '\'[0,["a","a"]]\'');
-
-
diff --git a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
index 70827fc0af..b8c4bec106 100644
--- a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
+++ b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
@@ -63,17 +63,17 @@ FAIL getSortedOwnPropertyNames(decodeURI) should be length,name. Was arguments,c
FAIL getSortedOwnPropertyNames(decodeURIComponent) should be length,name. Was arguments,caller,length,name.
FAIL getSortedOwnPropertyNames(encodeURI) should be length,name. Was arguments,caller,length,name.
FAIL getSortedOwnPropertyNames(encodeURIComponent) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(Object) should be create,defineProperties,defineProperty,freeze,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,isExtensible,isFrozen,isSealed,keys,length,name,preventExtensions,prototype,seal. Was arguments,caller,create,defineProperties,defineProperty,freeze,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,is,isExtensible,isFrozen,isSealed,keys,length,name,preventExtensions,prototype,seal.
+FAIL getSortedOwnPropertyNames(Object) should be create,defineProperties,defineProperty,freeze,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,isExtensible,isFrozen,isSealed,keys,length,name,preventExtensions,prototype,seal,setPrototypeOf. Was arguments,caller,create,defineProperties,defineProperty,freeze,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,is,isExtensible,isFrozen,isSealed,keys,length,name,preventExtensions,prototype,seal,setPrototypeOf.
PASS getSortedOwnPropertyNames(Object.prototype) is ['__defineGetter__', '__defineSetter__', '__lookupGetter__', '__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty', 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', 'valueOf']
FAIL getSortedOwnPropertyNames(Function) should be length,name,prototype. Was arguments,caller,length,name,prototype.
FAIL getSortedOwnPropertyNames(Function.prototype) should be apply,bind,call,constructor,length,name,toString. Was apply,arguments,bind,call,caller,constructor,length,name,toString.
FAIL getSortedOwnPropertyNames(Array) should be isArray,length,name,prototype. Was arguments,caller,isArray,length,name,prototype.
PASS getSortedOwnPropertyNames(Array.prototype) is ['concat', 'constructor', 'every', 'filter', 'forEach', 'indexOf', 'join', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']
FAIL getSortedOwnPropertyNames(String) should be fromCharCode,length,name,prototype. Was arguments,caller,fromCharCode,length,name,prototype.
-PASS getSortedOwnPropertyNames(String.prototype) is ['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'concat', 'constructor', 'fixed', 'fontcolor', 'fontsize', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'replace', 'search', 'slice', 'small', 'split', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']
+PASS getSortedOwnPropertyNames(String.prototype) is ['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'concat', 'constructor', 'fixed', 'fontcolor', 'fontsize', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'replace', 'search', 'slice', 'small', 'split', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']
FAIL getSortedOwnPropertyNames(Boolean) should be length,name,prototype. Was arguments,caller,length,name,prototype.
PASS getSortedOwnPropertyNames(Boolean.prototype) is ['constructor', 'toString', 'valueOf']
-FAIL getSortedOwnPropertyNames(Number) should be MAX_VALUE,MIN_VALUE,NEGATIVE_INFINITY,NaN,POSITIVE_INFINITY,length,name,prototype. Was MAX_VALUE,MIN_VALUE,NEGATIVE_INFINITY,NaN,POSITIVE_INFINITY,arguments,caller,isFinite,isNaN,length,name,prototype.
+FAIL getSortedOwnPropertyNames(Number) should be MAX_VALUE,MIN_VALUE,NEGATIVE_INFINITY,NaN,POSITIVE_INFINITY,length,name,prototype. Was EPSILON,MAX_SAFE_INTEGER,MAX_VALUE,MIN_SAFE_INTEGER,MIN_VALUE,NEGATIVE_INFINITY,NaN,POSITIVE_INFINITY,arguments,caller,isFinite,isInteger,isNaN,isSafeInteger,length,name,parseFloat,parseInt,prototype.
PASS getSortedOwnPropertyNames(Number.prototype) is ['constructor', 'toExponential', 'toFixed', 'toLocaleString', 'toPrecision', 'toString', 'valueOf']
FAIL getSortedOwnPropertyNames(Date) should be UTC,length,name,now,parse,prototype. Was UTC,arguments,caller,length,name,now,parse,prototype.
PASS getSortedOwnPropertyNames(Date.prototype) is ['constructor', 'getDate', 'getDay', 'getFullYear', 'getHours', 'getMilliseconds', 'getMinutes', 'getMonth', 'getSeconds', 'getTime', 'getTimezoneOffset', 'getUTCDate', 'getUTCDay', 'getUTCFullYear', 'getUTCHours', 'getUTCMilliseconds', 'getUTCMinutes', 'getUTCMonth', 'getUTCSeconds', 'getYear', 'setDate', 'setFullYear', 'setHours', 'setMilliseconds', 'setMinutes', 'setMonth', 'setSeconds', 'setTime', 'setUTCDate', 'setUTCFullYear', 'setUTCHours', 'setUTCMilliseconds', 'setUTCMinutes', 'setUTCMonth', 'setUTCSeconds', 'setYear', 'toDateString', 'toGMTString', 'toISOString', 'toJSON', 'toLocaleDateString', 'toLocaleString', 'toLocaleTimeString', 'toString', 'toTimeString', 'toUTCString', 'valueOf']
diff --git a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js
index 4f3334a48e..6373cf1ae0 100644
--- a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js
+++ b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js
@@ -71,14 +71,14 @@ var expectedPropertyNamesSet = {
"encodeURI": "['length', 'name']",
"encodeURIComponent": "['length', 'name']",
// Built-in ECMA objects
- "Object": "['create', 'defineProperties', 'defineProperty', 'freeze', 'getOwnPropertyDescriptor', 'getOwnPropertyNames', 'getPrototypeOf', 'isExtensible', 'isFrozen', 'isSealed', 'keys', 'length', 'name', 'preventExtensions', 'prototype', 'seal']",
+ "Object": "['create', 'defineProperties', 'defineProperty', 'freeze', 'getOwnPropertyDescriptor', 'getOwnPropertyNames', 'getPrototypeOf', 'isExtensible', 'isFrozen', 'isSealed', 'keys', 'length', 'name', 'preventExtensions', 'prototype', 'seal', 'setPrototypeOf']",
"Object.prototype": "['__defineGetter__', '__defineSetter__', '__lookupGetter__', '__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty', 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', 'valueOf']",
"Function": "['length', 'name', 'prototype']",
"Function.prototype": "['apply', 'bind', 'call', 'constructor', 'length', 'name', 'toString']",
"Array": "['isArray', 'length', 'name', 'prototype']",
"Array.prototype": "['concat', 'constructor', 'every', 'filter', 'forEach', 'indexOf', 'join', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']",
"String": "['fromCharCode', 'length', 'name', 'prototype']",
- "String.prototype": "['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'concat', 'constructor', 'fixed', 'fontcolor', 'fontsize', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'replace', 'search', 'slice', 'small', 'split', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']",
+ "String.prototype": "['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'concat', 'constructor', 'fixed', 'fontcolor', 'fontsize', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'replace', 'search', 'slice', 'small', 'split', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']",
"Boolean": "['length', 'name', 'prototype']",
"Boolean.prototype": "['constructor', 'toString', 'valueOf']",
"Number": "['MAX_VALUE', 'MIN_VALUE', 'NEGATIVE_INFINITY', 'NaN', 'POSITIVE_INFINITY', 'length', 'name', 'prototype']",
diff --git a/deps/v8/test/webkit/fast/js/array-bad-time-expected.txt b/deps/v8/test/webkit/fast/js/array-bad-time-expected.txt
index 7668e7e333..478c9a8945 100644
--- a/deps/v8/test/webkit/fast/js/array-bad-time-expected.txt
+++ b/deps/v8/test/webkit/fast/js/array-bad-time-expected.txt
@@ -977,58 +977,57 @@ PASS "0,1,2,3,4" is "0,1,2,3,4"
PASS "0,1,2,3,4" is "0,1,2,3,4"
PASS "0,1,2,3,4" is "0,1,2,3,4"
Henceforth I will have a bad time.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL ouches should be 50. Was 0.
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS ouches is 50
PASS successfullyParsed is true
TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/fast/js/array-bad-time.js b/deps/v8/test/webkit/fast/js/array-bad-time.js
index 1d3802eda3..26241b229e 100644
--- a/deps/v8/test/webkit/fast/js/array-bad-time.js
+++ b/deps/v8/test/webkit/fast/js/array-bad-time.js
@@ -33,7 +33,7 @@ function foo(haveABadTime) {
for (var i = 0; i < result.length; ++i) {
if (i == haveABadTime) {
debug("Henceforth I will have a bad time.");
- Array.prototype.__defineSetter__("3", function() { debug("Ouch!"); ouches++; });
+ Array.prototype.__defineSetter__("3", function() { ouches++; });
}
result[i] = i;
}
diff --git a/deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt b/deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt
index 6ab72a450e..9e98e2608b 100644
--- a/deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt
+++ b/deps/v8/test/webkit/fast/js/array-prototype-properties-expected.txt
@@ -42,7 +42,7 @@ PASS Array.prototype.every.call(undefined, toString) threw exception TypeError:
PASS Array.prototype.forEach.call(undefined, toString) threw exception TypeError: Array.prototype.forEach called on null or undefined.
PASS Array.prototype.some.call(undefined, toString) threw exception TypeError: Array.prototype.some called on null or undefined.
PASS Array.prototype.indexOf.call(undefined, 0) threw exception TypeError: Array.prototype.indexOf called on null or undefined.
-PASS Array.prototype.indlastIndexOfexOf.call(undefined, 0) threw exception TypeError: Cannot call method 'call' of undefined.
+PASS Array.prototype.indlastIndexOfexOf.call(undefined, 0) threw exception TypeError: Cannot read property 'call' of undefined.
PASS Array.prototype.filter.call(undefined, toString) threw exception TypeError: Array.prototype.filter called on null or undefined.
PASS Array.prototype.reduce.call(undefined, toString) threw exception TypeError: Array.prototype.reduce called on null or undefined.
PASS Array.prototype.reduceRight.call(undefined, toString) threw exception TypeError: Array.prototype.reduceRight called on null or undefined.
diff --git a/deps/v8/test/webkit/fast/js/array-slow-put-expected.txt b/deps/v8/test/webkit/fast/js/array-slow-put-expected.txt
index 474846255d..99b44c337a 100644
--- a/deps/v8/test/webkit/fast/js/array-slow-put-expected.txt
+++ b/deps/v8/test/webkit/fast/js/array-slow-put-expected.txt
@@ -26,108 +26,107 @@ Tests that defining a setter on the Array prototype works.
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL ouches should be 100. Was 0.
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS ouches is 100
PASS successfullyParsed is true
TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/fast/js/array-slow-put.js b/deps/v8/test/webkit/fast/js/array-slow-put.js
index 1e8c44d4ed..63bcad09d9 100644
--- a/deps/v8/test/webkit/fast/js/array-slow-put.js
+++ b/deps/v8/test/webkit/fast/js/array-slow-put.js
@@ -26,7 +26,7 @@ description(
);
var ouches = 0;
-Array.prototype.__defineSetter__("3", function() { debug("Ouch!"); ouches++; });
+Array.prototype.__defineSetter__("3", function() { ouches++; });
function foo() {
var result = [];
diff --git a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
index f743f6d352..07ecf99ce3 100644
--- a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
+++ b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
@@ -53,22 +53,22 @@ PASS testThis.call() is undefined
PASS testThis.apply() is undefined
PASS testThis.call(undefined) is undefined
PASS testThis.apply(undefined) is undefined
-PASS (function eval(){'use strict';}) threw exception SyntaxError: Function name may not be eval or arguments in strict mode.
-PASS (function(){(function eval(){'use strict';})}) threw exception SyntaxError: Function name may not be eval or arguments in strict mode.
-PASS (function (eval){'use strict';}) threw exception SyntaxError: Parameter name eval or arguments is not allowed in strict mode.
-PASS (function(){(function (eval){'use strict';})}) threw exception SyntaxError: Parameter name eval or arguments is not allowed in strict mode.
-PASS (function arguments(){'use strict';}) threw exception SyntaxError: Function name may not be eval or arguments in strict mode.
-PASS (function(){(function arguments(){'use strict';})}) threw exception SyntaxError: Function name may not be eval or arguments in strict mode.
-PASS (function (arguments){'use strict';}) threw exception SyntaxError: Parameter name eval or arguments is not allowed in strict mode.
-PASS (function(){(function (arguments){'use strict';})}) threw exception SyntaxError: Parameter name eval or arguments is not allowed in strict mode.
-PASS (function (){'use strict'; var eval;}) threw exception SyntaxError: Variable name may not be eval or arguments in strict mode.
-PASS (function(){(function (){'use strict'; var eval;})}) threw exception SyntaxError: Variable name may not be eval or arguments in strict mode.
-PASS (function (){'use strict'; var arguments;}) threw exception SyntaxError: Variable name may not be eval or arguments in strict mode.
-PASS (function(){(function (){'use strict'; var arguments;})}) threw exception SyntaxError: Variable name may not be eval or arguments in strict mode.
-PASS (function (){'use strict'; try{}catch(eval){}}) threw exception SyntaxError: Catch variable may not be eval or arguments in strict mode.
-PASS (function(){(function (){'use strict'; try{}catch(eval){}})}) threw exception SyntaxError: Catch variable may not be eval or arguments in strict mode.
-PASS (function (){'use strict'; try{}catch(arguments){}}) threw exception SyntaxError: Catch variable may not be eval or arguments in strict mode.
-PASS (function(){(function (){'use strict'; try{}catch(arguments){}})}) threw exception SyntaxError: Catch variable may not be eval or arguments in strict mode.
+PASS (function eval(){'use strict';}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){(function eval(){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (eval){'use strict';}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){(function (eval){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function arguments(){'use strict';}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){(function arguments(){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (arguments){'use strict';}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){(function (arguments){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; var eval;}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){(function (){'use strict'; var eval;})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; var arguments;}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){(function (){'use strict'; var arguments;})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; try{}catch(eval){}}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){(function (){'use strict'; try{}catch(eval){}})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; try{}catch(arguments){}}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){(function (){'use strict'; try{}catch(arguments){}})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (a, a){'use strict';}) threw exception SyntaxError: Strict mode function may not have duplicate parameter names.
PASS (function(){(function (a, a){'use strict';})}) threw exception SyntaxError: Strict mode function may not have duplicate parameter names.
PASS (function (a){'use strict'; delete a;})() threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
@@ -134,23 +134,23 @@ PASS 'use strict'; eval('var introducedVariable = "FAIL: variable introduced int
PASS 'use strict'; objectWithReadonlyProperty.prop = 'fail' threw exception TypeError: Cannot assign to read only property 'prop' of #<Object>.
PASS 'use strict'; delete objectWithReadonlyProperty.prop threw exception TypeError: Cannot delete property 'prop' of #<Object>.
PASS 'use strict'; delete objectWithReadonlyProperty[readonlyPropName] threw exception TypeError: Cannot delete property 'prop' of #<Object>.
-PASS 'use strict'; ++eval threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS (function(){'use strict'; ++eval}) threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS 'use strict'; eval++ threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS (function(){'use strict'; eval++}) threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS 'use strict'; --eval threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS (function(){'use strict'; --eval}) threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS 'use strict'; eval-- threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS (function(){'use strict'; eval--}) threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS 'use strict'; function f() { ++arguments } threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS (function(){'use strict'; function f() { ++arguments }}) threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS 'use strict'; function f() { arguments++ } threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS (function(){'use strict'; function f() { arguments++ }}) threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS 'use strict'; function f() { --arguments } threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS (function(){'use strict'; function f() { --arguments }}) threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS 'use strict'; function f() { arguments-- } threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS (function(){'use strict'; function f() { arguments-- }}) threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
-PASS global.eval('"use strict"; if (0) ++arguments; true;') threw exception SyntaxError: Prefix increment/decrement may not have eval or arguments operand in strict mode.
+PASS 'use strict'; ++eval threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict'; ++eval}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict'; eval++ threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict'; eval++}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict'; --eval threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict'; --eval}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict'; eval-- threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict'; eval--}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict'; function f() { ++arguments } threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict'; function f() { ++arguments }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict'; function f() { arguments++ } threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict'; function f() { arguments++ }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict'; function f() { --arguments } threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict'; function f() { --arguments }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict'; function f() { arguments-- } threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict'; function f() { arguments-- }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS global.eval('"use strict"; if (0) ++arguments; true;') threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; ++(1, eval) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
FAIL (function(){'use strict'; ++(1, eval)}) should throw an exception. Was function (){'use strict'; ++(1, eval)}.
PASS 'use strict'; (1, eval)++ threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
@@ -212,16 +212,16 @@ PASS 'use strict';̻ threw exception SyntaxError: Unexpected token ILLEGAL.
PASS (function(){'use strict';̻}) threw exception SyntaxError: Unexpected token ILLEGAL.
PASS 'use strict';5.f threw exception SyntaxError: Unexpected token ILLEGAL.
PASS (function(){'use strict';5.f}) threw exception SyntaxError: Unexpected token ILLEGAL.
-PASS 'use strict';1-(eval=1); threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
-PASS (function(){'use strict';1-(eval=1);}) threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
-PASS 'use strict';arguments=1; threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
-PASS (function(){'use strict';arguments=1;}) threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
-PASS 'use strict';1-(arguments=1); threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
-PASS (function(){'use strict';1-(arguments=1);}) threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
-PASS 'use strict';var a=(eval=1); threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
-PASS (function(){'use strict';var a=(eval=1);}) threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
-PASS 'use strict';var a=(arguments=1); threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
-PASS (function(){'use strict';var a=(arguments=1);}) threw exception SyntaxError: Assignment to eval or arguments is not allowed in strict mode.
+PASS 'use strict';1-(eval=1); threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict';1-(eval=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict';arguments=1; threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict';arguments=1;}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict';1-(arguments=1); threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict';1-(arguments=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict';var a=(eval=1); threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict';var a=(eval=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS 'use strict';var a=(arguments=1); threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function(){'use strict';var a=(arguments=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; try { throw 1; } catch (e) { aGlobal = true; } is true
PASS 'use strict'; (function () { try { throw 1; } catch (e) { aGlobal = true; }})(); aGlobal; is true
PASS (function () {'use strict'; try { throw 1; } catch (e) { aGlobal = true; }})(); aGlobal; is true
diff --git a/deps/v8/test/webkit/fast/js/date-toisostring-expected.txt b/deps/v8/test/webkit/fast/js/date-toisostring-expected.txt
index 865090aa14..26517264ae 100644
--- a/deps/v8/test/webkit/fast/js/date-toisostring-expected.txt
+++ b/deps/v8/test/webkit/fast/js/date-toisostring-expected.txt
@@ -26,8 +26,8 @@ Tests for Date.toISOString
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS Date.toISOString.call({}) threw exception TypeError: Cannot call method 'call' of undefined.
-PASS Date.toISOString.call(0) threw exception TypeError: Cannot call method 'call' of undefined.
+PASS Date.toISOString.call({}) threw exception TypeError: Cannot read property 'call' of undefined.
+PASS Date.toISOString.call(0) threw exception TypeError: Cannot read property 'call' of undefined.
PASS new Date(-400).toISOString() is '1969-12-31T23:59:59.600Z'
PASS new Date(0).toISOString() is '1970-01-01T00:00:00.000Z'
PASS new Date('1 January 1500 UTC').toISOString() is '1500-01-01T00:00:00.000Z'
diff --git a/deps/v8/test/webkit/fast/js/deep-recursion-test.js b/deps/v8/test/webkit/fast/js/deep-recursion-test.js
index 59df2f1c9a..c2a695da46 100644
--- a/deps/v8/test/webkit/fast/js/deep-recursion-test.js
+++ b/deps/v8/test/webkit/fast/js/deep-recursion-test.js
@@ -75,4 +75,4 @@ description("This test how deep we can recurse, and that we get an exception whe
} catch (ex) {
var msg = String(eval(ex));
shouldBe("msg", "'RangeError: Maximum call stack size exceeded.'");
- }
\ No newline at end of file
+ }
diff --git a/deps/v8/test/webkit/fast/js/function-decompilation-operators.js b/deps/v8/test/webkit/fast/js/function-decompilation-operators.js
index 759767f88e..cab6dfaf96 100644
--- a/deps/v8/test/webkit/fast/js/function-decompilation-operators.js
+++ b/deps/v8/test/webkit/fast/js/function-decompilation-operators.js
@@ -80,4 +80,4 @@ description("This test checks toString() round-trip decompilation for binary and
for (test in tests) {
var decompiledFunction = eval("(function () { " + tests[test] + ";})").toString().replace(/\n/g, "");
shouldBe("decompiledFunction", "'function () { " + tests[test] + ";}'");
- }
\ No newline at end of file
+ }
diff --git a/deps/v8/test/webkit/fast/js/kde/Array.js b/deps/v8/test/webkit/fast/js/kde/Array.js
index 26bf331588..595c3b3543 100644
--- a/deps/v8/test/webkit/fast/js/kde/Array.js
+++ b/deps/v8/test/webkit/fast/js/kde/Array.js
@@ -233,4 +233,4 @@ function testToString() {
Number.prototype.toLocaleString = backupNumberToLocaleString;
RegExp.prototype.toString = backupRegExpToString;
RegExp.prototype.toLocaleString = backupRegExpToLocaleString;
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/webkit/fast/js/kde/Boolean.js b/deps/v8/test/webkit/fast/js/kde/Boolean.js
index dbc117a726..6ddb953094 100644
--- a/deps/v8/test/webkit/fast/js/kde/Boolean.js
+++ b/deps/v8/test/webkit/fast/js/kde/Boolean.js
@@ -29,4 +29,4 @@ shouldBe("(new Boolean(true)).valueOf()", "true");
shouldBe("(new Boolean(false)).valueOf()", "false");
shouldBe("(new Boolean(Boolean(true))).valueOf()", "true");
shouldBeTrue("true.valueOf() === true");
-shouldBeTrue("false.toString() === 'false'");
\ No newline at end of file
+shouldBeTrue("false.toString() === 'false'");
diff --git a/deps/v8/test/webkit/fast/js/kde/Date-setYear.js b/deps/v8/test/webkit/fast/js/kde/Date-setYear.js
index 4ccf02517a..140403d2b1 100644
--- a/deps/v8/test/webkit/fast/js/kde/Date-setYear.js
+++ b/deps/v8/test/webkit/fast/js/kde/Date-setYear.js
@@ -33,4 +33,4 @@ shouldBe("d.setYear(100), d.getFullYear()", "100");
shouldBe("d.setYear(2050), d.getFullYear()", "2050");
shouldBe("d.setYear(1899), d.getFullYear()", "1899");
shouldBe("d.setYear(2000), d.getFullYear()", "2000");
-shouldBe("d.setYear(2100), d.getFullYear()", "2100"); \ No newline at end of file
+shouldBe("d.setYear(2100), d.getFullYear()", "2100");
diff --git a/deps/v8/test/webkit/fast/js/kde/Error.js b/deps/v8/test/webkit/fast/js/kde/Error.js
index 6ecd5c3c8f..0f1d911a12 100644
--- a/deps/v8/test/webkit/fast/js/kde/Error.js
+++ b/deps/v8/test/webkit/fast/js/kde/Error.js
@@ -32,4 +32,4 @@ shouldBe("(new Error('msg')).name", "'Error'");
shouldBe("Object.prototype.toString.apply(Error())", "'[object Error]'");
shouldBe("Object.prototype.toString.apply(Error)", "'[object Function]'");
-shouldBe("Object.prototype.toString.apply(EvalError)", "'[object Function]'"); \ No newline at end of file
+shouldBe("Object.prototype.toString.apply(EvalError)", "'[object Function]'");
diff --git a/deps/v8/test/webkit/fast/js/kde/GlobalObject.js b/deps/v8/test/webkit/fast/js/kde/GlobalObject.js
index 112aea8cfd..aa09cd2d37 100644
--- a/deps/v8/test/webkit/fast/js/kde/GlobalObject.js
+++ b/deps/v8/test/webkit/fast/js/kde/GlobalObject.js
@@ -96,4 +96,4 @@ shouldBe("parseFloat('3.1x', 3)", "3.1");
shouldBeFalse("isFinite(parseFloat('Infinity'))");
shouldBeFalse("delete NaN");
shouldBeFalse("delete Infinity");
-shouldBeFalse("delete undefined");
\ No newline at end of file
+shouldBeFalse("delete undefined");
diff --git a/deps/v8/test/webkit/fast/js/kde/Number.js b/deps/v8/test/webkit/fast/js/kde/Number.js
index 358fe7a09f..946c09367c 100644
--- a/deps/v8/test/webkit/fast/js/kde/Number.js
+++ b/deps/v8/test/webkit/fast/js/kde/Number.js
@@ -496,4 +496,4 @@ shouldBe("Number(0).toPrecision(17)","\"0.0000000000000000\"");
shouldBe("Number(0).toPrecision(18)","\"0.00000000000000000\"");
shouldBe("Number(0).toPrecision(19)","\"0.000000000000000000\"");
shouldBe("Number(0).toPrecision(20)","\"0.0000000000000000000\"");
-shouldBe("Number(0).toPrecision(21)","\"0.00000000000000000000\""); \ No newline at end of file
+shouldBe("Number(0).toPrecision(21)","\"0.00000000000000000000\"");
diff --git a/deps/v8/test/webkit/fast/js/kde/Object.js b/deps/v8/test/webkit/fast/js/kde/Object.js
index 2637309464..5138128eac 100644
--- a/deps/v8/test/webkit/fast/js/kde/Object.js
+++ b/deps/v8/test/webkit/fast/js/kde/Object.js
@@ -39,4 +39,4 @@ shouldBe("(new Object('s')).valueOf()", "'s'");
shouldBe("String(Object())", "'[object Object]'");
shouldBe("Object().toString()", "'[object Object]'");
-shouldBe("String(Object().valueOf())", "'[object Object]'"); \ No newline at end of file
+shouldBe("String(Object().valueOf())", "'[object Object]'");
diff --git a/deps/v8/test/webkit/fast/js/kde/Prototype.js b/deps/v8/test/webkit/fast/js/kde/Prototype.js
index a2971fde18..8e6638eb7d 100644
--- a/deps/v8/test/webkit/fast/js/kde/Prototype.js
+++ b/deps/v8/test/webkit/fast/js/kde/Prototype.js
@@ -55,4 +55,4 @@ shouldBe("b.author", "'Fred'"); // outpus "Fred"
///////////////////////////////////////////////////////
-shouldBe("delete Boolean.prototype", "false"); \ No newline at end of file
+shouldBe("delete Boolean.prototype", "false");
diff --git a/deps/v8/test/webkit/fast/js/kde/RegExp.js b/deps/v8/test/webkit/fast/js/kde/RegExp.js
index eea6a8776e..ede84d0ecb 100644
--- a/deps/v8/test/webkit/fast/js/kde/RegExp.js
+++ b/deps/v8/test/webkit/fast/js/kde/RegExp.js
@@ -179,4 +179,4 @@ shouldBe("(new RegExp('(?:)')).source", "'(?:)'");
shouldBe("/(?:)/.toString()", "'/(?:)/'");
shouldBe("/(?:)/.source", "'(?:)'");
-debug("Done."); \ No newline at end of file
+debug("Done.");
diff --git a/deps/v8/test/webkit/fast/js/kde/arguments-scope.js b/deps/v8/test/webkit/fast/js/kde/arguments-scope.js
index cef3698b5e..b6977390ea 100644
--- a/deps/v8/test/webkit/fast/js/kde/arguments-scope.js
+++ b/deps/v8/test/webkit/fast/js/kde/arguments-scope.js
@@ -70,4 +70,4 @@ test0();
test1();
test2(42);
test3();
-test4();
\ No newline at end of file
+test4();
diff --git a/deps/v8/test/webkit/fast/js/kde/assignments.js b/deps/v8/test/webkit/fast/js/kde/assignments.js
index 2cb49cf933..d654bb3dca 100644
--- a/deps/v8/test/webkit/fast/js/kde/assignments.js
+++ b/deps/v8/test/webkit/fast/js/kde/assignments.js
@@ -33,4 +33,4 @@ shouldBe("var i = 8; i >>= 1", "4");
shouldBe("var i = 1; i >>= 2", "0");
shouldBe("var i = -8; i >>= 24", "-1");
shouldBe("var i = 8; i >>>= 2", "2");
-shouldBe("var i = -8; i >>>= 24", "255"); \ No newline at end of file
+shouldBe("var i = -8; i >>>= 24", "255");
diff --git a/deps/v8/test/webkit/fast/js/kde/cast.js b/deps/v8/test/webkit/fast/js/kde/cast.js
index c8d3792a63..72d5e1c8b3 100644
--- a/deps/v8/test/webkit/fast/js/kde/cast.js
+++ b/deps/v8/test/webkit/fast/js/kde/cast.js
@@ -25,4 +25,4 @@ description("KDE JS Test");
shouldBeTrue("Boolean(1) === true");
shouldBeTrue("var s = String; s(1) === '1'");
shouldBeTrue("n = Number; n(true) === 1");
-shouldBe("String(Array('a', 'b'        ))", "'a,b'"); \ No newline at end of file
+shouldBe("String(Array('a', 'b'        ))", "'a,b'");
diff --git a/deps/v8/test/webkit/fast/js/kde/comment-1.js b/deps/v8/test/webkit/fast/js/kde/comment-1.js
index 043f052f47..391ab8ec81 100644
--- a/deps/v8/test/webkit/fast/js/kde/comment-1.js
+++ b/deps/v8/test/webkit/fast/js/kde/comment-1.js
@@ -22,4 +22,4 @@
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
description("KDE JS Test");
-// comment with linebreak
\ No newline at end of file
+// comment with linebreak
diff --git a/deps/v8/test/webkit/fast/js/kde/comment-2.js b/deps/v8/test/webkit/fast/js/kde/comment-2.js
index 043f052f47..391ab8ec81 100644
--- a/deps/v8/test/webkit/fast/js/kde/comment-2.js
+++ b/deps/v8/test/webkit/fast/js/kde/comment-2.js
@@ -22,4 +22,4 @@
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
description("KDE JS Test");
-// comment with linebreak
\ No newline at end of file
+// comment with linebreak
diff --git a/deps/v8/test/webkit/fast/js/kde/completion.js b/deps/v8/test/webkit/fast/js/kde/completion.js
index f75c98f401..f9f0209397 100644
--- a/deps/v8/test/webkit/fast/js/kde/completion.js
+++ b/deps/v8/test/webkit/fast/js/kde/completion.js
@@ -46,4 +46,4 @@ shouldBe("val", "13");
val = eval("14; function f() {}");
shouldBe("val", "14");
val = eval("15; var v = 0");
-shouldBe("val", "15"); \ No newline at end of file
+shouldBe("val", "15");
diff --git a/deps/v8/test/webkit/fast/js/kde/conditional.js b/deps/v8/test/webkit/fast/js/kde/conditional.js
index ee9c0bf867..beed2073c3 100644
--- a/deps/v8/test/webkit/fast/js/kde/conditional.js
+++ b/deps/v8/test/webkit/fast/js/kde/conditional.js
@@ -30,4 +30,4 @@ shouldBe("undefined ? 1 : 2", "2");
var a = 1;
if ( undefined )
a = 2;
-shouldBe("/*var a=1;if (undefined) a = 2;*/ a", "1"); \ No newline at end of file
+shouldBe("/*var a=1;if (undefined) a = 2;*/ a", "1");
diff --git a/deps/v8/test/webkit/fast/js/kde/constructor_length.js b/deps/v8/test/webkit/fast/js/kde/constructor_length.js
index 5ec9dda0ad..75ddd50543 100644
--- a/deps/v8/test/webkit/fast/js/kde/constructor_length.js
+++ b/deps/v8/test/webkit/fast/js/kde/constructor_length.js
@@ -36,4 +36,4 @@ shouldBe("RangeError.length","1");
shouldBe("ReferenceError.length","1");
shouldBe("SyntaxError.length","1");
shouldBe("TypeError.length","1");
-shouldBe("URIError.length","1"); \ No newline at end of file
+shouldBe("URIError.length","1");
diff --git a/deps/v8/test/webkit/fast/js/kde/crash-1.js b/deps/v8/test/webkit/fast/js/kde/crash-1.js
index 316ce51342..e0b082e80f 100644
--- a/deps/v8/test/webkit/fast/js/kde/crash-1.js
+++ b/deps/v8/test/webkit/fast/js/kde/crash-1.js
@@ -29,4 +29,4 @@ try {
v.toString();
} catch (e) {
debug("OK. Caught an exception.");
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/webkit/fast/js/kde/crash-2.js b/deps/v8/test/webkit/fast/js/kde/crash-2.js
index 84bd67f32b..feab15b1bf 100644
--- a/deps/v8/test/webkit/fast/js/kde/crash-2.js
+++ b/deps/v8/test/webkit/fast/js/kde/crash-2.js
@@ -31,4 +31,4 @@ try {
foo();
} catch (e) {
debug("OK. Caught an exception");
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/webkit/fast/js/kde/delete.js b/deps/v8/test/webkit/fast/js/kde/delete.js
index 0acb308de4..8aeb0d09e8 100644
--- a/deps/v8/test/webkit/fast/js/kde/delete.js
+++ b/deps/v8/test/webkit/fast/js/kde/delete.js
@@ -24,4 +24,4 @@
description("KDE JS Test");
shouldBe("a = 1; delete a;", "true");
shouldBe("delete nonexistant;", "true");
-shouldBe("delete NaN", "false"); \ No newline at end of file
+shouldBe("delete NaN", "false");
diff --git a/deps/v8/test/webkit/fast/js/kde/empty.js b/deps/v8/test/webkit/fast/js/kde/empty.js
index f09008a591..6ab1596527 100644
--- a/deps/v8/test/webkit/fast/js/kde/empty.js
+++ b/deps/v8/test/webkit/fast/js/kde/empty.js
@@ -21,4 +21,4 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-description("KDE JS Test"); \ No newline at end of file
+description("KDE JS Test");
diff --git a/deps/v8/test/webkit/fast/js/kde/encode_decode_uri.js b/deps/v8/test/webkit/fast/js/kde/encode_decode_uri.js
index c58cc12bab..54f374ea93 100644
--- a/deps/v8/test/webkit/fast/js/kde/encode_decode_uri.js
+++ b/deps/v8/test/webkit/fast/js/kde/encode_decode_uri.js
@@ -99,4 +99,4 @@ function checkWithFunctions(encodeFunction, decodeFunction)
}
checkWithFunctions("encodeURI", "decodeURI");
-checkWithFunctions("encodeURIComponent", "decodeURIComponent");
\ No newline at end of file
+checkWithFunctions("encodeURIComponent", "decodeURIComponent");
diff --git a/deps/v8/test/webkit/fast/js/kde/eval.js b/deps/v8/test/webkit/fast/js/kde/eval.js
index 7c4b5d4b92..03d05e4498 100644
--- a/deps/v8/test/webkit/fast/js/kde/eval.js
+++ b/deps/v8/test/webkit/fast/js/kde/eval.js
@@ -53,4 +53,4 @@ function lotto() {
for (var j = 0; j < 1; j++)
return eval('j');
}
-shouldBe("lotto()", "0"); \ No newline at end of file
+shouldBe("lotto()", "0");
diff --git a/deps/v8/test/webkit/fast/js/kde/evil-n.js b/deps/v8/test/webkit/fast/js/kde/evil-n.js
index b9b5cdb376..71c9dc5e9f 100644
--- a/deps/v8/test/webkit/fast/js/kde/evil-n.js
+++ b/deps/v8/test/webkit/fast/js/kde/evil-n.js
@@ -25,4 +25,4 @@ description("KDE JS Test");
shouldBe("(new Error()).message", "''");
// the empty match isn't taken in account
-shouldBe("''.split(/.*/).length", "0"); \ No newline at end of file
+shouldBe("''.split(/.*/).length", "0");
diff --git a/deps/v8/test/webkit/fast/js/kde/exception_propagation.js b/deps/v8/test/webkit/fast/js/kde/exception_propagation.js
index 71be4f784e..9d6e753bc6 100644
--- a/deps/v8/test/webkit/fast/js/kde/exception_propagation.js
+++ b/deps/v8/test/webkit/fast/js/kde/exception_propagation.js
@@ -443,4 +443,4 @@ try {
catch (e) {
}
shouldBe("set_inside_with_cantconverttoobject","4");
-// ### test case, sw
\ No newline at end of file
+// ### test case, sw
diff --git a/deps/v8/test/webkit/fast/js/kde/exceptions.js b/deps/v8/test/webkit/fast/js/kde/exceptions.js
index f2c9872830..73b7b57fb4 100644
--- a/deps/v8/test/webkit/fast/js/kde/exceptions.js
+++ b/deps/v8/test/webkit/fast/js/kde/exceptions.js
@@ -124,4 +124,4 @@ testThrow2();
testReferenceError();
testFunctionError();
testMathFunctionError();
-testWhileAbortion();
\ No newline at end of file
+testWhileAbortion();
diff --git a/deps/v8/test/webkit/fast/js/kde/func-decl.js b/deps/v8/test/webkit/fast/js/kde/func-decl.js
index b046b22bc9..672aeca719 100644
--- a/deps/v8/test/webkit/fast/js/kde/func-decl.js
+++ b/deps/v8/test/webkit/fast/js/kde/func-decl.js
@@ -65,4 +65,4 @@ function test() {
}
}
-test();
\ No newline at end of file
+test();
diff --git a/deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto-expected.txt b/deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto-expected.txt
index 99818c3cb5..8fc28af4b2 100644
--- a/deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto-expected.txt
+++ b/deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto-expected.txt
@@ -47,6 +47,7 @@ PASS String.prototype.charCodeAt.__proto__ is Function.prototype
PASS String.prototype.indexOf.__proto__ is Function.prototype
PASS String.prototype.lastIndexOf.__proto__ is Function.prototype
PASS String.prototype.match.__proto__ is Function.prototype
+PASS String.prototype.normalize.__proto__ is Function.prototype
PASS String.prototype.replace.__proto__ is Function.prototype
PASS String.prototype.search.__proto__ is Function.prototype
PASS String.prototype.slice.__proto__ is Function.prototype
diff --git a/deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto.js b/deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto.js
index de13b70855..cd2657a3c5 100644
--- a/deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto.js
+++ b/deps/v8/test/webkit/fast/js/kde/inbuilt_function_proto.js
@@ -43,6 +43,7 @@ shouldBe("String.prototype.charCodeAt.__proto__","Function.prototype");
shouldBe("String.prototype.indexOf.__proto__","Function.prototype");
shouldBe("String.prototype.lastIndexOf.__proto__","Function.prototype");
shouldBe("String.prototype.match.__proto__","Function.prototype");
+shouldBe("String.prototype.normalize.__proto__","Function.prototype");
shouldBe("String.prototype.replace.__proto__","Function.prototype");
shouldBe("String.prototype.search.__proto__","Function.prototype");
shouldBe("String.prototype.slice.__proto__","Function.prototype");
@@ -114,4 +115,4 @@ shouldBe("Date.prototype.toGMTString.__proto__","Function.prototype");
shouldBe("RegExp.prototype.exec.__proto__","Function.prototype");
shouldBe("RegExp.prototype.test.__proto__","Function.prototype");
shouldBe("RegExp.prototype.toString.__proto__","Function.prototype");
-shouldBe("Error.prototype.toString.__proto__","Function.prototype"); \ No newline at end of file
+shouldBe("Error.prototype.toString.__proto__","Function.prototype");
diff --git a/deps/v8/test/webkit/fast/js/kde/iteration.js b/deps/v8/test/webkit/fast/js/kde/iteration.js
index 7004be054b..3f76cc180d 100644
--- a/deps/v8/test/webkit/fast/js/kde/iteration.js
+++ b/deps/v8/test/webkit/fast/js/kde/iteration.js
@@ -76,4 +76,4 @@ list = "";
for (var a = [1,2,3], length = a.length, i = 0; i < length; i++) {
list += a[i];
}
-shouldBe("list", "'123'"); \ No newline at end of file
+shouldBe("list", "'123'");
diff --git a/deps/v8/test/webkit/fast/js/kde/j-comment-3.js b/deps/v8/test/webkit/fast/js/kde/j-comment-3.js
index 2cf0a0f5de..e20d83a001 100644
--- a/deps/v8/test/webkit/fast/js/kde/j-comment-3.js
+++ b/deps/v8/test/webkit/fast/js/kde/j-comment-3.js
@@ -22,4 +22,4 @@
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
description("KDE JS Test");
-<!-- HTML comment (not ECMA)
\ No newline at end of file
+<!-- HTML comment (not ECMA)
diff --git a/deps/v8/test/webkit/fast/js/kde/j-comment-4.js b/deps/v8/test/webkit/fast/js/kde/j-comment-4.js
index 4d23f3bc5b..314493f158 100644
--- a/deps/v8/test/webkit/fast/js/kde/j-comment-4.js
+++ b/deps/v8/test/webkit/fast/js/kde/j-comment-4.js
@@ -22,4 +22,4 @@
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
description("KDE JS Test");
---> end of HTML comment (not ECMA)
\ No newline at end of file
+--> end of HTML comment (not ECMA)
diff --git a/deps/v8/test/webkit/fast/js/kde/literals.js b/deps/v8/test/webkit/fast/js/kde/literals.js
index 57cf8cd919..ca6f0544e6 100644
--- a/deps/v8/test/webkit/fast/js/kde/literals.js
+++ b/deps/v8/test/webkit/fast/js/kde/literals.js
@@ -35,4 +35,4 @@ shouldBe("0xFF", "255");
shouldBe("01", "1");
shouldBe("010", "8");
shouldBe("09", "9");
-shouldBe("019", "19"); \ No newline at end of file
+shouldBe("019", "19");
diff --git a/deps/v8/test/webkit/fast/js/kde/lval-exceptions.js b/deps/v8/test/webkit/fast/js/kde/lval-exceptions.js
index 32641f8d4d..14012b561d 100644
--- a/deps/v8/test/webkit/fast/js/kde/lval-exceptions.js
+++ b/deps/v8/test/webkit/fast/js/kde/lval-exceptions.js
@@ -71,4 +71,4 @@ function fnShouldNotThrow(f)
} catch (e) {
testFailed(f + " threw an exception " + e + " when no exception expected");
}
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/webkit/fast/js/kde/math.js b/deps/v8/test/webkit/fast/js/kde/math.js
index 3cbb1b285a..4f73809397 100644
--- a/deps/v8/test/webkit/fast/js/kde/math.js
+++ b/deps/v8/test/webkit/fast/js/kde/math.js
@@ -128,4 +128,4 @@ shouldBe("list","''");
Math.myprop=true; // adding a custom property to the math object (why not?)
list=""
for ( var i in Math ) { list += i + ','; }
-shouldBe("list","'myprop,'"); \ No newline at end of file
+shouldBe("list","'myprop,'");
diff --git a/deps/v8/test/webkit/fast/js/kde/md5-1.js b/deps/v8/test/webkit/fast/js/kde/md5-1.js
index 9c20c550cb..5df7af01f8 100644
--- a/deps/v8/test/webkit/fast/js/kde/md5-1.js
+++ b/deps/v8/test/webkit/fast/js/kde/md5-1.js
@@ -410,4 +410,4 @@ function MD5(entree)
return s;
}
-shouldBe("MD5('kde')", "'186cf28b76f2264e9fea8fcf91cb4f5d'"); \ No newline at end of file
+shouldBe("MD5('kde')", "'186cf28b76f2264e9fea8fcf91cb4f5d'");
diff --git a/deps/v8/test/webkit/fast/js/kde/md5-2.js b/deps/v8/test/webkit/fast/js/kde/md5-2.js
index fa591704c8..1b9d4aa628 100644
--- a/deps/v8/test/webkit/fast/js/kde/md5-2.js
+++ b/deps/v8/test/webkit/fast/js/kde/md5-2.js
@@ -240,4 +240,4 @@ function hexMD5w(str) { return binl2hex(coreMD5(strw2binl(str))) }
function b64MD5 (str) { return binl2b64(coreMD5( str2binl(str))) }
function b64MD5w(str) { return binl2b64(coreMD5(strw2binl(str))) }
/* Backward compatibility */
-function calcMD5(str) { return binl2hex(coreMD5( str2binl(str))) } \ No newline at end of file
+function calcMD5(str) { return binl2hex(coreMD5( str2binl(str))) }
diff --git a/deps/v8/test/webkit/fast/js/kde/object_prototype.js b/deps/v8/test/webkit/fast/js/kde/object_prototype.js
index 3b17cf6edc..8ffaf14671 100644
--- a/deps/v8/test/webkit/fast/js/kde/object_prototype.js
+++ b/deps/v8/test/webkit/fast/js/kde/object_prototype.js
@@ -98,4 +98,4 @@ shouldBe("myfunc.someproperty","4");
shouldBe("myfunc.propertyIsEnumerable('length')","false");
shouldBe("myfunc.propertyIsEnumerable('someproperty')","true");
shouldBe("checkEnumerable(myfunc,'length')","false");
-shouldBe("checkEnumerable(myfunc,'someproperty')","true"); \ No newline at end of file
+shouldBe("checkEnumerable(myfunc,'someproperty')","true");
diff --git a/deps/v8/test/webkit/fast/js/kde/object_prototype_tostring.js b/deps/v8/test/webkit/fast/js/kde/object_prototype_tostring.js
index 9814d8cea3..8b3ce98404 100644
--- a/deps/v8/test/webkit/fast/js/kde/object_prototype_tostring.js
+++ b/deps/v8/test/webkit/fast/js/kde/object_prototype_tostring.js
@@ -48,4 +48,4 @@ _error.toString = Object.prototype.toString;
shouldBe("_error.toString()","\"[object Error]\"");
var _function = new Function();
_function.toString = Object.prototype.toString;
-shouldBe("_function.toString()","\"[object Function]\""); \ No newline at end of file
+shouldBe("_function.toString()","\"[object Function]\"");
diff --git a/deps/v8/test/webkit/fast/js/kde/operators.js b/deps/v8/test/webkit/fast/js/kde/operators.js
index 185ae01eab..38e658b2bd 100644
--- a/deps/v8/test/webkit/fast/js/kde/operators.js
+++ b/deps/v8/test/webkit/fast/js/kde/operators.js
@@ -501,4 +501,4 @@ shouldBeTrue("'a' in { a:1, b:2 }");
// instanceof
// Those 2 lines don't parse in Netscape...
shouldBe("(new Boolean()) instanceof Boolean", "true");
-shouldBe("(new Boolean()) instanceof Number", "false"); \ No newline at end of file
+shouldBe("(new Boolean()) instanceof Number", "false");
diff --git a/deps/v8/test/webkit/fast/js/kde/parse.js b/deps/v8/test/webkit/fast/js/kde/parse.js
index ca85e7637c..94467f1061 100644
--- a/deps/v8/test/webkit/fast/js/kde/parse.js
+++ b/deps/v8/test/webkit/fast/js/kde/parse.js
@@ -63,4 +63,4 @@ shouldThrow("var f\\u00F7;");
shouldThrow("var \\u0030;");
shouldThrow("var test = { }; test.i= 0; test.i\\u002b= 1; test.i;");
-shouldBe("var test = { }; test.i= 0; test.i\u002b= 1; test.i;", "1"); \ No newline at end of file
+shouldBe("var test = { }; test.i= 0; test.i\u002b= 1; test.i;", "1");
diff --git a/deps/v8/test/webkit/fast/js/kde/prototype_length.js b/deps/v8/test/webkit/fast/js/kde/prototype_length.js
index 2936fa55a9..4eb888c3da 100644
--- a/deps/v8/test/webkit/fast/js/kde/prototype_length.js
+++ b/deps/v8/test/webkit/fast/js/kde/prototype_length.js
@@ -57,4 +57,4 @@ shouldBe("foundFunctionPrototypeLength","false");
var foundStringPrototypeLength = false;
for (i in String.prototype) { if (i == "length") foundStringPrototypeLength = true; }
-shouldBe("foundStringPrototypeLength","false"); \ No newline at end of file
+shouldBe("foundStringPrototypeLength","false");
diff --git a/deps/v8/test/webkit/fast/js/kde/prototype_proto.js b/deps/v8/test/webkit/fast/js/kde/prototype_proto.js
index f956d94cd7..386d503cdd 100644
--- a/deps/v8/test/webkit/fast/js/kde/prototype_proto.js
+++ b/deps/v8/test/webkit/fast/js/kde/prototype_proto.js
@@ -30,4 +30,4 @@ shouldBe("Boolean.prototype.__proto__","Object.prototype");
shouldBe("Number.prototype.__proto__","Object.prototype");
shouldBe("Date.prototype.__proto__","Object.prototype");
shouldBe("RegExp.prototype.__proto__","Object.prototype");
-shouldBe("Error.prototype.__proto__","Object.prototype"); \ No newline at end of file
+shouldBe("Error.prototype.__proto__","Object.prototype");
diff --git a/deps/v8/test/webkit/fast/js/kde/scope.js b/deps/v8/test/webkit/fast/js/kde/scope.js
index cead049be8..e628d477fb 100644
--- a/deps/v8/test/webkit/fast/js/kde/scope.js
+++ b/deps/v8/test/webkit/fast/js/kde/scope.js
@@ -55,4 +55,4 @@ function testForIn() {
}
testForIn();
-shouldBe("g", "'foo'"); //Before the eval, g was in outer scope, but not after! \ No newline at end of file
+shouldBe("g", "'foo'"); //Before the eval, g was in outer scope, but not after!
diff --git a/deps/v8/test/webkit/fast/js/kde/statements.js b/deps/v8/test/webkit/fast/js/kde/statements.js
index ecc6c321a4..cf8b2a97fe 100644
--- a/deps/v8/test/webkit/fast/js/kde/statements.js
+++ b/deps/v8/test/webkit/fast/js/kde/statements.js
@@ -96,4 +96,4 @@ function testSwitch4(v) {
return result;
};
-shouldBe("testSwitch4(0)", "'ab'"); \ No newline at end of file
+shouldBe("testSwitch4(0)", "'ab'");
diff --git a/deps/v8/test/webkit/fast/js/kde/var_decl_init.js b/deps/v8/test/webkit/fast/js/kde/var_decl_init.js
index 0bf4a9e273..c72ca4ed9d 100644
--- a/deps/v8/test/webkit/fast/js/kde/var_decl_init.js
+++ b/deps/v8/test/webkit/fast/js/kde/var_decl_init.js
@@ -181,4 +181,4 @@ shouldBe("overrideVar", "1");
var overrideVar2 = 1;
var overrideVar2 = 2;
-shouldBe("overrideVar2", "2"); \ No newline at end of file
+shouldBe("overrideVar2", "2");
diff --git a/deps/v8/test/webkit/fast/js/object-bad-time-expected.txt b/deps/v8/test/webkit/fast/js/object-bad-time-expected.txt
index b282bd8451..7b6b6ea73d 100644
--- a/deps/v8/test/webkit/fast/js/object-bad-time-expected.txt
+++ b/deps/v8/test/webkit/fast/js/object-bad-time-expected.txt
@@ -977,58 +977,57 @@ PASS "0,1,2,3,4" is "0,1,2,3,4"
PASS "0,1,2,3,4" is "0,1,2,3,4"
PASS "0,1,2,3,4" is "0,1,2,3,4"
Henceforth I will have a bad time.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL ouches should be 50. Was 0.
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS ouches is 50
PASS successfullyParsed is true
TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/fast/js/object-bad-time.js b/deps/v8/test/webkit/fast/js/object-bad-time.js
index 2fdb2d17ce..45fb4dd292 100644
--- a/deps/v8/test/webkit/fast/js/object-bad-time.js
+++ b/deps/v8/test/webkit/fast/js/object-bad-time.js
@@ -36,7 +36,7 @@ function foo(haveABadTime) {
for (var i = 0; i < result.length; ++i) {
if (i == haveABadTime) {
debug("Henceforth I will have a bad time.");
- Cons.prototype.__defineSetter__("3", function() { debug("Ouch!"); ouches++; });
+ Cons.prototype.__defineSetter__("3", function() { ouches++; });
}
result[i] = i;
}
diff --git a/deps/v8/test/webkit/fast/js/object-slow-put-expected.txt b/deps/v8/test/webkit/fast/js/object-slow-put-expected.txt
index 901133b4b7..f20c4a59e8 100644
--- a/deps/v8/test/webkit/fast/js/object-slow-put-expected.txt
+++ b/deps/v8/test/webkit/fast/js/object-slow-put-expected.txt
@@ -26,108 +26,107 @@ Tests that defining a setter on the prototype of an object used for indexed stor
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL "0,1,2,3,4" should be 0,1,2,,4. Was 0,1,2,3,4.
-FAIL ouches should be 100. Was 0.
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS "0,1,2,,4" is "0,1,2,,4"
+PASS ouches is 100
PASS successfullyParsed is true
TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/fast/js/object-slow-put.js b/deps/v8/test/webkit/fast/js/object-slow-put.js
index bf8234f209..49c20ba210 100644
--- a/deps/v8/test/webkit/fast/js/object-slow-put.js
+++ b/deps/v8/test/webkit/fast/js/object-slow-put.js
@@ -29,7 +29,7 @@ function Cons() {
}
var ouches = 0;
-Cons.prototype.__defineSetter__("3", function() { debug("Ouch!"); ouches++; });
+Cons.prototype.__defineSetter__("3", function() { ouches++; });
function foo() {
var result = new Cons();
diff --git a/deps/v8/test/webkit/fast/js/string-anchor.js b/deps/v8/test/webkit/fast/js/string-anchor.js
index f213b89156..d954928acc 100644
--- a/deps/v8/test/webkit/fast/js/string-anchor.js
+++ b/deps/v8/test/webkit/fast/js/string-anchor.js
@@ -50,4 +50,4 @@ shouldThrow("String.prototype.anchor.call(undefined)", '"TypeError: Type error"'
shouldThrow("String.prototype.anchor.call(null)", '"TypeError: Type error"');
// Check anchor.length.
-shouldBe("String.prototype.anchor.length", "1"); \ No newline at end of file
+shouldBe("String.prototype.anchor.length", "1");
diff --git a/deps/v8/test/webkit/fast/js/string-fontcolor.js b/deps/v8/test/webkit/fast/js/string-fontcolor.js
index 67f4ef2e07..17ca62ff5f 100644
--- a/deps/v8/test/webkit/fast/js/string-fontcolor.js
+++ b/deps/v8/test/webkit/fast/js/string-fontcolor.js
@@ -50,4 +50,4 @@ shouldThrow("String.prototype.fontcolor.call(undefined)", '"TypeError: Type erro
shouldThrow("String.prototype.fontcolor.call(null)", '"TypeError: Type error"');
// Check fontcolor.length.
-shouldBe("String.prototype.fontcolor.length", "1"); \ No newline at end of file
+shouldBe("String.prototype.fontcolor.length", "1");
diff --git a/deps/v8/test/webkit/fast/js/string-fontsize.js b/deps/v8/test/webkit/fast/js/string-fontsize.js
index e103e5e6bc..b3b48c5b35 100644
--- a/deps/v8/test/webkit/fast/js/string-fontsize.js
+++ b/deps/v8/test/webkit/fast/js/string-fontsize.js
@@ -53,4 +53,4 @@ shouldThrow("String.prototype.fontsize.call(undefined)", '"TypeError: Type error
shouldThrow("String.prototype.fontsize.call(null)", '"TypeError: Type error"');
// Check fontsize.length.
-shouldBe("String.prototype.fontsize.length", "1"); \ No newline at end of file
+shouldBe("String.prototype.fontsize.length", "1");
diff --git a/deps/v8/test/webkit/fast/js/string-link.js b/deps/v8/test/webkit/fast/js/string-link.js
index 8b96915be1..873e7af3b4 100644
--- a/deps/v8/test/webkit/fast/js/string-link.js
+++ b/deps/v8/test/webkit/fast/js/string-link.js
@@ -53,4 +53,4 @@ shouldThrow("String.prototype.link.call(undefined)", '"TypeError: Type error"');
shouldThrow("String.prototype.link.call(null)", '"TypeError: Type error"');
// Check link.length.
-shouldBe("String.prototype.link.length", "1"); \ No newline at end of file
+shouldBe("String.prototype.link.length", "1");
diff --git a/deps/v8/test/webkit/fast/js/toString-number.js b/deps/v8/test/webkit/fast/js/toString-number.js
index 2acb3d1d09..6280f41537 100644
--- a/deps/v8/test/webkit/fast/js/toString-number.js
+++ b/deps/v8/test/webkit/fast/js/toString-number.js
@@ -91,4 +91,4 @@ description("Test the conversion performed by the function Number.prototype.toSt
shouldBeEqualToString('Number.prototype.toString.call(' + number + ', 36)', stringBase36);
shouldBeEqualToString('Number.prototype.toString.call(new Number(' + number + '), 36)', stringBase36);
}
- successfullyParsed = true; \ No newline at end of file
+ successfullyParsed = true;
diff --git a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
index 6bf5ae57ba..3e36c70615 100644
--- a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
+++ b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
@@ -28,12 +28,13 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS [1].toString() is '1'
PASS [1].toLocaleString() is 'toLocaleString'
-FAIL [1].toLocaleString() should be 1. Threw exception TypeError: Property 'toLocaleString' of object [object Number] is not a function
+FAIL [1].toLocaleString() should be 1. Threw exception TypeError: string is not a function
PASS [/r/].toString() is 'toString2'
PASS [/r/].toLocaleString() is 'toLocaleString2'
-FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: Property 'toLocaleString' of object [object RegExp] is not a function
+FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: string is not a function
PASS caught is true
PASS successfullyParsed is true
+
TEST COMPLETE
diff --git a/deps/v8/test/webkit/fast/regex/assertion.js b/deps/v8/test/webkit/fast/regex/assertion.js
index 3d980ce14d..d4c46f2928 100644
--- a/deps/v8/test/webkit/fast/regex/assertion.js
+++ b/deps/v8/test/webkit/fast/regex/assertion.js
@@ -107,4 +107,3 @@ shouldBe("regex23.exec('ax')", "null");
var regex24 = /(?=a|b?)c/;
shouldBe("regex24.exec('x')", "null");
-
diff --git a/deps/v8/test/webkit/fast/regex/constructor.js b/deps/v8/test/webkit/fast/regex/constructor.js
index 9d35245286..552d82e3ac 100644
--- a/deps/v8/test/webkit/fast/regex/constructor.js
+++ b/deps/v8/test/webkit/fast/regex/constructor.js
@@ -29,4 +29,3 @@ shouldBeTrue("re === RegExp(re)");
shouldBeTrue("re !== new RegExp(re)");
shouldThrow("re === RegExp(re,'i')");
shouldThrow("re !== new RegExp(re,'i')");
-
diff --git a/deps/v8/test/webkit/fast/regex/dotstar.js b/deps/v8/test/webkit/fast/regex/dotstar.js
index 20696377d4..883c4aacfe 100644
--- a/deps/v8/test/webkit/fast/regex/dotstar.js
+++ b/deps/v8/test/webkit/fast/regex/dotstar.js
@@ -157,4 +157,3 @@ shouldBe("regexp12.exec('first\\nblah2\\nblah3')", "['blah3']");
var regexp13 = /.*\n\d+.*/;
shouldBe("regexp13.exec('abc\\n123')", "['abc\\n123']");
-
diff --git a/deps/v8/test/webkit/fast/regex/parentheses.js b/deps/v8/test/webkit/fast/regex/parentheses.js
index 57a2830fe0..4d7a141d22 100644
--- a/deps/v8/test/webkit/fast/regex/parentheses.js
+++ b/deps/v8/test/webkit/fast/regex/parentheses.js
@@ -274,4 +274,3 @@ var regexp58 = /a|b(?:[^b])*?c/;
shouldBe("regexp58.exec('badbc')", "['a']");
var regexp59 = /(X(?:.(?!X))*?Y)|(Y(?:.(?!Y))*?Z)/g;
shouldBe("'Y aaa X Match1 Y aaa Y Match2 Z'.match(regexp59)", "['X Match1 Y','Y Match2 Z']");
-
diff --git a/deps/v8/test/webkit/function-dot-apply-replace-base.js b/deps/v8/test/webkit/function-dot-apply-replace-base.js
index ed0f84b978..e0494e54db 100644
--- a/deps/v8/test/webkit/function-dot-apply-replace-base.js
+++ b/deps/v8/test/webkit/function-dot-apply-replace-base.js
@@ -44,6 +44,3 @@ function bar() {
}
shouldBe("bar()", "3");
-
-
-
diff --git a/deps/v8/test/webkit/get-by-pname-non-final-object.js b/deps/v8/test/webkit/get-by-pname-non-final-object.js
index f891af680c..1c798d1d55 100644
--- a/deps/v8/test/webkit/get-by-pname-non-final-object.js
+++ b/deps/v8/test/webkit/get-by-pname-non-final-object.js
@@ -70,4 +70,3 @@ for (var i = 0; i < 100; ++i) {
shouldBe("foo(r)", "113");
shouldBe("foo(s)", "182");
}
-
diff --git a/deps/v8/test/webkit/get-by-pname-that-looks-like-a-patchable-get-by-val.js b/deps/v8/test/webkit/get-by-pname-that-looks-like-a-patchable-get-by-val.js
index d3e69b6dc9..a0b6ccf81e 100644
--- a/deps/v8/test/webkit/get-by-pname-that-looks-like-a-patchable-get-by-val.js
+++ b/deps/v8/test/webkit/get-by-pname-that-looks-like-a-patchable-get-by-val.js
@@ -40,5 +40,3 @@ function foo() {
}
shouldBe("foo()", "300");
-
-
diff --git a/deps/v8/test/webkit/get-by-pname.js b/deps/v8/test/webkit/get-by-pname.js
index 13a7c5d4a9..07afa168a4 100644
--- a/deps/v8/test/webkit/get-by-pname.js
+++ b/deps/v8/test/webkit/get-by-pname.js
@@ -45,4 +45,3 @@ for (var i = 0; i < 100; ++i) {
shouldBe("foo(r)", "113");
shouldBe("foo(s)", "182");
}
-
diff --git a/deps/v8/test/webkit/indexed-setter-on-global-object.js b/deps/v8/test/webkit/indexed-setter-on-global-object.js
index 8453206849..20c70c24f5 100644
--- a/deps/v8/test/webkit/indexed-setter-on-global-object.js
+++ b/deps/v8/test/webkit/indexed-setter-on-global-object.js
@@ -34,4 +34,3 @@ this.__defineSetter__(42, function(value) {
this[42] = "foo";
shouldBe("thingy", "\"foo\"");
-
diff --git a/deps/v8/test/webkit/instance-of-immediates-expected.txt b/deps/v8/test/webkit/instance-of-immediates-expected.txt
index a6f6cc5072..db58f00d30 100644
--- a/deps/v8/test/webkit/instance-of-immediates-expected.txt
+++ b/deps/v8/test/webkit/instance-of-immediates-expected.txt
@@ -27,11 +27,11 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS (1 instanceof 1) threw exception TypeError: Expecting a function in instanceof check, but got 1.
-PASS ({} instanceof 1) threw exception TypeError: Expecting a function in instanceof check, but got #<Object>.
-PASS (obj instanceof 1) threw exception TypeError: Expecting a function in instanceof check, but got #<Constructor>.
-PASS (1 instanceof {}) threw exception TypeError: Expecting a function in instanceof check, but got 1.
+PASS ({} instanceof 1) threw exception TypeError: Expecting a function in instanceof check, but got 1.
+PASS (obj instanceof 1) threw exception TypeError: Expecting a function in instanceof check, but got 1.
+PASS (1 instanceof {}) threw exception TypeError: Expecting a function in instanceof check, but got #<Object>.
PASS ({} instanceof {}) threw exception TypeError: Expecting a function in instanceof check, but got #<Object>.
-PASS (obj instanceof {}) threw exception TypeError: Expecting a function in instanceof check, but got #<Constructor>.
+PASS (obj instanceof {}) threw exception TypeError: Expecting a function in instanceof check, but got #<Object>.
PASS (1 instanceof Constructor) is false
PASS ({} instanceof Constructor) is false
PASS (obj instanceof Constructor) is true
diff --git a/deps/v8/test/webkit/new-array-double-with-holes.js b/deps/v8/test/webkit/new-array-double-with-holes.js
index 208fc91dbe..faf2bfbf3b 100644
--- a/deps/v8/test/webkit/new-array-double-with-holes.js
+++ b/deps/v8/test/webkit/new-array-double-with-holes.js
@@ -31,4 +31,3 @@ function foo(array, i) {
for (var i = 0; i < 100; ++i)
shouldBe("foo([, 1.5], 0)", "void 0");
-
diff --git a/deps/v8/test/webkit/regexp-in-and-foreach-handling.js b/deps/v8/test/webkit/regexp-in-and-foreach-handling.js
index a2430db7ba..f345c8f544 100644
--- a/deps/v8/test/webkit/regexp-in-and-foreach-handling.js
+++ b/deps/v8/test/webkit/regexp-in-and-foreach-handling.js
@@ -69,4 +69,3 @@ for (var i in tests) {
shouldBe('testInOperator(' + i + ')', tests[i][2]);
shouldBe('testForEachFunction(' + i + ')', tests[i][2]);
}
-
diff --git a/deps/v8/test/webkit/regexp-zero-length-alternatives.js b/deps/v8/test/webkit/regexp-zero-length-alternatives.js
index 6e18de08c7..c91f1c1217 100644
--- a/deps/v8/test/webkit/regexp-zero-length-alternatives.js
+++ b/deps/v8/test/webkit/regexp-zero-length-alternatives.js
@@ -272,4 +272,3 @@ shouldBe('emptyStr.match(re34)', '[""]');
shouldBe('s1.match(re34)', '[""]');
shouldBe('s2.match(re34)', '[""]');
shouldBe('s3.match(re34)', '[""]');
-
diff --git a/deps/v8/test/webkit/resolve-arguments-from-scope.js b/deps/v8/test/webkit/resolve-arguments-from-scope.js
index def71585df..b3900e7772 100644
--- a/deps/v8/test/webkit/resolve-arguments-from-scope.js
+++ b/deps/v8/test/webkit/resolve-arguments-from-scope.js
@@ -42,4 +42,3 @@ for (var i = 0; i < 100; ++i) {
shouldBe("foo(42).length", "1");
shouldBe("foo(42, 23)[1]", "23");
}
-
diff --git a/deps/v8/test/webkit/sort-with-side-effecting-comparisons.js b/deps/v8/test/webkit/sort-with-side-effecting-comparisons.js
index 5dda02fa55..6794e322f5 100644
--- a/deps/v8/test/webkit/sort-with-side-effecting-comparisons.js
+++ b/deps/v8/test/webkit/sort-with-side-effecting-comparisons.js
@@ -40,5 +40,3 @@ array.sort(function(a, b) {
});
testPassed("It worked.");
-
-
diff --git a/deps/v8/test/webkit/stack-unwinding.js b/deps/v8/test/webkit/stack-unwinding.js
index 8c737fa00d..0a898274a6 100644
--- a/deps/v8/test/webkit/stack-unwinding.js
+++ b/deps/v8/test/webkit/stack-unwinding.js
@@ -62,4 +62,3 @@ try {
} catch (e) {
testPassed("Exception thrown and caught");
}
-
diff --git a/deps/v8/test/webkit/string-replacement-outofmemory-expected.txt b/deps/v8/test/webkit/string-replacement-outofmemory-expected.txt
new file mode 100644
index 0000000000..68ac217966
--- /dev/null
+++ b/deps/v8/test/webkit/string-replacement-outofmemory-expected.txt
@@ -0,0 +1,23 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/deps/v8/test/webkit/string-replacement-outofmemory.js b/deps/v8/test/webkit/string-replacement-outofmemory.js
new file mode 100644
index 0000000000..2b8e18a854
--- /dev/null
+++ b/deps/v8/test/webkit/string-replacement-outofmemory.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+description(
+'This tests that string replacement with a large replacement string causes an out-of-memory exception. See <a href="https://bugs.webkit.org/show_bug.cgi?id=102956">bug 102956</a> for more details.'
+);
+
+function createStringWithRepeatedChar(c, multiplicity) {
+ while (c.length < multiplicity)
+ c += c;
+ c = c.substring(0, multiplicity);
+ return c;
+}
+
+var x = "1";
+var y = "2";
+x = createStringWithRepeatedChar(x, 1 << 12);
+y = createStringWithRepeatedChar(y, (1 << 20) + 1);
+
+shouldThrow("x.replace(/\\d/g, y)", '"Error: Out of memory"');
+var successfullyParsed = true;
diff --git a/deps/v8/test/webkit/string-trim-expected.txt b/deps/v8/test/webkit/string-trim-expected.txt
index 9540f1c8db..6472f89d0b 100644
--- a/deps/v8/test/webkit/string-trim-expected.txt
+++ b/deps/v8/test/webkit/string-trim-expected.txt
@@ -89,20 +89,38 @@ PASS whitespace[19].s.trimRight() is ''
PASS whitespace[20].s.trim() is ''
PASS whitespace[20].s.trimLeft() is ''
PASS whitespace[20].s.trimRight() is ''
-PASS whitespace[21].s.trim() is ''
-PASS whitespace[21].s.trimLeft() is ''
-PASS whitespace[21].s.trimRight() is ''
-PASS wsString.trim() is ''
-PASS wsString.trimLeft() is ''
-PASS wsString.trimRight() is ''
-PASS trimString.trim() is testString
-PASS trimString.trimLeft() is leftTrimString
-PASS trimString.trimRight() is rightTrimString
-PASS leftTrimString.trim() is testString
+FAIL whitespace[21].s.trim() should be . Was ​.
+FAIL whitespace[21].s.trimLeft() should be . Was ​.
+FAIL whitespace[21].s.trimRight() should be . Was ​.
+FAIL wsString.trim() should be . Was ​.
+FAIL wsString.trimLeft() should be . Was ​.
+FAIL wsString.trimRight() should be . Was
+
+              

​.
+FAIL trimString.trim() should be foo bar. Was ​foo bar
+
+              

​.
+FAIL trimString.trimLeft() should be foo bar
+
+              

​. Was ​foo bar
+
+              

​.
+FAIL trimString.trimRight() should be
+
+              

​foo bar. Was
+
+              

​foo bar
+
+              

​.
+FAIL leftTrimString.trim() should be foo bar. Was foo bar
+
+              

​.
PASS leftTrimString.trimLeft() is leftTrimString
-PASS leftTrimString.trimRight() is testString
-PASS rightTrimString.trim() is testString
-PASS rightTrimString.trimLeft() is testString
+FAIL leftTrimString.trimRight() should be foo bar. Was foo bar
+
+              

​.
+FAIL rightTrimString.trim() should be foo bar. Was ​foo bar.
+FAIL rightTrimString.trimLeft() should be foo bar. Was ​foo bar.
PASS rightTrimString.trimRight() is rightTrimString
PASS trim.call(0) is '0'
PASS trimLeft.call(0) is '0'
diff --git a/deps/v8/test/webkit/string-trim.js b/deps/v8/test/webkit/string-trim.js
index e644754736..27f99d579a 100644
--- a/deps/v8/test/webkit/string-trim.js
+++ b/deps/v8/test/webkit/string-trim.js
@@ -92,4 +92,3 @@ for (var i = 0; i < testValues.length; i++) {
shouldBe("trimLeft.call("+testValues[i]+")", "'"+eval(testValues[i])+"'");
shouldBe("trimRight.call("+testValues[i]+")", "'"+eval(testValues[i])+"'");
}
-
diff --git a/deps/v8/test/webkit/throw-from-finally.js b/deps/v8/test/webkit/throw-from-finally.js
index baa4ae8f49..183245d8d1 100644
--- a/deps/v8/test/webkit/throw-from-finally.js
+++ b/deps/v8/test/webkit/throw-from-finally.js
@@ -78,4 +78,3 @@ try {
}
shouldBe("\"\" + events", "\"1:try,1:finally,2:finally,2:thingy,3:thingy,3:finally,4:thingy,4:finally,4:another thingy,5:hi,5:wat\"");
-
diff --git a/deps/v8/test/webkit/try-catch-try-try-catch-try-finally-return-catch-finally.js b/deps/v8/test/webkit/try-catch-try-try-catch-try-finally-return-catch-finally.js
index 140ba7fcf2..1547bdf0b1 100644
--- a/deps/v8/test/webkit/try-catch-try-try-catch-try-finally-return-catch-finally.js
+++ b/deps/v8/test/webkit/try-catch-try-try-catch-try-finally-return-catch-finally.js
@@ -50,4 +50,3 @@ try {
} catch (e) {
testPassed("It worked.");
}
-
diff --git a/deps/v8/test/webkit/try-try-return-finally-finally.js b/deps/v8/test/webkit/try-try-return-finally-finally.js
index 8eef64252d..4861824299 100644
--- a/deps/v8/test/webkit/try-try-return-finally-finally.js
+++ b/deps/v8/test/webkit/try-try-return-finally-finally.js
@@ -45,4 +45,3 @@ try {
} catch (e) {
testPassed("It worked.");
}
-
diff --git a/deps/v8/test/webkit/var-declarations-zero-width.js b/deps/v8/test/webkit/var-declarations-zero-width.js
index 8b5f90f2c9..e7b7c455dc 100644
--- a/deps/v8/test/webkit/var-declarations-zero-width.js
+++ b/deps/v8/test/webkit/var-declarations-zero-width.js
@@ -25,4 +25,4 @@ description("This page tests if U+200C and U+200D are allowed as part of an iden
shouldBe("var x\u200c = 42; x\u200c", "42");
shouldBe("var x\u200d = 43; x\u200d", "43");
-shouldBe("var x\u200c\u200d = 44; x\u200c\u200d", "44"); \ No newline at end of file
+shouldBe("var x\u200c\u200d = 44; x\u200c\u200d", "44");
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index eba1be3f0f..1c68070d42 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -26,6 +26,18 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[
+[ALWAYS, {
+ # BUG(237872). TODO(bmeurer): Investigate.
+ 'string-replacement-outofmemory': [FAIL],
+
+ ##############################################################################
+ # Flaky tests.
+ # BUG(v8:2989).
+ 'dfg-inline-arguments-become-double': [PASS, FAIL],
+ 'dfg-inline-arguments-become-int32': [PASS, FAIL],
+ 'dfg-inline-arguments-reset': [PASS, FAIL],
+ 'dfg-inline-arguments-reset-changetype': [PASS, FAIL],
+}], # ALWAYS
['mode == debug', {
# Too slow in debug mode.
'dfg-int-overflow-in-loop': [SKIP],
@@ -33,4 +45,10 @@
'reentrant-caching': [SKIP],
'sort-large-array': [SKIP],
}], # 'mode == debug'
+['simulator', {
+ 'function-apply-aliased': [SKIP],
+}], # 'simulator'
+['arch == a64 and simulator_run == True', {
+ 'dfg-int-overflow-in-loop': [SKIP],
+}], # 'arch == a64 and simulator_run == True'
]
diff --git a/deps/v8/tools/bash-completion.sh b/deps/v8/tools/bash-completion.sh
index 9f65c67731..6e324246d6 100755
--- a/deps/v8/tools/bash-completion.sh
+++ b/deps/v8/tools/bash-completion.sh
@@ -37,7 +37,7 @@ v8_source=$(readlink -f $(dirname $BASH_SOURCE)/..)
_v8_flag() {
local cur defines targets
cur="${COMP_WORDS[COMP_CWORD]}"
- defines=$(cat src/flag-definitions.h \
+ defines=$(cat $v8_source/src/flag-definitions.h \
| grep "^DEFINE" \
| grep -v "DEFINE_implication" \
| sed -e 's/_/-/g')
@@ -45,7 +45,7 @@ _v8_flag() {
| sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
echo "$defines" \
| sed -ne 's/^DEFINE-bool(\([^,]*\).*/--no\1/p'; \
- cat src/d8.cc \
+ cat $v8_source/src/d8.cc \
| grep "strcmp(argv\[i\]" \
| sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
COMPREPLY=($(compgen -W "$targets" -- "$cur"))
diff --git a/deps/v8/tools/blink_tests/TestExpectations b/deps/v8/tools/blink_tests/TestExpectations
index eec1d5a216..039a918a04 100644
--- a/deps/v8/tools/blink_tests/TestExpectations
+++ b/deps/v8/tools/blink_tests/TestExpectations
@@ -25,3 +25,6 @@ crbug.com/178745 [ Win Debug ] plugins/open-and-close-window-with-plugin.html [
[ Linux Debug ] fast/js/regress/function-dot-apply.html [ Slow ]
crbug.com/249894 [ Linux Debug ] fast/js/regress/inline-arguments-access.html [ Pass Failure Crash Slow ]
[ Linux Debug ] fast/js/regress/inline-arguments-local-escape.html [ Slow ]
+
+# This test is temporarily disabled in Blink, too.
+crbug.com/340639 fast/js/reserved-words-as-property.html [ Pass Failure ]
diff --git a/deps/v8/tools/consarray.js b/deps/v8/tools/consarray.js
index c67abb7971..dbce1de298 100644
--- a/deps/v8/tools/consarray.js
+++ b/deps/v8/tools/consarray.js
@@ -90,4 +90,3 @@ ConsArray.Cell = function(data, next) {
this.data = data;
this.next = next;
};
-
diff --git a/deps/v8/tools/cross_build_gcc.sh b/deps/v8/tools/cross_build_gcc.sh
new file mode 100755
index 0000000000..b9e87280db
--- /dev/null
+++ b/deps/v8/tools/cross_build_gcc.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+if [ "$#" -lt 1 ]; then
+ echo "Usage: tools/cross_build_gcc.sh <GCC prefix> [make arguments ...]"
+ exit 1
+fi
+
+export CXX=$1g++
+export AR=$1ar
+export RANLIB=$1ranlib
+export CC=$1gcc
+export LD=$1g++
+
+OK=1
+if [ ! -x "$CXX" ]; then
+ echo "Error: $CXX does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$AR" ]; then
+ echo "Error: $AR does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$RANLIB" ]; then
+ echo "Error: $RANLIB does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$CC" ]; then
+ echo "Error: $CC does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$LD" ]; then
+ echo "Error: $LD does not exist or is not executable."
+ OK=0
+fi
+if [ $OK -ne 1 ]; then
+ exit 1
+fi
+
+shift
+make snapshot=off $@
diff --git a/deps/v8/tools/draw_instruction_graph.sh b/deps/v8/tools/draw_instruction_graph.sh
new file mode 100755
index 0000000000..7d502d8160
--- /dev/null
+++ b/deps/v8/tools/draw_instruction_graph.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script reads in CSV formatted instruction data, and draws a stacked
+# graph in png format.
+
+defaultfile=a64_inst.csv
+defaultout=a64_inst.png
+gnuplot=/usr/bin/gnuplot
+
+
+# File containing CSV instruction data from simulator.
+file=${1:-$defaultfile}
+
+# Output graph png file.
+out=${2:-$defaultout}
+
+# Check input file exists.
+if [ ! -e $file ]; then
+ echo "Input file not found: $file."
+ echo "Usage: draw_instruction_graph.sh <input csv> <output png>"
+ exit 1
+fi
+
+# Search for an error message, and if found, exit.
+error=`grep -m1 '# Error:' $file`
+if [ -n "$error" ]; then
+ echo "Error message in input file:"
+ echo " $error"
+ exit 2
+fi
+
+# Sample period - period over which numbers for each category of instructions is
+# counted.
+sp=`grep -m1 '# sample_period=' $file | cut -d= -f2`
+
+# Get number of counters in the CSV file.
+nc=`grep -m1 '# counters=' $file | cut -d= -f2`
+
+# Find the annotation arrows. They appear as comments in the CSV file, in the
+# format:
+# # xx @ yyyyy
+# Where xx is a two character annotation identifier, and yyyyy is the
+# position in the executed instruction stream that generated the annotation.
+# Turn these locations into labelled arrows.
+arrows=`sed '/^[^#]/ d' $file | \
+ perl -pe "s/^# .. @ (\d+)/set arrow from \1, graph 0.9 to \1, $sp/"`;
+labels=`sed '/^[^#]/d' $file | \
+ sed -r 's/^# (..) @ (.+)/set label at \2, graph 0.9 "\1" \
+ center offset 0,0.5 font "FreeSans, 8"/'`;
+
+# Check for gnuplot, and warn if not available.
+if [ ! -e $gnuplot ]; then
+ echo "Can't find gnuplot at $gnuplot."
+ echo "Gnuplot version 4.6.3 or later required."
+ exit 3
+fi
+
+# Initialise gnuplot, and give it the data to draw.
+echo | $gnuplot <<EOF
+$arrows
+$labels
+MAXCOL=$nc
+set term png size 1920, 800 #ffffff
+set output '$out'
+set datafile separator ','
+set xtics font 'FreeSans, 10'
+set xlabel 'Instructions' font 'FreeSans, 10'
+set ytics font 'FreeSans, 10'
+set yrange [0:*]
+set key outside font 'FreeSans, 8'
+
+set style line 2 lc rgb '#800000'
+set style line 3 lc rgb '#d00000'
+set style line 4 lc rgb '#ff6000'
+set style line 5 lc rgb '#ffc000'
+set style line 6 lc rgb '#ffff00'
+
+set style line 7 lc rgb '#ff00ff'
+set style line 8 lc rgb '#ffc0ff'
+
+set style line 9 lc rgb '#004040'
+set style line 10 lc rgb '#008080'
+set style line 11 lc rgb '#40c0c0'
+set style line 12 lc rgb '#c0f0f0'
+
+set style line 13 lc rgb '#004000'
+set style line 14 lc rgb '#008000'
+set style line 15 lc rgb '#40c040'
+set style line 16 lc rgb '#c0f0c0'
+
+set style line 17 lc rgb '#2020f0'
+set style line 18 lc rgb '#6060f0'
+set style line 19 lc rgb '#a0a0f0'
+
+set style line 20 lc rgb '#000000'
+set style line 21 lc rgb '#ffffff'
+
+plot for [i=2:MAXCOL] '$file' using 1:(sum [col=i:MAXCOL] column(col)) \
+title columnheader(i) with filledcurve y1=0 ls i
+EOF
+
+
+
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 1a4f22195b..de35c2e670 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -100,6 +100,8 @@ consts_misc = [
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
+ { 'name': 'off_fp_constant_pool',
+ 'value': 'StandardFrameConstants::kConstantPoolOffset' },
{ 'name': 'off_fp_marker',
'value': 'StandardFrameConstants::kMarkerOffset' },
{ 'name': 'off_fp_function',
diff --git a/deps/v8/tools/generate-trig-table.py b/deps/v8/tools/generate-trig-table.py
new file mode 100644
index 0000000000..c03cf73e2f
--- /dev/null
+++ b/deps/v8/tools/generate-trig-table.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a utility for populating the lookup table for the
+# approximation of trigonometric functions.
+
+import sys, math
+
+SAMPLES = 1800
+
+TEMPLATE = """\
+// Copyright 2013 Google Inc. All Rights Reserved.
+
+// This file was generated from a python script.
+
+#include "v8.h"
+#include "trig-table.h"
+
+namespace v8 {
+namespace internal {
+
+ const double TrigonometricLookupTable::kSinTable[] =
+ { %(sine_table)s };
+ const double TrigonometricLookupTable::kCosXIntervalTable[] =
+ { %(cosine_table)s };
+ const int TrigonometricLookupTable::kSamples = %(samples)i;
+ const int TrigonometricLookupTable::kTableSize = %(table_size)i;
+ const double TrigonometricLookupTable::kSamplesOverPiHalf =
+ %(samples_over_pi_half)s;
+
+} } // v8::internal
+"""
+
+def main():
+ pi_half = math.pi / 2
+ interval = pi_half / SAMPLES
+ sin = []
+ cos_times_interval = []
+ table_size = SAMPLES + 2
+
+ for i in range(0, table_size):
+ sample = i * interval
+ sin.append(repr(math.sin(sample)))
+ cos_times_interval.append(repr(math.cos(sample) * interval))
+
+ output_file = sys.argv[1]
+ output = open(str(output_file), "w")
+ output.write(TEMPLATE % {
+ 'sine_table': ','.join(sin),
+ 'cosine_table': ','.join(cos_times_interval),
+ 'samples': SAMPLES,
+ 'table_size': table_size,
+ 'samples_over_pi_half': repr(SAMPLES / pi_half)
+ })
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index d09c042204..a5a2ae08a8 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -881,6 +881,19 @@ def FormatDisasmLine(start, heap, line):
if stack_slot:
marker = "=>"
code = AnnotateAddresses(heap, line[1])
+
+ # Compute the actual call target which the disassembler is too stupid
+ # to figure out (it adds the call offset to the disassembly offset rather
+ # than the absolute instruction address).
+ if heap.reader.arch == MD_CPU_ARCHITECTURE_X86:
+ if code.startswith("e8"):
+ words = code.split()
+ if len(words) > 6 and words[5] == "call":
+ offset = int(words[4] + words[3] + words[2] + words[1], 16)
+ target = (line_address + offset + 5) & 0xFFFFFFFF
+ code = code.replace(words[6], "0x%08x" % target)
+ # TODO(jkummerow): port this hack to ARM and x64.
+
return "%s%08x %08x: %s" % (marker, line_address, line[0], code)
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index f3097ef7d5..242ddf02b5 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -28,6 +28,7 @@
{
'variables': {
'v8_code': 1,
+ 'v8_random_seed%': 314159265,
},
'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
'targets': [
@@ -58,7 +59,6 @@
['component=="shared_library"', {
'type': '<(component)',
'sources': [
- '../../src/defaults.cc',
# Note: on non-Windows we still build this file so that gyp
# has some sources to link into the component.
'../../src/v8dll-main.cc',
@@ -113,10 +113,15 @@
'dependencies': [
'mksnapshot.<(v8_target_arch)#host',
'js2c#host',
+ 'generate_trig_table#host',
],
}, {
'toolsets': ['target'],
- 'dependencies': ['mksnapshot.<(v8_target_arch)', 'js2c'],
+ 'dependencies': [
+ 'mksnapshot.<(v8_target_arch)',
+ 'js2c',
+ 'generate_trig_table',
+ ],
}],
['component=="shared_library"', {
'defines': [
@@ -140,6 +145,7 @@
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc',
'<(INTERMEDIATE_DIR)/snapshot.cc',
],
'actions': [
@@ -156,6 +162,11 @@
'--log-snapshot-positions',
'--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
],
+ 'conditions': [
+ ['v8_random_seed!=0', {
+ 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+ }],
+ ],
},
'action': [
'<@(_inputs)',
@@ -177,15 +188,16 @@
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc',
'../../src/snapshot-empty.cc',
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
- 'dependencies': ['js2c#host'],
+ 'dependencies': ['js2c#host', 'generate_trig_table#host'],
}, {
'toolsets': ['target'],
- 'dependencies': ['js2c'],
+ 'dependencies': ['js2c', 'generate_trig_table'],
}],
['component=="shared_library"', {
'defines': [
@@ -195,6 +207,32 @@
}],
]
},
+ { 'target_name': 'generate_trig_table',
+ 'type': 'none',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ 'actions': [
+ {
+ 'action_name': 'generate',
+ 'inputs': [
+ '../../tools/generate-trig-table.py',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc',
+ ],
+ 'action': [
+ 'python',
+ '../../tools/generate-trig-table.py',
+ '<@(_outputs)',
+ ],
+ },
+ ]
+ },
{
'target_name': 'v8_base.<(v8_target_arch)',
'type': 'static_library',
@@ -215,7 +253,6 @@
'../../src/allocation-tracker.h',
'../../src/api.cc',
'../../src/api.h',
- '../../src/apiutils.h',
'../../src/arguments.cc',
'../../src/arguments.h',
'../../src/assembler.cc',
@@ -294,14 +331,19 @@
'../../src/execution.h',
'../../src/extensions/externalize-string-extension.cc',
'../../src/extensions/externalize-string-extension.h',
+ '../../src/extensions/free-buffer-extension.cc',
+ '../../src/extensions/free-buffer-extension.h',
'../../src/extensions/gc-extension.cc',
'../../src/extensions/gc-extension.h',
'../../src/extensions/statistics-extension.cc',
'../../src/extensions/statistics-extension.h',
+ '../../src/extensions/trigger-failure-extension.cc',
+ '../../src/extensions/trigger-failure-extension.h',
'../../src/factory.cc',
'../../src/factory.h',
'../../src/fast-dtoa.cc',
'../../src/fast-dtoa.h',
+ '../../src/feedback-slots.h',
'../../src/fixed-dtoa.cc',
'../../src/fixed-dtoa.h',
'../../src/flag-definitions.h',
@@ -402,6 +444,13 @@
'../../src/jsregexp.cc',
'../../src/jsregexp.h',
'../../src/lazy-instance.h',
+ # TODO(jochen): move libplatform/ files to their own target.
+ '../../src/libplatform/default-platform.cc',
+ '../../src/libplatform/default-platform.h',
+ '../../src/libplatform/task-queue.cc',
+ '../../src/libplatform/task-queue.h',
+ '../../src/libplatform/worker-thread.cc',
+ '../../src/libplatform/worker-thread.h',
'../../src/list-inl.h',
'../../src/list.h',
'../../src/lithium-allocator-inl.h',
@@ -597,6 +646,53 @@
'../../src/arm/stub-cache-arm.cc',
],
}],
+ ['v8_target_arch=="a64"', {
+ 'sources': [ ### gcmole(arch:a64) ###
+ '../../src/a64/assembler-a64.cc',
+ '../../src/a64/assembler-a64.h',
+ '../../src/a64/assembler-a64-inl.h',
+ '../../src/a64/builtins-a64.cc',
+ '../../src/a64/codegen-a64.cc',
+ '../../src/a64/codegen-a64.h',
+ '../../src/a64/code-stubs-a64.cc',
+ '../../src/a64/code-stubs-a64.h',
+ '../../src/a64/constants-a64.h',
+ '../../src/a64/cpu-a64.cc',
+ '../../src/a64/cpu-a64.h',
+ '../../src/a64/debug-a64.cc',
+ '../../src/a64/debugger-a64.cc',
+ '../../src/a64/debugger-a64.h',
+ '../../src/a64/decoder-a64.cc',
+ '../../src/a64/decoder-a64.h',
+ '../../src/a64/deoptimizer-a64.cc',
+ '../../src/a64/disasm-a64.cc',
+ '../../src/a64/disasm-a64.h',
+ '../../src/a64/frames-a64.cc',
+ '../../src/a64/frames-a64.h',
+ '../../src/a64/full-codegen-a64.cc',
+ '../../src/a64/ic-a64.cc',
+ '../../src/a64/instructions-a64.cc',
+ '../../src/a64/instructions-a64.h',
+ '../../src/a64/instrument-a64.cc',
+ '../../src/a64/instrument-a64.h',
+ '../../src/a64/lithium-a64.cc',
+ '../../src/a64/lithium-a64.h',
+ '../../src/a64/lithium-codegen-a64.cc',
+ '../../src/a64/lithium-codegen-a64.h',
+ '../../src/a64/lithium-gap-resolver-a64.cc',
+ '../../src/a64/lithium-gap-resolver-a64.h',
+ '../../src/a64/macro-assembler-a64.cc',
+ '../../src/a64/macro-assembler-a64.h',
+ '../../src/a64/macro-assembler-a64-inl.h',
+ '../../src/a64/regexp-macro-assembler-a64.cc',
+ '../../src/a64/regexp-macro-assembler-a64.h',
+ '../../src/a64/simulator-a64.cc',
+ '../../src/a64/simulator-a64.h',
+ '../../src/a64/stub-cache-a64.cc',
+ '../../src/a64/utils-a64.cc',
+ '../../src/a64/utils-a64.h',
+ ],
+ }],
['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
'sources': [ ### gcmole(arch:ia32) ###
'../../src/ia32/assembler-ia32-inl.h',
@@ -762,6 +858,43 @@
],
},
],
+ ['OS=="qnx"', {
+ 'link_settings': {
+ 'target_conditions': [
+ ['_toolset=="host" and host_os=="linux"', {
+ 'libraries': [
+ '-lrt'
+ ],
+ }],
+ ['_toolset=="target"', {
+ 'libraries': [
+ '-lbacktrace', '-lsocket'
+ ],
+ }],
+ ],
+ },
+ 'sources': [
+ '../../src/platform-posix.cc',
+ ],
+ 'target_conditions': [
+ ['_toolset=="host" and host_os=="linux"', {
+ 'sources': [
+ '../../src/platform-linux.cc'
+ ],
+ }],
+ ['_toolset=="host" and host_os=="mac"', {
+ 'sources': [
+ '../../src/platform-macos.cc'
+ ],
+ }],
+ ['_toolset=="target"', {
+ 'sources': [
+ '../../src/platform-qnx.cc'
+ ],
+ }],
+ ],
+ },
+ ],
['OS=="freebsd"', {
'link_settings': {
'libraries': [
@@ -859,10 +992,6 @@
'BUILDING_V8_SHARED',
'V8_SHARED',
],
- }, {
- 'sources': [
- '../../src/defaults.cc',
- ],
}],
['v8_postmortem_support=="true"', {
'sources': [
@@ -885,6 +1014,12 @@
'<(icu_gyp_path):icudata',
],
}],
+ ['v8_use_default_platform==0', {
+ 'sources!': [
+ '../../src/default-platform.cc',
+ '../../src/default-platform.h',
+ ],
+ }],
],
},
{
@@ -934,6 +1069,7 @@
'../../src/proxy.js',
'../../src/collection.js',
'../../src/object-observe.js',
+ '../../src/promise.js',
'../../src/generator.js',
'../../src/array-iterator.js',
'../../src/harmony-string.js',
@@ -1036,31 +1172,5 @@
}],
],
},
- {
- 'target_name': 'v8_shell',
- 'type': 'executable',
- 'dependencies': [
- 'v8'
- ],
- 'sources': [
- '../../samples/shell.cc',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ['OS=="win"', {
- # This could be gotten by not setting chromium_code, if that's OK.
- 'defines': ['_CRT_SECURE_NO_WARNINGS'],
- }],
- ['v8_compress_startup_data=="bz2"', {
- 'libraries': [
- '-lbz2',
- ]
- }],
- ],
- },
],
}
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index 9492b0030c..f67d053ad2 100644..100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -116,41 +116,47 @@ def ExpandConstants(lines, constants):
return lines
+def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
+ pattern_match = name_pattern.search(lines, pos)
+ while pattern_match is not None:
+ # Scan over the arguments
+ height = 1
+ start = pattern_match.start()
+ end = pattern_match.end()
+ assert lines[end - 1] == '('
+ last_match = end
+ arg_index = [0] # Wrap state into array, to work around Python "scoping"
+ mapping = { }
+ def add_arg(str):
+ # Remember to expand recursively in the arguments
+ replacement = expander(str.strip())
+ mapping[macro.args[arg_index[0]]] = replacement
+ arg_index[0] += 1
+ while end < len(lines) and height > 0:
+ # We don't count commas at higher nesting levels.
+ if lines[end] == ',' and height == 1:
+ add_arg(lines[last_match:end])
+ last_match = end + 1
+ elif lines[end] in ['(', '{', '[']:
+ height = height + 1
+ elif lines[end] in [')', '}', ']']:
+ height = height - 1
+ end = end + 1
+ # Remember to add the last match.
+ add_arg(lines[last_match:end-1])
+ result = macro.expand(mapping)
+ # Replace the occurrence of the macro with the expansion
+ lines = lines[:start] + result + lines[end:]
+ pattern_match = name_pattern.search(lines, start + len(result))
+ return lines
+
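The argument scan in ExpandMacroDefinition splits macro arguments only at top-level commas by tracking bracket nesting depth ("height"). A standalone sketch of just that splitting step, run on a made-up call:

def split_top_level_args(call):
  # call starts at the macro's opening parenthesis, e.g. "(a, f(b, c), [d, e])"
  assert call[0] == '('
  args, height, last, end = [], 1, 1, 1
  while end < len(call) and height > 0:
    if call[end] == ',' and height == 1:
      args.append(call[last:end].strip())
      last = end + 1
    elif call[end] in ['(', '{', '[']:
      height = height + 1
    elif call[end] in [')', '}', ']']:
      height = height - 1
    end = end + 1
  args.append(call[last:end - 1].strip())
  return args

print split_top_level_args("(a, f(b, c), [d, e])")   # ['a', 'f(b, c)', '[d, e]']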
def ExpandMacros(lines, macros):
# We allow macros to depend on the previously declared macros, but
# we don't allow self-dependencies or recursion.
for name_pattern, macro in reversed(macros):
- pattern_match = name_pattern.search(lines, 0)
- while pattern_match is not None:
- # Scan over the arguments
- height = 1
- start = pattern_match.start()
- end = pattern_match.end()
- assert lines[end - 1] == '('
- last_match = end
- arg_index = [0] # Wrap state into array, to work around Python "scoping"
- mapping = { }
- def add_arg(str):
- # Remember to expand recursively in the arguments
- replacement = ExpandMacros(str.strip(), macros)
- mapping[macro.args[arg_index[0]]] = replacement
- arg_index[0] += 1
- while end < len(lines) and height > 0:
- # We don't count commas at higher nesting levels.
- if lines[end] == ',' and height == 1:
- add_arg(lines[last_match:end])
- last_match = end + 1
- elif lines[end] in ['(', '{', '[']:
- height = height + 1
- elif lines[end] in [')', '}', ']']:
- height = height - 1
- end = end + 1
- # Remember to add the last match.
- add_arg(lines[last_match:end-1])
- result = macro.expand(mapping)
- # Replace the occurrence of the macro with the expansion
- lines = lines[:start] + result + lines[end:]
- pattern_match = name_pattern.search(lines, start + len(result))
+ def expander(s):
+ return ExpandMacros(s, macros)
+ lines = ExpandMacroDefinition(lines, 0, name_pattern, macro, expander)
return lines
class TextMacro:
@@ -210,6 +216,34 @@ def ReadMacros(lines):
raise ("Illegal line: " + line)
return (constants, macros)
+INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
+INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n')
+
+def ExpandInlineMacros(lines, filename):
+ pos = 0
+ while True:
+ macro_match = INLINE_MACRO_PATTERN.search(lines, pos)
+ if macro_match is None:
+ # no more macros
+ return lines
+ name = macro_match.group(1)
+ args = [match.strip() for match in macro_match.group(2).split(',')]
+    end_macro_match = INLINE_MACRO_END_PATTERN.search(lines, macro_match.end())
+ if end_macro_match is None:
+ raise ("Macro %s unclosed in %s" % (name, filename))
+ body = lines[macro_match.end():end_macro_match.start()]
+
+ # remove macro definition
+ lines = lines[:macro_match.start()] + lines[end_macro_match.end():]
+ name_pattern = re.compile("\\b%s\\(" % name)
+ macro = TextMacro(args, body)
+
+  # advance position to where the macro definition was
+ pos = macro_match.start()
+
+ def non_expander(s):
+ return s
+ lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
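As an illustration of the inline-macro syntax handled above (example source made up): a macro is defined inside the .js file between macro NAME(args) and endmacro, the definition is stripped, and later NAME(...) calls are expanded textually through ExpandMacroDefinition.

import re
INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
source = "macro ADD(a, b)\n(a + b)\nendmacro\nvar x = ADD(1, 2);\n"
m = INLINE_MACRO_PATTERN.search(source)
print m.group(1), [arg.strip() for arg in m.group(2).split(',')]
# prints: ADD ['a', 'b'] -- ADD(1, 2) is then rewritten using the macro body.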
HEADER_TEMPLATE = """\
// Copyright 2011 Google Inc. All Rights Reserved.
@@ -325,6 +359,8 @@ def JS2C(source, target, env):
lines = ReadFile(filename)
lines = ExpandConstants(lines, consts)
lines = ExpandMacros(lines, macros)
+ lines = RemoveCommentsAndTrailingWhitespace(lines)
+ lines = ExpandInlineMacros(lines, filename)
Validate(lines, filename)
lines = minifier.JSMinify(lines)
id = (os.path.split(filename)[1])[:-3]
diff --git a/deps/v8/tools/lexer-shell.cc b/deps/v8/tools/lexer-shell.cc
new file mode 100644
index 0000000000..0610e7f70d
--- /dev/null
+++ b/deps/v8/tools/lexer-shell.cc
@@ -0,0 +1,267 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+#include <vector>
+#include "v8.h"
+
+#include "api.h"
+#include "ast.h"
+#include "char-predicates-inl.h"
+#include "messages.h"
+#include "platform.h"
+#include "runtime.h"
+#include "scanner-character-streams.h"
+#include "scopeinfo.h"
+#include "string-stream.h"
+#include "scanner.h"
+
+
+using namespace v8::internal;
+
+enum Encoding {
+ LATIN1,
+ UTF8,
+ UTF16
+};
+
+
+const byte* ReadFile(const char* name, Isolate* isolate,
+ int* size, int repeat) {
+ FILE* file = fopen(name, "rb");
+ *size = 0;
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int file_size = ftell(file);
+ rewind(file);
+
+ *size = file_size * repeat;
+
+ byte* chars = new byte[*size + 1];
+ for (int i = 0; i < file_size;) {
+ int read = static_cast<int>(fread(&chars[i], 1, file_size - i, file));
+ i += read;
+ }
+ fclose(file);
+
+ for (int i = file_size; i < *size; i++) {
+ chars[i] = chars[i - file_size];
+ }
+ chars[*size] = 0;
+
+ return chars;
+}
+
+
+class BaselineScanner {
+ public:
+ BaselineScanner(const char* fname,
+ Isolate* isolate,
+ Encoding encoding,
+ ElapsedTimer* timer,
+ int repeat)
+ : stream_(NULL) {
+ int length = 0;
+ source_ = ReadFile(fname, isolate, &length, repeat);
+ unicode_cache_ = new UnicodeCache();
+ scanner_ = new Scanner(unicode_cache_);
+ switch (encoding) {
+ case UTF8:
+ stream_ = new Utf8ToUtf16CharacterStream(source_, length);
+ break;
+ case UTF16: {
+ Handle<String> result = isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(source_),
+ length / 2));
+ stream_ =
+ new GenericStringUtf16CharacterStream(result, 0, result->length());
+ break;
+ }
+ case LATIN1: {
+ Handle<String> result = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(source_, length));
+ stream_ =
+ new GenericStringUtf16CharacterStream(result, 0, result->length());
+ break;
+ }
+ }
+ timer->Start();
+ scanner_->Initialize(stream_);
+ }
+
+ ~BaselineScanner() {
+ delete scanner_;
+ delete stream_;
+ delete unicode_cache_;
+ delete[] source_;
+ }
+
+ Token::Value Next(int* beg_pos, int* end_pos) {
+ Token::Value res = scanner_->Next();
+ *beg_pos = scanner_->location().beg_pos;
+ *end_pos = scanner_->location().end_pos;
+ return res;
+ }
+
+ private:
+ UnicodeCache* unicode_cache_;
+ Scanner* scanner_;
+ const byte* source_;
+ BufferedUtf16CharacterStream* stream_;
+};
+
+
+struct TokenWithLocation {
+ Token::Value value;
+ size_t beg;
+ size_t end;
+ TokenWithLocation() : value(Token::ILLEGAL), beg(0), end(0) { }
+ TokenWithLocation(Token::Value value, size_t beg, size_t end) :
+ value(value), beg(beg), end(end) { }
+ bool operator==(const TokenWithLocation& other) {
+ return value == other.value && beg == other.beg && end == other.end;
+ }
+ bool operator!=(const TokenWithLocation& other) {
+ return !(*this == other);
+ }
+ void Print(const char* prefix) const {
+ printf("%s %11s at (%d, %d)\n",
+ prefix, Token::Name(value),
+ static_cast<int>(beg), static_cast<int>(end));
+ }
+};
+
+
+TimeDelta RunBaselineScanner(const char* fname,
+ Isolate* isolate,
+ Encoding encoding,
+ bool dump_tokens,
+ std::vector<TokenWithLocation>* tokens,
+ int repeat) {
+ ElapsedTimer timer;
+ BaselineScanner scanner(fname, isolate, encoding, &timer, repeat);
+ Token::Value token;
+ int beg, end;
+ do {
+ token = scanner.Next(&beg, &end);
+ if (dump_tokens) {
+ tokens->push_back(TokenWithLocation(token, beg, end));
+ }
+ } while (token != Token::EOS);
+ return timer.Elapsed();
+}
+
+
+void PrintTokens(const char* name,
+ const std::vector<TokenWithLocation>& tokens) {
+ printf("No of tokens: %d\n",
+ static_cast<int>(tokens.size()));
+ printf("%s:\n", name);
+ for (size_t i = 0; i < tokens.size(); ++i) {
+ tokens[i].Print("=>");
+ }
+}
+
+
+TimeDelta ProcessFile(
+ const char* fname,
+ Encoding encoding,
+ Isolate* isolate,
+ bool print_tokens,
+ int repeat) {
+ if (print_tokens) {
+ printf("Processing file %s\n", fname);
+ }
+ HandleScope handle_scope(isolate);
+ std::vector<TokenWithLocation> baseline_tokens;
+ TimeDelta baseline_time;
+ baseline_time = RunBaselineScanner(
+ fname, isolate, encoding, print_tokens,
+ &baseline_tokens, repeat);
+ if (print_tokens) {
+ PrintTokens("Baseline", baseline_tokens);
+ }
+ return baseline_time;
+}
+
+
+int main(int argc, char* argv[]) {
+ v8::V8::InitializeICU();
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ Encoding encoding = LATIN1;
+ bool print_tokens = false;
+ std::vector<std::string> fnames;
+ std::string benchmark;
+ int repeat = 1;
+ for (int i = 0; i < argc; ++i) {
+ if (strcmp(argv[i], "--latin1") == 0) {
+ encoding = LATIN1;
+ } else if (strcmp(argv[i], "--utf8") == 0) {
+ encoding = UTF8;
+ } else if (strcmp(argv[i], "--utf16") == 0) {
+ encoding = UTF16;
+ } else if (strcmp(argv[i], "--print-tokens") == 0) {
+ print_tokens = true;
+ } else if (strncmp(argv[i], "--benchmark=", 12) == 0) {
+ benchmark = std::string(argv[i]).substr(12);
+ } else if (strncmp(argv[i], "--repeat=", 9) == 0) {
+ std::string repeat_str = std::string(argv[i]).substr(9);
+ repeat = atoi(repeat_str.c_str());
+ } else if (i > 0 && argv[i][0] != '-') {
+ fnames.push_back(std::string(argv[i]));
+ }
+ }
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ ASSERT(!context.IsEmpty());
+ {
+ v8::Context::Scope scope(context);
+ Isolate* isolate = Isolate::Current();
+ double baseline_total = 0;
+ for (size_t i = 0; i < fnames.size(); i++) {
+ TimeDelta time;
+ time = ProcessFile(fnames[i].c_str(), encoding, isolate, print_tokens,
+ repeat);
+ baseline_total += time.InMillisecondsF();
+ }
+ if (benchmark.empty()) benchmark = "Baseline";
+ printf("%s(RunTime): %.f ms\n", benchmark.c_str(), baseline_total);
+ }
+ }
+ v8::V8::Dispose();
+ return 0;
+}
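A rough Python restatement (illustration only, not part of the tool) of what ReadFile's repeat parameter does: the file contents are tiled so that very small inputs still yield a measurable scan time; the C++ version additionally NUL-terminates the buffer.

def read_repeated(path, repeat):
  with open(path, "rb") as f:
    data = f.read()
  return data * repeat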
diff --git a/deps/v8/tools/lexer-shell.gyp b/deps/v8/tools/lexer-shell.gyp
new file mode 100644
index 0000000000..8e6ab7a844
--- /dev/null
+++ b/deps/v8/tools/lexer-shell.gyp
@@ -0,0 +1,57 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ 'v8_enable_i18n_support%': 1,
+ },
+ 'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'lexer-shell',
+ 'type': 'executable',
+ 'dependencies': [
+ '../tools/gyp/v8.gyp:v8',
+ ],
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
+ }],
+ ],
+ 'include_dirs+': [
+ '../src',
+ ],
+ 'sources': [
+ 'lexer-shell.cc',
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/tools/merge-to-branch.sh b/deps/v8/tools/merge-to-branch.sh
index 260dc8a149..4e8a86c834 100755
--- a/deps/v8/tools/merge-to-branch.sh
+++ b/deps/v8/tools/merge-to-branch.sh
@@ -69,8 +69,9 @@ restore_patch_commit_hashes_if_unset() {
}
########## Option parsing
+REVERT_FROM_BLEEDING_EDGE=0
-while getopts ":hs:fp:rm:" OPTION ; do
+while getopts ":hs:fp:rm:R" OPTION ; do
case $OPTION in
h) usage
exit 0
@@ -85,6 +86,9 @@ while getopts ":hs:fp:rm:" OPTION ; do
;;
s) START_STEP=$OPTARG
;;
+ R) REVERSE_PATCH="--reverse"
+ REVERT_FROM_BLEEDING_EDGE=1
+ ;;
?) echo "Illegal option: -$OPTARG"
usage
exit 1
@@ -104,7 +108,8 @@ touch "$ALREADY_MERGING_SENTINEL_FILE"
initial_environment_checks
if [ $START_STEP -le $CURRENT_STEP ] ; then
- if [ ${#@} -lt 2 ] ; then
+ let MIN_EXPECTED_ARGS=2-$REVERT_FROM_BLEEDING_EDGE
+ if [ ${#@} -lt $MIN_EXPECTED_ARGS ] ; then
if [ -z "$EXTRA_PATCH" ] ; then
die "Either a patch file or revision numbers must be specified"
fi
@@ -113,9 +118,13 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
fi
fi
echo ">>> Step $CURRENT_STEP: Preparation"
- MERGE_TO_BRANCH=$1
- [[ -n "$MERGE_TO_BRANCH" ]] || die "Please specify a branch to merge to"
- shift
+ if [ $REVERT_FROM_BLEEDING_EDGE -eq 1 ] ; then
+ MERGE_TO_BRANCH="bleeding_edge"
+ else
+ MERGE_TO_BRANCH=$1
+ [[ -n "$MERGE_TO_BRANCH" ]] || die "Please specify a branch to merge to"
+ shift
+ fi
persist "MERGE_TO_BRANCH"
common_prepare
fi
@@ -130,10 +139,54 @@ fi
let CURRENT_STEP+=1
if [ $START_STEP -le $CURRENT_STEP ] ; then
+ echo ">>> Step $CURRENT_STEP: Search for corresponding architecture ports."
+ for REVISION in "$@" ; do
+ # Add the revision to the array if it isn't already added.
+ if [[ ! "${FULL_REVISION_LIST[@]}" =~ (^| )$REVISION($| ) ]] ; then
+ FULL_REVISION_LIST=("${FULL_REVISION_LIST[@]}" "$REVISION")
+ fi
+    # Search for commits which match the "Port rXXX" pattern.
+ GIT_HASHES=$(git log svn/bleeding_edge --reverse \
+ --format=%H --grep="Port r$REVISION")
+ if [ -n "$GIT_HASHES" ]; then
+ while read -r NEXT_GIT_HASH; do
+ NEXT_SVN_REVISION=$(git svn find-rev $NEXT_GIT_HASH svn/bleeding_edge)
+ [[ -n "$NEXT_SVN_REVISION" ]] \
+ || die "Cannot determine svn revision for $NEXT_GIT_HASH"
+ FULL_REVISION_LIST=("${FULL_REVISION_LIST[@]}" "$NEXT_SVN_REVISION")
+ REVISION_TITLE=$(git log -1 --format=%s $NEXT_GIT_HASH)
+ # Is this revision included in the original revision list?
+ if [[ $@ =~ (^| )$NEXT_SVN_REVISION($| ) ]] ; then
+ echo "Found port of r$REVISION -> \
+r$NEXT_SVN_REVISION (already included): $REVISION_TITLE"
+ else
+ echo "Found port of r$REVISION -> \
+r$NEXT_SVN_REVISION: $REVISION_TITLE"
+ PORT_REVISION_LIST=("${PORT_REVISION_LIST[@]}" "$NEXT_SVN_REVISION")
+ fi
+ done <<< "$GIT_HASHES"
+ fi
+ done
+ # Next step expects a list, not an array.
+ FULL_REVISION_LIST="${FULL_REVISION_LIST[@]}"
+  # Did we find any ports?
+ if [ ${#PORT_REVISION_LIST[@]} -ne 0 ] ; then
+ confirm "Automatically add corresponding ports (${PORT_REVISION_LIST[*]})?"
+ #: 'n': Restore the original revision list.
+ if [ $? -ne 0 ] ; then
+ FULL_REVISION_LIST="$@"
+ fi
+ fi
+ persist "FULL_REVISION_LIST"
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
echo ">>> Step $CURRENT_STEP: Find the git \
revisions associated with the patches."
+ restore_if_unset "FULL_REVISION_LIST"
current=0
- for REVISION in "$@" ; do
+ for REVISION in $FULL_REVISION_LIST ; do
NEXT_HASH=$(git svn find-rev "r$REVISION" svn/bleeding_edge)
[[ -n "$NEXT_HASH" ]] \
|| die "Cannot determine git hash for r$REVISION"
@@ -144,7 +197,11 @@ revisions associated with the patches."
done
if [ -n "$REVISION_LIST" ] ; then
if [ -n "$REVERSE_PATCH" ] ; then
- NEW_COMMIT_MSG="Rollback of$REVISION_LIST in $MERGE_TO_BRANCH branch."
+ if [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
+ NEW_COMMIT_MSG="Rollback of$REVISION_LIST in $MERGE_TO_BRANCH branch."
+ else
+ NEW_COMMIT_MSG="Revert$REVISION_LIST."
+ fi
else
NEW_COMMIT_MSG="Merged$REVISION_LIST into $MERGE_TO_BRANCH branch."
fi;
@@ -166,6 +223,7 @@ revisions associated with the patches."
done
if [ -n "$BUG_AGGREGATE" ] ; then
echo "BUG=$BUG_AGGREGATE" >> $COMMITMSG_FILE
+ echo "LOG=N" >> $COMMITMSG_FILE
fi
persist "NEW_COMMIT_MSG"
persist "REVISION_LIST"
@@ -177,7 +235,6 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
echo ">>> Step $CURRENT_STEP: Apply patches for selected revisions."
restore_if_unset "MERGE_TO_BRANCH"
restore_patch_commit_hashes_if_unset "PATCH_COMMIT_HASHES"
- rm -f "$TOUCHED_FILES_FILE"
for HASH in ${PATCH_COMMIT_HASHES[@]} ; do
echo "Applying patch for $HASH to $MERGE_TO_BRANCH..."
git log -1 -p $HASH > "$TEMPORARY_PATCH_FILE"
@@ -189,14 +246,14 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
fi
let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
+if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
echo ">>> Step $CURRENT_STEP: Prepare $VERSION_FILE."
# These version numbers are used again for creating the tag
read_and_persist_version
fi
let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
+if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
echo ">>> Step $CURRENT_STEP: Increment version number."
restore_if_unset "PATCH"
NEWPATCH=$(($PATCH + 1))
@@ -229,12 +286,14 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
git checkout $BRANCHNAME \
|| die "cannot ensure that the current branch is $BRANCHNAME"
wait_for_lgtm
- PRESUBMIT_TREE_CHECK="skip" git cl dcommit \
+ PRESUBMIT_TREE_CHECK="skip" git cl presubmit \
+ || die "presubmit failed"
+ PRESUBMIT_TREE_CHECK="skip" git cl dcommit --bypass-hooks \
|| die "failed to commit to $MERGE_TO_BRANCH"
fi
let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
+if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
echo ">>> Step $CURRENT_STEP: Determine svn commit revision"
restore_if_unset "NEW_COMMIT_MSG"
restore_if_unset "MERGE_TO_BRANCH"
@@ -248,7 +307,7 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
fi
let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
+if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
echo ">>> Step $CURRENT_STEP: Create the tag."
restore_if_unset "SVN_REVISION"
restore_version_if_unset "NEW"
@@ -273,9 +332,11 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
restore_if_unset "REVISION_LIST"
restore_version_if_unset "NEW"
common_cleanup
- echo "*** SUMMARY ***"
- echo "version: $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
- echo "branch: $TO_URL"
- echo "svn revision: $SVN_REVISION"
- [[ -n "$REVISION_LIST" ]] && echo "patches:$REVISION_LIST"
+  if [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
+ echo "*** SUMMARY ***"
+ echo "version: $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
+ echo "branch: $TO_URL"
+ echo "svn revision: $SVN_REVISION"
+ [[ -n "$REVISION_LIST" ]] && echo "patches:$REVISION_LIST"
+ fi
fi
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 1ab6347774..88f1459d73 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -144,8 +144,8 @@ class FileContentsCache(object):
try:
sums_file = open(self.sums_file_name, 'r')
self.sums = pickle.load(sums_file)
- except IOError:
- # File might not exist, this is OK.
+ except:
+      # Cannot read or parse the pickle for some reason. Not much we can do about it.
pass
finally:
if sums_file:
@@ -155,6 +155,14 @@ class FileContentsCache(object):
try:
sums_file = open(self.sums_file_name, 'w')
pickle.dump(self.sums, sums_file)
+ except:
+      # Failed to write pickle. Try to clean up behind us.
+ if sums_file:
+ sums_file.close()
+ try:
+ os.unlink(self.sums_file_name)
+ except:
+ pass
finally:
sums_file.close()
@@ -191,7 +199,8 @@ class SourceFileProcessor(object):
return True
def IgnoreDir(self, name):
- return name.startswith('.') or name == 'data' or name == 'sputniktests'
+ return (name.startswith('.') or
+ name in ('data', 'kraken', 'octane', 'sunspider'))
def IgnoreFile(self, name):
return name.startswith('.')
@@ -312,12 +321,8 @@ class SourceProcessor(SourceFileProcessor):
return ['.']
def IgnoreDir(self, name):
- return (super(SourceProcessor, self).IgnoreDir(name)
- or (name == 'third_party')
- or (name == 'gyp')
- or (name == 'out')
- or (name == 'obj')
- or (name == 'DerivedSources'))
+ return (super(SourceProcessor, self).IgnoreDir(name) or
+ name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources'))
IGNORE_COPYRIGHTS = ['cpplint.py',
'daemon.py',
@@ -365,6 +370,9 @@ class SourceProcessor(SourceFileProcessor):
else:
print "%s has trailing whitespaces in line %s." % (name, linenumbers)
result = False
+ if not contents.endswith('\n') or contents.endswith('\n\n'):
+ print "%s does not end with a single new line." % name
+ result = False
# Check two empty lines between declarations.
if name.endswith(".cc"):
line = 0
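A quick illustration of the new trailing-newline rule (sample strings made up): exactly one final newline passes; a missing newline or a blank line at the end fails.

for contents in ("int x;\n", "int x;", "int x;\n\n"):
  bad = not contents.endswith('\n') or contents.endswith('\n\n')
  print repr(contents), "->", "FAIL" if bad else "ok"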
diff --git a/deps/v8/tools/push-to-trunk/auto_roll.py b/deps/v8/tools/push-to-trunk/auto_roll.py
new file mode 100755
index 0000000000..086f5a8abb
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/auto_roll.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import optparse
+import os
+import re
+import sys
+import urllib
+
+from common_includes import *
+import push_to_trunk
+from push_to_trunk import PushToTrunkOptions
+from push_to_trunk import RunPushToTrunk
+
+SETTINGS_LOCATION = "SETTINGS_LOCATION"
+
+CONFIG = {
+ PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile",
+ DOT_GIT_LOCATION: ".git",
+ SETTINGS_LOCATION: "~/.auto-roll",
+}
+
+
+class AutoRollOptions(CommonOptions):
+ def __init__(self, options):
+ super(AutoRollOptions, self).__init__(options)
+ self.requires_editor = False
+ self.status_password = options.status_password
+ self.c = options.c
+ self.push = getattr(options, 'push', False)
+ self.author = getattr(options, 'a', None)
+
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ self.InitialEnvironmentChecks()
+ self.CommonPrepare()
+
+
+class CheckAutoRollSettings(Step):
+ MESSAGE = "Checking settings file."
+
+ def RunStep(self):
+ settings_file = os.path.realpath(self.Config(SETTINGS_LOCATION))
+ if os.path.exists(settings_file):
+ settings_dict = json.loads(FileToText(settings_file))
+ if settings_dict.get("enable_auto_roll") is False:
+ self.Die("Push to trunk disabled by auto-roll settings file: %s"
+ % settings_file)
+
+
+class CheckTreeStatus(Step):
+ MESSAGE = "Checking v8 tree status message."
+
+ def RunStep(self):
+ status_url = "https://v8-status.appspot.com/current?format=json"
+ status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
+ message = json.loads(status_json)["message"]
+ if re.search(r"nopush|no push", message, flags=re.I):
+ self.Die("Push to trunk disabled by tree state: %s" % message)
+ self.Persist("tree_message", message)
+
+
+class FetchLatestRevision(Step):
+ MESSAGE = "Fetching latest V8 revision."
+
+ def RunStep(self):
+ log = self.Git("svn log -1 --oneline").strip()
+ match = re.match(r"^r(\d+) ", log)
+ if not match:
+ self.Die("Could not extract current svn revision from log.")
+ self.Persist("latest", match.group(1))
+
+
+class CheckLastPush(Step):
+ MESSAGE = "Checking last V8 push to trunk."
+
+ def RunStep(self):
+ self.RestoreIfUnset("latest")
+ log = self.Git("svn log -1 --oneline ChangeLog").strip()
+ match = re.match(r"^r(\d+) \| Prepare push to trunk", log)
+ if match:
+ latest = int(self._state["latest"])
+ last_push = int(match.group(1))
+      # TODO(machenbach): This metric counts all revisions. It could be
+ # improved by counting only the revisions on bleeding_edge.
+ if latest - last_push < 10:
+ # This makes sure the script doesn't push twice in a row when the cron
+ # job retries several times.
+ self.Die("Last push too recently: %d" % last_push)
+
+
+class FetchLKGR(Step):
+ MESSAGE = "Fetching V8 LKGR."
+
+ def RunStep(self):
+ lkgr_url = "https://v8-status.appspot.com/lkgr"
+ # Retry several times since app engine might have issues.
+ self.Persist("lkgr", self.ReadURL(lkgr_url, wait_plan=[5, 20, 300, 300]))
+
+
+class PushToTrunk(Step):
+ MESSAGE = "Pushing to trunk if possible."
+
+ def PushTreeStatus(self, message):
+ if not self._options.status_password:
+ print "Skipping tree status update without password file."
+ return
+ params = {
+ "message": message,
+ "username": "v8-auto-roll@chromium.org",
+ "password": FileToText(self._options.status_password).strip(),
+ }
+ params = urllib.urlencode(params)
+ print "Pushing tree status: '%s'" % message
+ self.ReadURL("https://v8-status.appspot.com/status", params,
+ wait_plan=[5, 20])
+
+ def RunStep(self):
+ self.RestoreIfUnset("latest")
+ self.RestoreIfUnset("lkgr")
+ self.RestoreIfUnset("tree_message")
+ latest = int(self._state["latest"])
+ lkgr = int(self._state["lkgr"])
+ if latest == lkgr:
+ print "ToT (r%d) is clean. Pushing to trunk." % latest
+ self.PushTreeStatus("Tree is closed (preparing to push)")
+
+ # TODO(machenbach): Update the script before calling it.
+ try:
+ if self._options.push:
+ self._side_effect_handler.Call(
+ RunPushToTrunk,
+ push_to_trunk.CONFIG,
+ PushToTrunkOptions.MakeForcedOptions(self._options.author,
+ self._options.reviewer,
+ self._options.c),
+ self._side_effect_handler)
+ finally:
+ self.PushTreeStatus(self._state["tree_message"])
+ else:
+ print("ToT (r%d) is ahead of the LKGR (r%d). Skipping push to trunk."
+ % (latest, lkgr))
+
+
+def RunAutoRoll(config,
+ options,
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+ step_classes = [
+ Preparation,
+ CheckAutoRollSettings,
+ CheckTreeStatus,
+ FetchLatestRevision,
+ CheckLastPush,
+ FetchLKGR,
+ PushToTrunk,
+ ]
+ RunScript(step_classes, config, options, side_effect_handler)
+
+
+def BuildOptions():
+ result = optparse.OptionParser()
+ result.add_option("-a", "--author", dest="a",
+ help=("Specify the author email used for rietveld."))
+ result.add_option("-c", "--chromium", dest="c",
+ help=("Specify the path to your Chromium src/ "
+ "directory to automate the V8 roll."))
+ result.add_option("-p", "--push",
+ help="Push to trunk if possible. Dry run if unspecified.",
+ default=False, action="store_true")
+ result.add_option("-r", "--reviewer",
+ help=("Specify the account name to be used for reviews."))
+ result.add_option("-s", "--step", dest="s",
+ help="Specify the step where to start work. Default: 0.",
+ default=0, type="int")
+ result.add_option("--status-password",
+ help="A file with the password to the status app.")
+ return result
+
+
+def Main():
+ parser = BuildOptions()
+ (options, args) = parser.parse_args()
+ if not options.a or not options.c or not options.reviewer:
+ print "You need to specify author, chromium src location and reviewer."
+ parser.print_help()
+ return 1
+ RunAutoRoll(CONFIG, AutoRollOptions(options))
+
+if __name__ == "__main__":
+ sys.exit(Main())
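To make the tree-status gate in CheckTreeStatus concrete (messages made up): any status containing "nopush" or "no push", case-insensitively, blocks the automatic push.

import re
for message in ("Tree is open",
                "Tree is closed (nopush, waterfall is red)",
                "No push until the roll lands"):
  blocked = bool(re.search(r"nopush|no push", message, flags=re.I))
  print message, "->", "blocked" if blocked else "ok to push"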
diff --git a/deps/v8/tools/push-to-trunk/common_includes.py b/deps/v8/tools/push-to-trunk/common_includes.py
new file mode 100644
index 0000000000..f1d5e50117
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/common_includes.py
@@ -0,0 +1,519 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import datetime
+import os
+import re
+import subprocess
+import sys
+import textwrap
+import time
+import urllib2
+
+PERSISTFILE_BASENAME = "PERSISTFILE_BASENAME"
+TEMP_BRANCH = "TEMP_BRANCH"
+BRANCHNAME = "BRANCHNAME"
+DOT_GIT_LOCATION = "DOT_GIT_LOCATION"
+VERSION_FILE = "VERSION_FILE"
+CHANGELOG_FILE = "CHANGELOG_FILE"
+CHANGELOG_ENTRY_FILE = "CHANGELOG_ENTRY_FILE"
+COMMITMSG_FILE = "COMMITMSG_FILE"
+PATCH_FILE = "PATCH_FILE"
+
+
+def TextToFile(text, file_name):
+ with open(file_name, "w") as f:
+ f.write(text)
+
+
+def AppendToFile(text, file_name):
+ with open(file_name, "a") as f:
+ f.write(text)
+
+
+def LinesInFile(file_name):
+ with open(file_name) as f:
+ for line in f:
+ yield line
+
+
+def FileToText(file_name):
+ with open(file_name) as f:
+ return f.read()
+
+
+def MSub(rexp, replacement, text):
+ return re.sub(rexp, replacement, text, flags=re.MULTILINE)
+
+
+def Fill80(line):
+ # Replace tabs and remove surrounding space.
+ line = re.sub(r"\t", r" ", line.strip())
+
+ # Format with 8 characters indentation and line width 80.
+ return textwrap.fill(line, width=80, initial_indent=" ",
+ subsequent_indent=" ")
+
+
+def GetLastChangeLogEntries(change_log_file):
+ result = []
+ for line in LinesInFile(change_log_file):
+ if re.search(r"^\d{4}-\d{2}-\d{2}:", line) and result: break
+ result.append(line)
+ return "".join(result)
+
+
+def MakeComment(text):
+ return MSub(r"^( ?)", "#", text)
+
+
+def StripComments(text):
+ # Use split not splitlines to keep terminal newlines.
+ return "\n".join(filter(lambda x: not x.startswith("#"), text.split("\n")))
+
+
+def MakeChangeLogBody(commit_messages, auto_format=False):
+ result = ""
+ added_titles = set()
+ for (title, body, author) in commit_messages:
+ # TODO(machenbach): Better check for reverts. A revert should remove the
+ # original CL from the actual log entry.
+ title = title.strip()
+ if auto_format:
+ # Only add commits that set the LOG flag correctly.
+ log_exp = r"^[ \t]*LOG[ \t]*=[ \t]*(?:(?:Y(?:ES)?)|TRUE)"
+ if not re.search(log_exp, body, flags=re.I | re.M):
+ continue
+ # Never include reverts.
+ if title.startswith("Revert "):
+ continue
+ # Don't include duplicates.
+ if title in added_titles:
+ continue
+
+ # Add and format the commit's title and bug reference. Move dot to the end.
+ added_titles.add(title)
+ raw_title = re.sub(r"(\.|\?|!)$", "", title)
+ bug_reference = MakeChangeLogBugReference(body)
+ space = " " if bug_reference else ""
+ result += "%s\n" % Fill80("%s%s%s." % (raw_title, space, bug_reference))
+
+ # Append the commit's author for reference if not in auto-format mode.
+ if not auto_format:
+ result += "%s\n" % Fill80("(%s)" % author.strip())
+
+ result += "\n"
+ return result
+
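For reference, the LOG flag filter above accepts the usual spellings case-insensitively (sample bodies made up):

import re
log_exp = r"^[ \t]*LOG[ \t]*=[ \t]*(?:(?:Y(?:ES)?)|TRUE)"
for body in ("LOG=Y", "LOG=yes", "log = true", "LOG=N", "BUG=v8:1234"):
  print body, "->", bool(re.search(log_exp, body, flags=re.I | re.M))
# The first three match; LOG=N and commits without the flag are skipped.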
+
+def MakeChangeLogBugReference(body):
+ """Grep for "BUG=xxxx" lines in the commit message and convert them to
+ "(issue xxxx)".
+ """
+ crbugs = []
+ v8bugs = []
+
+ def AddIssues(text):
+ ref = re.match(r"^BUG[ \t]*=[ \t]*(.+)$", text.strip())
+ if not ref:
+ return
+ for bug in ref.group(1).split(","):
+ bug = bug.strip()
+ match = re.match(r"^v8:(\d+)$", bug)
+ if match: v8bugs.append(int(match.group(1)))
+ else:
+ match = re.match(r"^(?:chromium:)?(\d+)$", bug)
+ if match: crbugs.append(int(match.group(1)))
+
+ # Add issues to crbugs and v8bugs.
+ map(AddIssues, body.splitlines())
+
+ # Filter duplicates, sort, stringify.
+ crbugs = map(str, sorted(set(crbugs)))
+ v8bugs = map(str, sorted(set(v8bugs)))
+
+ bug_groups = []
+ def FormatIssues(prefix, bugs):
+ if len(bugs) > 0:
+ plural = "s" if len(bugs) > 1 else ""
+ bug_groups.append("%sissue%s %s" % (prefix, plural, ", ".join(bugs)))
+
+ FormatIssues("", v8bugs)
+ FormatIssues("Chromium ", crbugs)
+
+ if len(bug_groups) > 0:
+ return "(%s)" % ", ".join(bug_groups)
+ else:
+ return ""
+
+
+# Some commands don't like the pipe, e.g. calling vi from within the script or
+# from subscripts like git cl upload.
+def Command(cmd, args="", prefix="", pipe=True):
+ # TODO(machenbach): Use timeout.
+ cmd_line = "%s %s %s" % (prefix, cmd, args)
+ print "Command: %s" % cmd_line
+ try:
+ if pipe:
+ return subprocess.check_output(cmd_line, shell=True)
+ else:
+ return subprocess.check_call(cmd_line, shell=True)
+ except subprocess.CalledProcessError:
+ return None
+
+
+# Wrapper for side effects.
+class SideEffectHandler(object):
+ def Call(self, fun, *args, **kwargs):
+ return fun(*args, **kwargs)
+
+ def Command(self, cmd, args="", prefix="", pipe=True):
+ return Command(cmd, args, prefix, pipe)
+
+ def ReadLine(self):
+ return sys.stdin.readline().strip()
+
+ def ReadURL(self, url, params=None):
+ # pylint: disable=E1121
+ url_fh = urllib2.urlopen(url, params, 60)
+ try:
+ return url_fh.read()
+ finally:
+ url_fh.close()
+
+ def Sleep(self, seconds):
+ time.sleep(seconds)
+
+ def GetDate(self):
+ return datetime.date.today().strftime("%Y-%m-%d")
+
+DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler()
+
+
+class NoRetryException(Exception):
+ pass
+
+
+class CommonOptions(object):
+ def __init__(self, options, manual=True):
+ self.requires_editor = True
+ self.wait_for_lgtm = True
+ self.s = options.s
+ self.force_readline_defaults = not manual
+ self.force_upload = not manual
+ self.manual = manual
+ self.reviewer = getattr(options, 'reviewer', None)
+ self.author = getattr(options, 'a', None)
+
+
+class Step(object):
+ def __init__(self, text, requires, number, config, state, options, handler):
+ self._text = text
+ self._requires = requires
+ self._number = number
+ self._config = config
+ self._state = state
+ self._options = options
+ self._side_effect_handler = handler
+ assert self._number >= 0
+ assert self._config is not None
+ assert self._state is not None
+ assert self._side_effect_handler is not None
+ assert isinstance(options, CommonOptions)
+
+ def Config(self, key):
+ return self._config[key]
+
+ def Run(self):
+ if self._requires:
+ self.RestoreIfUnset(self._requires)
+ if not self._state[self._requires]:
+ return
+ print ">>> Step %d: %s" % (self._number, self._text)
+ self.RunStep()
+
+ def RunStep(self):
+ raise NotImplementedError
+
+ def Retry(self, cb, retry_on=None, wait_plan=None):
+ """ Retry a function.
+ Params:
+ cb: The function to retry.
+ retry_on: A callback that takes the result of the function and returns
+ True if the function should be retried. A function throwing an
+ exception is always retried.
+ wait_plan: A list of waiting delays between retries in seconds. The
+ maximum number of retries is len(wait_plan).
+ """
+ retry_on = retry_on or (lambda x: False)
+ wait_plan = list(wait_plan or [])
+ wait_plan.reverse()
+ while True:
+ got_exception = False
+ try:
+ result = cb()
+ except NoRetryException, e:
+ raise e
+ except Exception:
+ got_exception = True
+ if got_exception or retry_on(result):
+ if not wait_plan:
+ raise Exception("Retried too often. Giving up.")
+ wait_time = wait_plan.pop()
+ print "Waiting for %f seconds." % wait_time
+ self._side_effect_handler.Sleep(wait_time)
+ print "Retrying..."
+ else:
+ return result
+
+ def ReadLine(self, default=None):
+ # Don't prompt in forced mode.
+ if self._options.force_readline_defaults and default is not None:
+ print "%s (forced)" % default
+ return default
+ else:
+ return self._side_effect_handler.ReadLine()
+
+ def Git(self, args="", prefix="", pipe=True, retry_on=None):
+ cmd = lambda: self._side_effect_handler.Command("git", args, prefix, pipe)
+ return self.Retry(cmd, retry_on, [5, 30])
+
+ def SVN(self, args="", prefix="", pipe=True, retry_on=None):
+ cmd = lambda: self._side_effect_handler.Command("svn", args, prefix, pipe)
+ return self.Retry(cmd, retry_on, [5, 30])
+
+ def Editor(self, args):
+ if self._options.requires_editor:
+ return self._side_effect_handler.Command(os.environ["EDITOR"], args,
+ pipe=False)
+
+ def ReadURL(self, url, params=None, retry_on=None, wait_plan=None):
+ wait_plan = wait_plan or [3, 60, 600]
+ cmd = lambda: self._side_effect_handler.ReadURL(url, params)
+ return self.Retry(cmd, retry_on, wait_plan)
+
+ def GetDate(self):
+ return self._side_effect_handler.GetDate()
+
+ def Die(self, msg=""):
+ if msg != "":
+ print "Error: %s" % msg
+ print "Exiting"
+ raise Exception(msg)
+
+ def DieNoManualMode(self, msg=""):
+ if not self._options.manual:
+ msg = msg or "Only available in manual mode."
+ self.Die(msg)
+
+ def Confirm(self, msg):
+ print "%s [Y/n] " % msg,
+ answer = self.ReadLine(default="Y")
+ return answer == "" or answer == "Y" or answer == "y"
+
+ def DeleteBranch(self, name):
+ git_result = self.Git("branch").strip()
+ for line in git_result.splitlines():
+ if re.match(r".*\s+%s$" % name, line):
+ msg = "Branch %s exists, do you want to delete it?" % name
+ if self.Confirm(msg):
+ if self.Git("branch -D %s" % name) is None:
+ self.Die("Deleting branch '%s' failed." % name)
+ print "Branch %s deleted." % name
+ else:
+ msg = "Can't continue. Please delete branch %s and try again." % name
+ self.Die(msg)
+
+ def Persist(self, var, value):
+ value = value or "__EMPTY__"
+ TextToFile(value, "%s-%s" % (self._config[PERSISTFILE_BASENAME], var))
+
+ def Restore(self, var):
+ value = FileToText("%s-%s" % (self._config[PERSISTFILE_BASENAME], var))
+ value = value or self.Die("Variable '%s' could not be restored." % var)
+ return "" if value == "__EMPTY__" else value
+
+ def RestoreIfUnset(self, var_name):
+ if self._state.get(var_name) is None:
+ self._state[var_name] = self.Restore(var_name)
+
+ def InitialEnvironmentChecks(self):
+ # Cancel if this is not a git checkout.
+ if not os.path.exists(self._config[DOT_GIT_LOCATION]):
+ self.Die("This is not a git checkout, this script won't work for you.")
+
+ # Cancel if EDITOR is unset or not executable.
+ if (self._options.requires_editor and (not os.environ.get("EDITOR") or
+ Command("which", os.environ["EDITOR"]) is None)):
+ self.Die("Please set your EDITOR environment variable, you'll need it.")
+
+ def CommonPrepare(self):
+ # Check for a clean workdir.
+ if self.Git("status -s -uno").strip() != "":
+ self.Die("Workspace is not clean. Please commit or undo your changes.")
+
+ # Persist current branch.
+ current_branch = ""
+ git_result = self.Git("status -s -b -uno").strip()
+ for line in git_result.splitlines():
+ match = re.match(r"^## (.+)", line)
+ if match:
+ current_branch = match.group(1)
+ break
+ self.Persist("current_branch", current_branch)
+
+ # Fetch unfetched revisions.
+ if self.Git("svn fetch") is None:
+ self.Die("'git svn fetch' failed.")
+
+ def PrepareBranch(self):
+ # Get ahold of a safe temporary branch and check it out.
+ self.RestoreIfUnset("current_branch")
+ if self._state["current_branch"] != self._config[TEMP_BRANCH]:
+ self.DeleteBranch(self._config[TEMP_BRANCH])
+ self.Git("checkout -b %s" % self._config[TEMP_BRANCH])
+
+ # Delete the branch that will be created later if it exists already.
+ self.DeleteBranch(self._config[BRANCHNAME])
+
+ def CommonCleanup(self):
+ self.RestoreIfUnset("current_branch")
+ self.Git("checkout -f %s" % self._state["current_branch"])
+ if self._config[TEMP_BRANCH] != self._state["current_branch"]:
+ self.Git("branch -D %s" % self._config[TEMP_BRANCH])
+ if self._config[BRANCHNAME] != self._state["current_branch"]:
+ self.Git("branch -D %s" % self._config[BRANCHNAME])
+
+ # Clean up all temporary files.
+ Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])
+
+ def ReadAndPersistVersion(self, prefix=""):
+ def ReadAndPersist(var_name, def_name):
+ match = re.match(r"^#define %s\s+(\d*)" % def_name, line)
+ if match:
+ value = match.group(1)
+ self.Persist("%s%s" % (prefix, var_name), value)
+ self._state["%s%s" % (prefix, var_name)] = value
+ for line in LinesInFile(self._config[VERSION_FILE]):
+ for (var_name, def_name) in [("major", "MAJOR_VERSION"),
+ ("minor", "MINOR_VERSION"),
+ ("build", "BUILD_NUMBER"),
+ ("patch", "PATCH_LEVEL")]:
+ ReadAndPersist(var_name, def_name)
+
+ def RestoreVersionIfUnset(self, prefix=""):
+ for v in ["major", "minor", "build", "patch"]:
+ self.RestoreIfUnset("%s%s" % (prefix, v))
+
+ def WaitForLGTM(self):
+ print ("Please wait for an LGTM, then type \"LGTM<Return>\" to commit "
+ "your change. (If you need to iterate on the patch or double check "
+ "that it's sane, do so in another shell, but remember to not "
+ "change the headline of the uploaded CL.")
+ answer = ""
+ while answer != "LGTM":
+ print "> ",
+ answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
+ if answer != "LGTM":
+ print "That was not 'LGTM'."
+
+ def WaitForResolvingConflicts(self, patch_file):
+ print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
+ "or resolve the conflicts, stage *all* touched files with "
+ "'git add', and type \"RESOLVED<Return>\"")
+ self.DieNoManualMode()
+ answer = ""
+ while answer != "RESOLVED":
+ if answer == "ABORT":
+ self.Die("Applying the patch failed.")
+ if answer != "":
+ print "That was not 'RESOLVED' or 'ABORT'."
+ print "> ",
+ answer = self.ReadLine()
+
+ # Takes a file containing the patch to apply as first argument.
+ def ApplyPatch(self, patch_file, reverse_patch=""):
+ args = "apply --index --reject %s \"%s\"" % (reverse_patch, patch_file)
+ if self.Git(args) is None:
+ self.WaitForResolvingConflicts(patch_file)
+
+
+class UploadStep(Step):
+ MESSAGE = "Upload for code review."
+
+ def RunStep(self):
+ if self._options.reviewer:
+ print "Using account %s for review." % self._options.reviewer
+ reviewer = self._options.reviewer
+ else:
+ print "Please enter the email address of a V8 reviewer for your patch: ",
+ self.DieNoManualMode("A reviewer must be specified in forced mode.")
+ reviewer = self.ReadLine()
+ author_option = self._options.author
+ author = " --email \"%s\"" % author_option if author_option else ""
+ force_flag = " -f" if self._options.force_upload else ""
+ args = ("cl upload%s -r \"%s\" --send-mail%s"
+ % (author, reviewer, force_flag))
+ # TODO(machenbach): Check output in forced mode. Verify that all required
+ # base files were uploaded, if not retry.
+ if self.Git(args, pipe=False) is None:
+ self.Die("'git cl upload' failed, please try again.")
+
+
+def MakeStep(step_class=Step, number=0, state=None, config=None,
+ options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+  # Allow passing in empty dictionaries.
+ state = state if state is not None else {}
+ config = config if config is not None else {}
+
+ try:
+ message = step_class.MESSAGE
+ except AttributeError:
+ message = step_class.__name__
+ try:
+ requires = step_class.REQUIRES
+ except AttributeError:
+ requires = None
+
+ return step_class(message, requires, number=number, config=config,
+ state=state, options=options,
+ handler=side_effect_handler)
+
+
+def RunScript(step_classes,
+ config,
+ options,
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+ state = {}
+ steps = []
+ for (number, step_class) in enumerate(step_classes):
+ steps.append(MakeStep(step_class, number, state, config,
+ options, side_effect_handler))
+
+ for step in steps[options.s:]:
+ step.Run()
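A minimal standalone restatement of the retry policy implemented by Step.Retry above (simplified: plain function, no Step plumbing, no NoRetryException special case): each entry in wait_plan allows one more attempt, preceded by that many seconds of sleep.

import time

def retry(cb, retry_on=lambda result: False, wait_plan=(5, 30)):
  plan = list(wait_plan)
  plan.reverse()
  while True:
    failed = False
    try:
      result = cb()
    except Exception:
      failed = True
    if failed or retry_on(result):
      if not plan:
        raise Exception("Retried too often. Giving up.")
      seconds = plan.pop()
      print "Waiting for %d seconds." % seconds
      time.sleep(seconds)
    else:
      return result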
diff --git a/deps/v8/tools/push-to-trunk/merge_to_branch.py b/deps/v8/tools/push-to-trunk/merge_to_branch.py
new file mode 100755
index 0000000000..7d5f53f3d4
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/merge_to_branch.py
@@ -0,0 +1,406 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from collections import OrderedDict
+import optparse
+import sys
+
+from common_includes import *
+
+ALREADY_MERGING_SENTINEL_FILE = "ALREADY_MERGING_SENTINEL_FILE"
+COMMIT_HASHES_FILE = "COMMIT_HASHES_FILE"
+TEMPORARY_PATCH_FILE = "TEMPORARY_PATCH_FILE"
+
+CONFIG = {
+ BRANCHNAME: "prepare-merge",
+ PERSISTFILE_BASENAME: "/tmp/v8-merge-to-branch-tempfile",
+ ALREADY_MERGING_SENTINEL_FILE:
+ "/tmp/v8-merge-to-branch-tempfile-already-merging",
+ TEMP_BRANCH: "prepare-merge-temporary-branch-created-by-script",
+ DOT_GIT_LOCATION: ".git",
+ VERSION_FILE: "src/version.cc",
+ TEMPORARY_PATCH_FILE: "/tmp/v8-prepare-merge-tempfile-temporary-patch",
+ COMMITMSG_FILE: "/tmp/v8-prepare-merge-tempfile-commitmsg",
+ COMMIT_HASHES_FILE: "/tmp/v8-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
+}
+
+
+class MergeToBranchOptions(CommonOptions):
+ def __init__(self, options, args):
+ super(MergeToBranchOptions, self).__init__(options, True)
+ self.requires_editor = True
+ self.wait_for_lgtm = True
+ self.delete_sentinel = options.f
+ self.message = getattr(options, "message", "")
+ self.revert = "--reverse" if getattr(options, "r", None) else ""
+ self.revert_bleeding_edge = getattr(options, "revert_bleeding_edge", False)
+ self.patch = getattr(options, "p", "")
+ self.args = args
+
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ if os.path.exists(self.Config(ALREADY_MERGING_SENTINEL_FILE)):
+ if self._options.delete_sentinel:
+ os.remove(self.Config(ALREADY_MERGING_SENTINEL_FILE))
+ elif self._options.s == 0:
+ self.Die("A merge is already in progress")
+ open(self.Config(ALREADY_MERGING_SENTINEL_FILE), "a").close()
+
+ self.InitialEnvironmentChecks()
+ if self._options.revert_bleeding_edge:
+ self.Persist("merge_to_branch", "bleeding_edge")
+ elif self._options.args[0]:
+ self.Persist("merge_to_branch", self._options.args[0])
+ self._options.args = self._options.args[1:]
+ else:
+ self.Die("Please specify a branch to merge to")
+
+ self.CommonPrepare()
+ self.PrepareBranch()
+
+
+class CreateBranch(Step):
+ MESSAGE = "Create a fresh branch for the patch."
+
+ def RunStep(self):
+ self.RestoreIfUnset("merge_to_branch")
+ args = "checkout -b %s svn/%s" % (self.Config(BRANCHNAME),
+ self._state["merge_to_branch"])
+ if self.Git(args) is None:
+ self.Die("Creating branch %s failed." % self.Config(BRANCHNAME))
+
+
+class SearchArchitecturePorts(Step):
+ MESSAGE = "Search for corresponding architecture ports."
+
+ def RunStep(self):
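+ # For each revision to merge, look for follow-up commits on bleeding_edge
+ # that reference it as an architecture port (for instance a hypothetical
+ # commit titled "A64: Port r12345") and offer to include them in the merge.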
+ full_revision_list = list(OrderedDict.fromkeys(self._options.args))
+ port_revision_list = []
+ for revision in full_revision_list:
+ # Search for commits that match the "Port rXXX" pattern.
+ args = ("log svn/bleeding_edge --reverse "
+ "--format=%%H --grep=\"Port r%d\"" % int(revision))
+ git_hashes = self.Git(args) or ""
+ for git_hash in git_hashes.strip().splitlines():
+ args = "svn find-rev %s svn/bleeding_edge" % git_hash
+ svn_revision = self.Git(args).strip()
+ if not svn_revision:
+ self.Die("Cannot determine svn revision for %s" % git_hash)
+ revision_title = self.Git("log -1 --format=%%s %s" % git_hash)
+
+ # Is this revision included in the original revision list?
+ if svn_revision in full_revision_list:
+ print("Found port of r%s -> r%s (already included): %s"
+ % (revision, svn_revision, revision_title))
+ else:
+ print("Found port of r%s -> r%s: %s"
+ % (revision, svn_revision, revision_title))
+ port_revision_list.append(svn_revision)
+
+ # Did we find any ports?
+ if len(port_revision_list) > 0:
+ if self.Confirm("Automatically add corresponding ports (%s)?"
+ % ", ".join(port_revision_list)):
+ # On 'y': add the found ports to the revision list.
+ full_revision_list.extend(port_revision_list)
+ self.Persist("full_revision_list", ",".join(full_revision_list))
+
+
+class FindGitRevisions(Step):
+ MESSAGE = "Find the git revisions associated with the patches."
+
+ def RunStep(self):
+ self.RestoreIfUnset("full_revision_list")
+ self.RestoreIfUnset("merge_to_branch")
+ full_revision_list = self._state["full_revision_list"].split(",")
+ patch_commit_hashes = []
+ for revision in full_revision_list:
+ next_hash = self.Git("svn find-rev \"r%s\" svn/bleeding_edge" % revision)
+ if not next_hash:
+ self.Die("Cannot determine git hash for r%s" % revision)
+ patch_commit_hashes.append(next_hash)
+
+ # Stringify: [123, 234] -> "r123, r234"
+ revision_list = ", ".join(map(lambda s: "r%s" % s, full_revision_list))
+
+ if not revision_list:
+ self.Die("Revision list is empty.")
+
+ if self._options.revert:
+ if not self._options.revert_bleeding_edge:
+ new_commit_msg = ("Rollback of %s in %s branch."
+ % (revision_list, self._state["merge_to_branch"]))
+ else:
+ new_commit_msg = "Revert %s ." % revision_list
+ else:
+ new_commit_msg = ("Merged %s into %s branch."
+ % (revision_list, self._state["merge_to_branch"]))
+ new_commit_msg += "\n\n"
+
+ for commit_hash in patch_commit_hashes:
+ patch_merge_desc = self.Git("log -1 --format=%%s %s" % commit_hash)
+ new_commit_msg += "%s\n\n" % patch_merge_desc.strip()
+
+ bugs = []
+ for commit_hash in patch_commit_hashes:
+ msg = self.Git("log -1 %s" % commit_hash)
+ for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg,
+ re.M):
+ bugs.extend(map(lambda s: s.strip(), bug.split(",")))
+ bug_aggregate = ",".join(sorted(bugs))
+ if bug_aggregate:
+ new_commit_msg += "BUG=%s\nLOG=N\n" % bug_aggregate
+ TextToFile(new_commit_msg, self.Config(COMMITMSG_FILE))
+ self.Persist("new_commit_msg", new_commit_msg)
+ self.Persist("revision_list", revision_list)
+ self._state["patch_commit_hashes"] = patch_commit_hashes
+ self.Persist("patch_commit_hashes_list", " ".join(patch_commit_hashes))
+
+
+class ApplyPatches(Step):
+ MESSAGE = "Apply patches for selected revisions."
+
+ def RunStep(self):
+ self.RestoreIfUnset("merge_to_branch")
+ self.RestoreIfUnset("patch_commit_hashes_list")
+ patch_commit_hashes = self._state.get("patch_commit_hashes")
+ if not patch_commit_hashes:
+ patch_commit_hashes = (
+ self._state.get("patch_commit_hashes_list").strip().split(" "))
+ if not patch_commit_hashes and not self._options.patch:
+ self.Die("Variable patch_commit_hashes could not be restored.")
+ for commit_hash in patch_commit_hashes:
+ print("Applying patch for %s to %s..."
+ % (commit_hash, self._state["merge_to_branch"]))
+ patch = self.Git("log -1 -p %s" % commit_hash)
+ TextToFile(patch, self.Config(TEMPORARY_PATCH_FILE))
+ self.ApplyPatch(self.Config(TEMPORARY_PATCH_FILE), self._options.revert)
+ if self._options.patch:
+ self.ApplyPatch(self._options.patch, self._options.revert)
+
+
+class PrepareVersion(Step):
+ MESSAGE = "Prepare version file."
+
+ def RunStep(self):
+ if self._options.revert_bleeding_edge:
+ return
+ # These version numbers are used again for creating the tag
+ self.ReadAndPersistVersion()
+
+
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
+
+ def RunStep(self):
+ if self._options.revert_bleeding_edge:
+ return
+ self.RestoreIfUnset("patch")
+ new_patch = str(int(self._state["patch"]) + 1)
+ if self.Confirm("Automatically increment PATCH_LEVEL? (Saying 'n' will "
+ "fire up your EDITOR on %s so you can make arbitrary "
+ "changes. When you're done, save the file and exit your "
+ "EDITOR.)" % self.Config(VERSION_FILE)):
+ text = FileToText(self.Config(VERSION_FILE))
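+ # Bump only the PATCH_LEVEL define, e.g. (illustrative):
+ # "#define PATCH_LEVEL 0" becomes "#define PATCH_LEVEL 1".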
+ text = MSub(r"(?<=#define PATCH_LEVEL)(?P<space>\s+)\d*$",
+ r"\g<space>%s" % new_patch,
+ text)
+ TextToFile(text, self.Config(VERSION_FILE))
+ else:
+ self.Editor(self.Config(VERSION_FILE))
+ self.ReadAndPersistVersion("new_")
+
+
+class CommitLocal(Step):
+ MESSAGE = "Commit to local branch."
+
+ def RunStep(self):
+ if self.Git("commit -a -F \"%s\"" % self.Config(COMMITMSG_FILE)) is None:
+ self.Die("'git commit -a' failed.")
+
+
+class CommitRepository(Step):
+ MESSAGE = "Commit to the repository."
+
+ def RunStep(self):
+ self.RestoreIfUnset("merge_to_branch")
+ if self.Git("checkout %s" % self.Config(BRANCHNAME)) is None:
+ self.Die("Cannot ensure that the current branch is %s"
+ % self.Config(BRANCHNAME))
+ self.WaitForLGTM()
+ if self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"") is None:
+ self.Die("Presubmit failed.")
+
+ if self.Git("cl dcommit -f --bypass-hooks",
+ retry_on=lambda x: x is None) is None:
+ self.Die("Failed to commit to %s" % self._status["merge_to_branch"])
+
+
+class PrepareSVN(Step):
+ MESSAGE = "Determine svn commit revision."
+
+ def RunStep(self):
+ if self._options.revert_bleeding_edge:
+ return
+ self.RestoreIfUnset("new_commit_msg")
+ self.RestoreIfUnset("merge_to_branch")
+ if self.Git("svn fetch") is None:
+ self.Die("'git svn fetch' failed.")
+ args = ("log -1 --format=%%H --grep=\"%s\" svn/%s"
+ % (self._state["new_commit_msg"], self._state["merge_to_branch"]))
+ commit_hash = self.Git(args).strip()
+ if not commit_hash:
+ self.Die("Unable to map git commit to svn revision.")
+ svn_revision = self.Git("svn find-rev %s" % commit_hash).strip()
+ print "subversion revision number is r%s" % svn_revision
+ self.Persist("svn_revision", svn_revision)
+
+
+class TagRevision(Step):
+ MESSAGE = "Create the tag."
+
+ def RunStep(self):
+ if self._options.revert_bleeding_edge:
+ return
+ self.RestoreVersionIfUnset("new_")
+ self.RestoreIfUnset("svn_revision")
+ self.RestoreIfUnset("merge_to_branch")
+ ver = "%s.%s.%s.%s" % (self._state["new_major"],
+ self._state["new_minor"],
+ self._state["new_build"],
+ self._state["new_patch"])
+ print "Creating tag svn/tags/%s" % ver
+ if self._state["merge_to_branch"] == "trunk":
+ to_url = "trunk"
+ else:
+ to_url = "branches/%s" % self._state["merge_to_branch"]
+ self.SVN("copy -r %s https://v8.googlecode.com/svn/%s "
+ "https://v8.googlecode.com/svn/tags/%s -m "
+ "\"Tagging version %s\""
+ % (self._state["svn_revision"], to_url, ver, ver))
+ self.Persist("to_url", to_url)
+
+
+class CleanUp(Step):
+ MESSAGE = "Cleanup."
+
+ def RunStep(self):
+ self.RestoreIfUnset("svn_revision")
+ self.RestoreIfUnset("to_url")
+ self.RestoreIfUnset("revision_list")
+ self.RestoreVersionIfUnset("new_")
+ ver = "%s.%s.%s.%s" % (self._state["new_major"],
+ self._state["new_minor"],
+ self._state["new_build"],
+ self._state["new_patch"])
+ self.CommonCleanup()
+ if not self._options.revert_bleeding_edge:
+ print "*** SUMMARY ***"
+ print "version: %s" % ver
+ print "branch: %s" % self._state["to_url"]
+ print "svn revision: %s" % self._state["svn_revision"]
+ if self._state["revision_list"]:
+ print "patches: %s" % self._state["revision_list"]
+
+
+def RunMergeToBranch(config,
+ options,
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+ step_classes = [
+ Preparation,
+ CreateBranch,
+ SearchArchitecturePorts,
+ FindGitRevisions,
+ ApplyPatches,
+ PrepareVersion,
+ IncrementVersion,
+ CommitLocal,
+ UploadStep,
+ CommitRepository,
+ PrepareSVN,
+ TagRevision,
+ CleanUp,
+ ]
+
+ RunScript(step_classes, config, options, side_effect_handler)
+
+
+def BuildOptions():
+ result = optparse.OptionParser()
+ result.set_usage("""%prog [OPTIONS]... [BRANCH] [REVISION]...
+
+Performs the necessary steps to merge revisions from bleeding_edge
+to other branches, including trunk.""")
+ result.add_option("-f",
+ help="Delete sentinel file.",
+ default=False, action="store_true")
+ result.add_option("-m", "--message",
+ help="Specify a commit message for the patch.")
+ result.add_option("-r", "--revert",
+ help="Revert specified patches.",
+ default=False, action="store_true")
+ result.add_option("-R", "--revert-bleeding-edge",
+ help="Revert specified patches from bleeding edge.",
+ default=False, action="store_true")
+ result.add_option("-p", "--patch", dest="p",
+ help="Specify a patch file to apply as part of the merge.")
+ result.add_option("-s", "--step", dest="s",
+ help="Specify the step where to start work. Default: 0.",
+ default=0, type="int")
+ return result
+
+
+def ProcessOptions(options, args):
+ revert_from_bleeding_edge = 1 if options.revert_bleeding_edge else 0
+ min_exp_args = 2 - revert_from_bleeding_edge
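+ # Normally BRANCH plus at least one REVISION is expected; when reverting
+ # from bleeding_edge, no branch argument is needed.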
+ if len(args) < min_exp_args:
+ if not options.p:
+ print "Either a patch file or revision numbers must be specified"
+ return False
+ if not options.message:
+ print "You must specify a merge comment if no patches are specified"
+ return False
+ if options.s < 0:
+ print "Bad step number %d" % options.s
+ return False
+ return True
+
+
+def Main():
+ parser = BuildOptions()
+ (options, args) = parser.parse_args()
+ if not ProcessOptions(options, args):
+ parser.print_help()
+ return 1
+ RunMergeToBranch(CONFIG, MergeToBranchOptions(options, args))
+
+if __name__ == "__main__":
+ sys.exit(Main())
diff --git a/deps/v8/tools/push-to-trunk/push_to_trunk.py b/deps/v8/tools/push-to-trunk/push_to_trunk.py
new file mode 100755
index 0000000000..71a037cf16
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/push_to_trunk.py
@@ -0,0 +1,628 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+import os
+import re
+import sys
+import tempfile
+import urllib2
+
+from common_includes import *
+
+TRUNKBRANCH = "TRUNKBRANCH"
+CHROMIUM = "CHROMIUM"
+DEPS_FILE = "DEPS_FILE"
+
+CONFIG = {
+ BRANCHNAME: "prepare-push",
+ TRUNKBRANCH: "trunk-push",
+ PERSISTFILE_BASENAME: "/tmp/v8-push-to-trunk-tempfile",
+ TEMP_BRANCH: "prepare-push-temporary-branch-created-by-script",
+ DOT_GIT_LOCATION: ".git",
+ VERSION_FILE: "src/version.cc",
+ CHANGELOG_FILE: "ChangeLog",
+ CHANGELOG_ENTRY_FILE: "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
+ PATCH_FILE: "/tmp/v8-push-to-trunk-tempfile-patch-file",
+ COMMITMSG_FILE: "/tmp/v8-push-to-trunk-tempfile-commitmsg",
+ DEPS_FILE: "DEPS",
+}
+
+
+class PushToTrunkOptions(CommonOptions):
+ @staticmethod
+ def MakeForcedOptions(author, reviewer, chrome_path):
+ """Convenience wrapper."""
+ class Options(object):
+ pass
+ options = Options()
+ options.s = 0
+ options.l = None
+ options.f = True
+ options.m = False
+ options.c = chrome_path
+ options.a = author
+ options.reviewer = reviewer
+ return PushToTrunkOptions(options)
+
+ def __init__(self, options):
+ super(PushToTrunkOptions, self).__init__(options, options.m)
+ self.requires_editor = not options.f
+ self.wait_for_lgtm = not options.f
+ self.tbr_commit = not options.m
+ self.l = options.l
+ self.reviewer = options.reviewer
+ self.c = options.c
+ self.author = getattr(options, 'a', None)
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ self.InitialEnvironmentChecks()
+ self.CommonPrepare()
+ self.PrepareBranch()
+ self.DeleteBranch(self.Config(TRUNKBRANCH))
+
+
+class FreshBranch(Step):
+ MESSAGE = "Create a fresh branch."
+
+ def RunStep(self):
+ args = "checkout -b %s svn/bleeding_edge" % self.Config(BRANCHNAME)
+ if self.Git(args) is None:
+ self.Die("Creating branch %s failed." % self.Config(BRANCHNAME))
+
+
+class DetectLastPush(Step):
+ MESSAGE = "Detect commit ID of last push to trunk."
+
+ def RunStep(self):
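+ # Unless given via -l, walk backwards through the commits touching the
+ # ChangeLog until the user confirms the last push-to-trunk commit.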
+ last_push = (self._options.l or
+ self.Git("log -1 --format=%H ChangeLog").strip())
+ while True:
+ # Print assumed commit, circumventing git's pager.
+ print self.Git("log -1 %s" % last_push)
+ if self.Confirm("Is the commit printed above the last push to trunk?"):
+ break
+ args = "log -1 --format=%H %s^ ChangeLog" % last_push
+ last_push = self.Git(args).strip()
+ self.Persist("last_push", last_push)
+ self._state["last_push"] = last_push
+
+
+class PrepareChangeLog(Step):
+ MESSAGE = "Prepare raw ChangeLog entry."
+
+ def Reload(self, body):
+ """Attempts to reload the commit message from rietveld in order to allow
+ late changes to the LOG flag. Note: This is brittle to future changes of
+ the web page name or structure.
+ """
+ match = re.search(r"^Review URL: https://codereview\.chromium\.org/(\d+)$",
+ body, flags=re.M)
+ if match:
+ cl_url = "https://codereview.chromium.org/%s/description" % match.group(1)
+ try:
+ # Fetch from Rietveld but only retry once with one second delay since
+ # there might be many revisions.
+ body = self.ReadURL(cl_url, wait_plan=[1])
+ except urllib2.URLError:
+ pass
+ return body
+
+ def RunStep(self):
+ self.RestoreIfUnset("last_push")
+
+ # These version numbers are used again later for the trunk commit.
+ self.ReadAndPersistVersion()
+
+ date = self.GetDate()
+ self.Persist("date", date)
+ output = "%s: Version %s.%s.%s\n\n" % (date,
+ self._state["major"],
+ self._state["minor"],
+ self._state["build"])
+ TextToFile(output, self.Config(CHANGELOG_ENTRY_FILE))
+
+ args = "log %s..HEAD --format=%%H" % self._state["last_push"]
+ commits = self.Git(args).strip()
+
+ # Cache raw commit messages.
+ commit_messages = [
+ [
+ self.Git("log -1 %s --format=\"%%s\"" % commit),
+ self.Reload(self.Git("log -1 %s --format=\"%%B\"" % commit)),
+ self.Git("log -1 %s --format=\"%%an\"" % commit),
+ ] for commit in commits.splitlines()
+ ]
+
+ # Auto-format commit messages.
+ body = MakeChangeLogBody(commit_messages, auto_format=True)
+ AppendToFile(body, self.Config(CHANGELOG_ENTRY_FILE))
+
+ msg = (" Performance and stability improvements on all platforms."
+ "\n#\n# The change log above is auto-generated. Please review if "
+ "all relevant\n# commit messages from the list below are included."
+ "\n# All lines starting with # will be stripped.\n#\n")
+ AppendToFile(msg, self.Config(CHANGELOG_ENTRY_FILE))
+
+ # Include unformatted commit messages as a reference in a comment.
+ comment_body = MakeComment(MakeChangeLogBody(commit_messages))
+ AppendToFile(comment_body, self.Config(CHANGELOG_ENTRY_FILE))
+
+
+class EditChangeLog(Step):
+ MESSAGE = "Edit ChangeLog entry."
+
+ def RunStep(self):
+ print ("Please press <Return> to have your EDITOR open the ChangeLog "
+ "entry, then edit its contents to your liking. When you're done, "
+ "save the file and exit your EDITOR. ")
+ self.ReadLine(default="")
+ self.Editor(self.Config(CHANGELOG_ENTRY_FILE))
+ handle, new_changelog = tempfile.mkstemp()
+ os.close(handle)
+
+ # Strip comments and reformat with correct indentation.
+ changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE)).rstrip()
+ changelog_entry = StripComments(changelog_entry)
+ changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
+ changelog_entry = changelog_entry.lstrip()
+
+ if changelog_entry == "":
+ self.Die("Empty ChangeLog entry.")
+
+ with open(new_changelog, "w") as f:
+ f.write(changelog_entry)
+ f.write("\n\n\n") # Explicitly insert two empty lines.
+
+ AppendToFile(FileToText(self.Config(CHANGELOG_FILE)), new_changelog)
+ TextToFile(FileToText(new_changelog), self.Config(CHANGELOG_FILE))
+ os.remove(new_changelog)
+
+
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
+
+ def RunStep(self):
+ self.RestoreIfUnset("build")
+ new_build = str(int(self._state["build"]) + 1)
+
+ if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
+ "fire up your EDITOR on %s so you can make arbitrary "
+ "changes. When you're done, save the file and exit your "
+ "EDITOR.)" % self.Config(VERSION_FILE))):
+ text = FileToText(self.Config(VERSION_FILE))
+ text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
+ r"\g<space>%s" % new_build,
+ text)
+ TextToFile(text, self.Config(VERSION_FILE))
+ else:
+ self.Editor(self.Config(VERSION_FILE))
+
+ self.ReadAndPersistVersion("new_")
+
+
+class CommitLocal(Step):
+ MESSAGE = "Commit to local branch."
+
+ def RunStep(self):
+ self.RestoreVersionIfUnset("new_")
+ prep_commit_msg = ("Prepare push to trunk. "
+ "Now working on version %s.%s.%s." % (self._state["new_major"],
+ self._state["new_minor"],
+ self._state["new_build"]))
+ self.Persist("prep_commit_msg", prep_commit_msg)
+
+ # Include optional TBR only in the git command. The persisted commit
+ # message is used for finding the commit again later.
+ if self._options.tbr_commit:
+ review = "\n\nTBR=%s" % self._options.reviewer
+ else:
+ review = ""
+ if self.Git("commit -a -m \"%s%s\"" % (prep_commit_msg, review)) is None:
+ self.Die("'git commit -a' failed.")
+
+
+class CommitRepository(Step):
+ MESSAGE = "Commit to the repository."
+
+ def RunStep(self):
+ self.WaitForLGTM()
+ # Re-read the ChangeLog entry (to pick up possible changes).
+ # FIXME(machenbach): This was hanging once with a broken pipe.
+ TextToFile(GetLastChangeLogEntries(self.Config(CHANGELOG_FILE)),
+ self.Config(CHANGELOG_ENTRY_FILE))
+
+ if self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"") is None:
+ self.Die("'git cl presubmit' failed, please try again.")
+
+ if self.Git("cl dcommit -f --bypass-hooks",
+ retry_on=lambda x: x is None) is None:
+ self.Die("'git cl dcommit' failed, please try again.")
+
+
+class StragglerCommits(Step):
+ MESSAGE = ("Fetch straggler commits that sneaked in since this script was "
+ "started.")
+
+ def RunStep(self):
+ if self.Git("svn fetch") is None:
+ self.Die("'git svn fetch' failed.")
+ self.Git("checkout svn/bleeding_edge")
+ self.RestoreIfUnset("prep_commit_msg")
+ args = "log -1 --format=%%H --grep=\"%s\"" % self._state["prep_commit_msg"]
+ prepare_commit_hash = self.Git(args).strip()
+ self.Persist("prepare_commit_hash", prepare_commit_hash)
+
+
+class SquashCommits(Step):
+ MESSAGE = "Squash commits into one."
+
+ def RunStep(self):
+ # Instead of relying on "git rebase -i", we'll just create a diff, because
+ # that's easier to automate.
+ self.RestoreIfUnset("prepare_commit_hash")
+ args = "diff svn/trunk %s" % self._state["prepare_commit_hash"]
+ TextToFile(self.Git(args), self.Config(PATCH_FILE))
+
+ # Convert the ChangeLog entry to commit message format.
+ self.RestoreIfUnset("date")
+ text = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
+
+ # Remove date and trailing white space.
+ text = re.sub(r"^%s: " % self._state["date"], "", text.rstrip())
+
+ # Retrieve svn revision for showing the used bleeding edge revision in the
+ # commit message.
+ args = "svn find-rev %s" % self._state["prepare_commit_hash"]
+ svn_revision = self.Git(args).strip()
+ self.Persist("svn_revision", svn_revision)
+ text = MSub(r"^(Version \d+\.\d+\.\d+)$",
+ "\\1 (based on bleeding_edge revision r%s)" % svn_revision,
+ text)
+
+ # Remove indentation and merge paragraphs into single long lines, keeping
+ # empty lines between them.
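+ # Illustrative example: "  First line\n  wrapped\n\n  Next para" becomes
+ # "First line wrapped\n\nNext para".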
+ def SplitMapJoin(split_text, fun, join_text):
+ return lambda text: join_text.join(map(fun, text.split(split_text)))
+ strip = lambda line: line.strip()
+ text = SplitMapJoin("\n\n", SplitMapJoin("\n", strip, " "), "\n\n")(text)
+
+ if not text:
+ self.Die("Commit message editing failed.")
+ TextToFile(text, self.Config(COMMITMSG_FILE))
+ os.remove(self.Config(CHANGELOG_ENTRY_FILE))
+
+
+class NewBranch(Step):
+ MESSAGE = "Create a new branch from trunk."
+
+ def RunStep(self):
+ if self.Git("checkout -b %s svn/trunk" % self.Config(TRUNKBRANCH)) is None:
+ self.Die("Checking out a new branch '%s' failed." %
+ self.Config(TRUNKBRANCH))
+
+
+class ApplyChanges(Step):
+ MESSAGE = "Apply squashed changes."
+
+ def RunStep(self):
+ self.ApplyPatch(self.Config(PATCH_FILE))
+ Command("rm", "-f %s*" % self.Config(PATCH_FILE))
+
+
+class SetVersion(Step):
+ MESSAGE = "Set correct version for trunk."
+
+ def RunStep(self):
+ self.RestoreVersionIfUnset()
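+ # Rewrite the version file for trunk: keep MAJOR/MINOR/BUILD from the
+ # pushed version and reset PATCH_LEVEL and IS_CANDIDATE_VERSION to 0.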
+ output = ""
+ for line in FileToText(self.Config(VERSION_FILE)).splitlines():
+ if line.startswith("#define MAJOR_VERSION"):
+ line = re.sub("\d+$", self._state["major"], line)
+ elif line.startswith("#define MINOR_VERSION"):
+ line = re.sub("\d+$", self._state["minor"], line)
+ elif line.startswith("#define BUILD_NUMBER"):
+ line = re.sub("\d+$", self._state["build"], line)
+ elif line.startswith("#define PATCH_LEVEL"):
+ line = re.sub("\d+$", "0", line)
+ elif line.startswith("#define IS_CANDIDATE_VERSION"):
+ line = re.sub("\d+$", "0", line)
+ output += "%s\n" % line
+ TextToFile(output, self.Config(VERSION_FILE))
+
+
+class CommitTrunk(Step):
+ MESSAGE = "Commit to local trunk branch."
+
+ def RunStep(self):
+ self.Git("add \"%s\"" % self.Config(VERSION_FILE))
+ if self.Git("commit -F \"%s\"" % self.Config(COMMITMSG_FILE)) is None:
+ self.Die("'git commit' failed.")
+ Command("rm", "-f %s*" % self.Config(COMMITMSG_FILE))
+
+
+class SanityCheck(Step):
+ MESSAGE = "Sanity check."
+
+ def RunStep(self):
+ if not self.Confirm("Please check if your local checkout is sane: Inspect "
+ "%s, compile, run tests. Do you want to commit this new trunk "
+ "revision to the repository?" % self.Config(VERSION_FILE)):
+ self.Die("Execution canceled.")
+
+
+class CommitSVN(Step):
+ MESSAGE = "Commit to SVN."
+
+ def RunStep(self):
+ result = self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None)
+ if not result:
+ self.Die("'git svn dcommit' failed.")
+ result = filter(lambda x: re.search(r"^Committed r[0-9]+", x),
+ result.splitlines())
+ trunk_revision = ""
+ if len(result) > 0:
+ trunk_revision = re.sub(r"^Committed r([0-9]+)", r"\1", result[0])
+
+ # Sometimes grepping for the revision fails. No idea why. If you figure
+ # out why it is flaky, please do fix it properly.
+ if not trunk_revision:
+ print("Sorry, grepping for the SVN revision failed. Please look for it "
+ "in the last command's output above and provide it manually (just "
+ "the number, without the leading \"r\").")
+ self.DieNoManualMode("Can't prompt in forced mode.")
+ while not trunk_revision:
+ print "> ",
+ trunk_revision = self.ReadLine()
+ self.Persist("trunk_revision", trunk_revision)
+
+
+class TagRevision(Step):
+ MESSAGE = "Tag the new revision."
+
+ def RunStep(self):
+ self.RestoreVersionIfUnset()
+ ver = "%s.%s.%s" % (self._state["major"],
+ self._state["minor"],
+ self._state["build"])
+ if self.Git("svn tag %s -m \"Tagging version %s\"" % (ver, ver),
+ retry_on=lambda x: x is None) is None:
+ self.Die("'git svn tag' failed.")
+
+
+class CheckChromium(Step):
+ MESSAGE = "Ask for chromium checkout."
+
+ def Run(self):
+ chrome_path = self._options.c
+ if not chrome_path:
+ self.DieNoManualMode("Please specify the path to a Chromium checkout in "
+ "forced mode.")
+ print ("Do you have a \"NewGit\" Chromium checkout and want "
+ "this script to automate creation of the roll CL? If yes, enter the "
+ "path to (and including) the \"src\" directory here, otherwise just "
+ "press <Return>: "),
+ chrome_path = self.ReadLine()
+ self.Persist("chrome_path", chrome_path)
+
+
+class SwitchChromium(Step):
+ MESSAGE = "Switch to Chromium checkout."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ v8_path = os.getcwd()
+ self.Persist("v8_path", v8_path)
+ os.chdir(self._state["chrome_path"])
+ self.InitialEnvironmentChecks()
+ # Check for a clean workdir.
+ if self.Git("status -s -uno").strip() != "":
+ self.Die("Workspace is not clean. Please commit or undo your changes.")
+ # Assert that the DEPS file is there.
+ if not os.path.exists(self.Config(DEPS_FILE)):
+ self.Die("DEPS file not present.")
+
+
+class UpdateChromiumCheckout(Step):
+ MESSAGE = "Update the checkout and create a new branch."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ os.chdir(self._state["chrome_path"])
+ if self.Git("checkout master") is None:
+ self.Die("'git checkout master' failed.")
+ if self.Git("pull") is None:
+ self.Die("'git pull' failed, please try again.")
+
+ self.RestoreIfUnset("trunk_revision")
+ args = "checkout -b v8-roll-%s" % self._state["trunk_revision"]
+ if self.Git(args) is None:
+ self.Die("Failed to checkout a new branch.")
+
+
+class UploadCL(Step):
+ MESSAGE = "Create and upload CL."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ os.chdir(self._state["chrome_path"])
+
+ # Patch DEPS file.
+ self.RestoreIfUnset("trunk_revision")
+ deps = FileToText(self.Config(DEPS_FILE))
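+ # Illustrative example: "v8_revision": "123444" becomes
+ # "v8_revision": "123456" (the new trunk revision).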
+ deps = re.sub("(?<=\"v8_revision\": \")([0-9]+)(?=\")",
+ self._state["trunk_revision"],
+ deps)
+ TextToFile(deps, self.Config(DEPS_FILE))
+
+ self.RestoreVersionIfUnset()
+ ver = "%s.%s.%s" % (self._state["major"],
+ self._state["minor"],
+ self._state["build"])
+ if self._options.reviewer:
+ print "Using account %s for review." % self._options.reviewer
+ rev = self._options.reviewer
+ else:
+ print "Please enter the email address of a reviewer for the roll CL: ",
+ self.DieNoManualMode("A reviewer must be specified in forced mode.")
+ rev = self.ReadLine()
+ self.RestoreIfUnset("svn_revision")
+ args = ("commit -am \"Update V8 to version %s "
+ "(based on bleeding_edge revision r%s).\n\nTBR=%s\""
+ % (ver, self._state["svn_revision"], rev))
+ if self.Git(args) is None:
+ self.Die("'git commit' failed.")
+ author_option = self._options.author
+ author = " --email \"%s\"" % author_option if author_option else ""
+ force_flag = " -f" if self._options.force_upload else ""
+ if self.Git("cl upload%s --send-mail%s" % (author, force_flag),
+ pipe=False) is None:
+ self.Die("'git cl upload' failed, please try again.")
+ print "CL uploaded."
+
+
+class SwitchV8(Step):
+ MESSAGE = "Returning to V8 checkout."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ self.RestoreIfUnset("v8_path")
+ os.chdir(self._state["v8_path"])
+
+
+class CleanUp(Step):
+ MESSAGE = "Done!"
+
+ def RunStep(self):
+ self.RestoreVersionIfUnset()
+ ver = "%s.%s.%s" % (self._state["major"],
+ self._state["minor"],
+ self._state["build"])
+ self.RestoreIfUnset("trunk_revision")
+ self.RestoreIfUnset("chrome_path")
+
+ if self._state["chrome_path"]:
+ print("Congratulations, you have successfully created the trunk "
+ "revision %s and rolled it into Chromium. Please don't forget to "
+ "update the v8rel spreadsheet:" % ver)
+ else:
+ print("Congratulations, you have successfully created the trunk "
+ "revision %s. Please don't forget to roll this new version into "
+ "Chromium, and to update the v8rel spreadsheet:" % ver)
+ print "%s\ttrunk\t%s" % (ver, self._state["trunk_revision"])
+
+ self.CommonCleanup()
+ if self.Config(TRUNKBRANCH) != self._state["current_branch"]:
+ self.Git("branch -D %s" % self.Config(TRUNKBRANCH))
+
+
+def RunPushToTrunk(config,
+ options,
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+ step_classes = [
+ Preparation,
+ FreshBranch,
+ DetectLastPush,
+ PrepareChangeLog,
+ EditChangeLog,
+ IncrementVersion,
+ CommitLocal,
+ UploadStep,
+ CommitRepository,
+ StragglerCommits,
+ SquashCommits,
+ NewBranch,
+ ApplyChanges,
+ SetVersion,
+ CommitTrunk,
+ SanityCheck,
+ CommitSVN,
+ TagRevision,
+ CheckChromium,
+ SwitchChromium,
+ UpdateChromiumCheckout,
+ UploadCL,
+ SwitchV8,
+ CleanUp,
+ ]
+
+ RunScript(step_classes, config, options, side_effect_handler)
+
+
+def BuildOptions():
+ result = optparse.OptionParser()
+ result.add_option("-a", "--author", dest="a",
+ help=("Specify the author email used for rietveld."))
+ result.add_option("-c", "--chromium", dest="c",
+ help=("Specify the path to your Chromium src/ "
+ "directory to automate the V8 roll."))
+ result.add_option("-f", "--force", dest="f",
+ help="Don't prompt the user.",
+ default=False, action="store_true")
+ result.add_option("-l", "--last-push", dest="l",
+ help=("Manually specify the git commit ID "
+ "of the last push to trunk."))
+ result.add_option("-m", "--manual", dest="m",
+ help="Prompt the user at every important step.",
+ default=False, action="store_true")
+ result.add_option("-r", "--reviewer",
+ help=("Specify the account name to be used for reviews."))
+ result.add_option("-s", "--step", dest="s",
+ help="Specify the step where to start work. Default: 0.",
+ default=0, type="int")
+ return result
+
+
+def ProcessOptions(options):
+ if options.s < 0:
+ print "Bad step number %d" % options.s
+ return False
+ if not options.m and not options.reviewer:
+ print "A reviewer (-r) is required in (semi-)automatic mode."
+ return False
+ if options.f and options.m:
+ print "Manual and forced mode cannot be combined."
+ return False
+ if not options.m and not options.c:
+ print "A chromium checkout (-c) is required in (semi-)automatic mode."
+ return False
+ if not options.m and not options.a:
+ print "Specify your chromium.org email with -a in (semi-)automatic mode."
+ return False
+ return True
+
+
+def Main():
+ parser = BuildOptions()
+ (options, args) = parser.parse_args()
+ if not ProcessOptions(options):
+ parser.print_help()
+ return 1
+ RunPushToTrunk(CONFIG, PushToTrunkOptions(options))
+
+if __name__ == "__main__":
+ sys.exit(Main())
diff --git a/deps/v8/tools/push-to-trunk/test_scripts.py b/deps/v8/tools/push-to-trunk/test_scripts.py
new file mode 100644
index 0000000000..242efba8bc
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/test_scripts.py
@@ -0,0 +1,991 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import tempfile
+import traceback
+import unittest
+
+import auto_roll
+from auto_roll import AutoRollOptions
+from auto_roll import CheckLastPush
+from auto_roll import FetchLatestRevision
+from auto_roll import SETTINGS_LOCATION
+import common_includes
+from common_includes import *
+import merge_to_branch
+from merge_to_branch import *
+import push_to_trunk
+from push_to_trunk import *
+
+
+TEST_CONFIG = {
+ BRANCHNAME: "test-prepare-push",
+ TRUNKBRANCH: "test-trunk-push",
+ PERSISTFILE_BASENAME: "/tmp/test-v8-push-to-trunk-tempfile",
+ TEMP_BRANCH: "test-prepare-push-temporary-branch-created-by-script",
+ DOT_GIT_LOCATION: None,
+ VERSION_FILE: None,
+ CHANGELOG_FILE: None,
+ CHANGELOG_ENTRY_FILE: "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
+ PATCH_FILE: "/tmp/test-v8-push-to-trunk-tempfile-patch",
+ COMMITMSG_FILE: "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
+ CHROMIUM: "/tmp/test-v8-push-to-trunk-tempfile-chromium",
+ DEPS_FILE: "/tmp/test-v8-push-to-trunk-tempfile-chromium/DEPS",
+ SETTINGS_LOCATION: None,
+ ALREADY_MERGING_SENTINEL_FILE:
+ "/tmp/test-merge-to-branch-tempfile-already-merging",
+ COMMIT_HASHES_FILE: "/tmp/test-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
+ TEMPORARY_PATCH_FILE: "/tmp/test-merge-to-branch-tempfile-temporary-patch",
+}
+
+
+def MakeOptions(s=0, l=None, f=False, m=True, r=None, c=None, a=None,
+ status_password=None, revert_bleeding_edge=None, p=None):
+ """Convenience wrapper."""
+ class Options(object):
+ pass
+ options = Options()
+ options.s = s
+ options.l = l
+ options.f = f
+ options.m = m
+ options.reviewer = r
+ options.c = c
+ options.a = a
+ options.p = p
+ options.status_password = status_password
+ options.revert_bleeding_edge = revert_bleeding_edge
+ return options
+
+
+class ToplevelTest(unittest.TestCase):
+ def testMakeComment(self):
+ self.assertEquals("# Line 1\n# Line 2\n#",
+ MakeComment(" Line 1\n Line 2\n"))
+ self.assertEquals("#Line 1\n#Line 2",
+ MakeComment("Line 1\n Line 2"))
+
+ def testStripComments(self):
+ self.assertEquals(" Line 1\n Line 3\n",
+ StripComments(" Line 1\n# Line 2\n Line 3\n#\n"))
+ self.assertEquals("\nLine 2 ### Test\n #",
+ StripComments("###\n# \n\n# Line 1\nLine 2 ### Test\n #"))
+
+ def testMakeChangeLogBodySimple(self):
+ commits = [
+ ["Title text 1",
+ "Title text 1\n\nBUG=\n",
+ "author1@chromium.org"],
+ ["Title text 2.",
+ "Title text 2\n\nBUG=1234\n",
+ "author2@chromium.org"],
+ ]
+ self.assertEquals(" Title text 1.\n"
+ " (author1@chromium.org)\n\n"
+ " Title text 2 (Chromium issue 1234).\n"
+ " (author2@chromium.org)\n\n",
+ MakeChangeLogBody(commits))
+
+ def testMakeChangeLogBodyEmpty(self):
+ self.assertEquals("", MakeChangeLogBody([]))
+
+ def testMakeChangeLogBodyAutoFormat(self):
+ commits = [
+ ["Title text 1!",
+ "Title text 1\nLOG=y\nBUG=\n",
+ "author1@chromium.org"],
+ ["Title text 2",
+ "Title text 2\n\nBUG=1234\n",
+ "author2@chromium.org"],
+ ["Title text 3",
+ "Title text 3\n\nBUG=1234\nLOG = Yes\n",
+ "author3@chromium.org"],
+ ["Title text 3",
+ "Title text 4\n\nBUG=1234\nLOG=\n",
+ "author4@chromium.org"],
+ ]
+ self.assertEquals(" Title text 1.\n\n"
+ " Title text 3 (Chromium issue 1234).\n\n",
+ MakeChangeLogBody(commits, True))
+
+ def testRegressWrongLogEntryOnTrue(self):
+ body = """
+Check elimination: Learn from if(CompareMap(x)) on true branch.
+
+BUG=
+R=verwaest@chromium.org
+
+Committed: https://code.google.com/p/v8/source/detail?r=18210
+"""
+ self.assertEquals("", MakeChangeLogBody([["title", body, "author"]], True))
+
+ def testMakeChangeLogBugReferenceEmpty(self):
+ self.assertEquals("", MakeChangeLogBugReference(""))
+ self.assertEquals("", MakeChangeLogBugReference("LOG="))
+ self.assertEquals("", MakeChangeLogBugReference(" BUG ="))
+ self.assertEquals("", MakeChangeLogBugReference("BUG=none\t"))
+
+ def testMakeChangeLogBugReferenceSimple(self):
+ self.assertEquals("(issue 987654)",
+ MakeChangeLogBugReference("BUG = v8:987654"))
+ self.assertEquals("(Chromium issue 987654)",
+ MakeChangeLogBugReference("BUG=987654 "))
+
+ def testMakeChangeLogBugReferenceFromBody(self):
+ self.assertEquals("(Chromium issue 1234567)",
+ MakeChangeLogBugReference("Title\n\nTBR=\nBUG=\n"
+ " BUG=\tchromium:1234567\t\n"
+ "R=somebody\n"))
+
+ def testMakeChangeLogBugReferenceMultiple(self):
+ # All issues should be sorted and grouped. Multiple references to the same
+ # issue should be filtered.
+ self.assertEquals("(issues 123, 234, Chromium issue 345)",
+ MakeChangeLogBugReference("Title\n\n"
+ "BUG=v8:234\n"
+ " BUG\t= 345, \tv8:234,\n"
+ "BUG=v8:123\n"
+ "R=somebody\n"))
+ self.assertEquals("(Chromium issues 123, 234)",
+ MakeChangeLogBugReference("Title\n\n"
+ "BUG=234,,chromium:123 \n"
+ "R=somebody\n"))
+ self.assertEquals("(Chromium issues 123, 234)",
+ MakeChangeLogBugReference("Title\n\n"
+ "BUG=chromium:234, , 123\n"
+ "R=somebody\n"))
+ self.assertEquals("(issues 345, 456)",
+ MakeChangeLogBugReference("Title\n\n"
+ "\t\tBUG=v8:345,v8:456\n"
+ "R=somebody\n"))
+ self.assertEquals("(issue 123, Chromium issues 345, 456)",
+ MakeChangeLogBugReference("Title\n\n"
+ "BUG=chromium:456\n"
+ "BUG = none\n"
+ "R=somebody\n"
+ "BUG=456,v8:123, 345"))
+
+ # TODO(machenbach): These tests don't make much sense when the formatting
+ # is done later.
+ def testMakeChangeLogBugReferenceLong(self):
+ # -----------------00--------10--------20--------30--------
+ self.assertEquals("(issues 234, 1234567890, 1234567"
+ "8901234567890, Chromium issues 12345678,"
+ " 123456789)",
+ MakeChangeLogBugReference("BUG=v8:234\n"
+ "BUG=v8:1234567890\n"
+ "BUG=v8:12345678901234567890\n"
+ "BUG=123456789\n"
+ "BUG=12345678\n"))
+ # -----------------00--------10--------20--------30--------
+ self.assertEquals("(issues 234, 1234567890, 1234567"
+ "8901234567890, Chromium issues"
+ " 123456789, 1234567890)",
+ MakeChangeLogBugReference("BUG=v8:234\n"
+ "BUG=v8:12345678901234567890\n"
+ "BUG=v8:1234567890\n"
+ "BUG=123456789\n"
+ "BUG=1234567890\n"))
+ # -----------------00--------10--------20--------30--------
+ self.assertEquals("(Chromium issues 234, 1234567890"
+ ", 12345678901234567, "
+ "1234567890123456789)",
+ MakeChangeLogBugReference("BUG=234\n"
+ "BUG=12345678901234567\n"
+ "BUG=1234567890123456789\n"
+ "BUG=1234567890\n"))
+
+
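+# SimpleMock replays a scripted recipe of expected calls. Each recipe entry
+# is either a plain return value or a list of the form
+# [expected args..., return value, optional callback], for example
+# (illustrative): ["svn fetch", "", check_workdir].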
+class SimpleMock(object):
+ def __init__(self, name):
+ self._name = name
+ self._recipe = []
+ self._index = -1
+
+ def Expect(self, recipe):
+ self._recipe = recipe
+
+ def Call(self, *args):
+ self._index += 1
+ try:
+ expected_call = self._recipe[self._index]
+ except IndexError:
+ raise NoRetryException("Calling %s %s" % (self._name, " ".join(args)))
+
+ # Pack expectations without arguments into a list.
+ if not isinstance(expected_call, list):
+ expected_call = [expected_call]
+
+ # The number of arguments in the expectation must match the actual
+ # arguments.
+ if len(args) > len(expected_call):
+ raise NoRetryException("When calling %s with arguments, the "
+ "expectation must consist of at least as many arguments."
+ % self._name)
+
+ # Compare expected and actual arguments.
+ for (expected_arg, actual_arg) in zip(expected_call, args):
+ if expected_arg != actual_arg:
+ raise NoRetryException("Expected: %s - Actual: %s"
+ % (expected_arg, actual_arg))
+
+ # The expectation list contains a mandatory return value and an optional
+ # callback for checking the context at the time of the call.
+ if len(expected_call) == len(args) + 2:
+ try:
+ expected_call[len(args) + 1]()
+ except:
+ tb = traceback.format_exc()
+ raise NoRetryException("Caught exception from callback: %s" % tb)
+ return_value = expected_call[len(args)]
+
+ # If the return value is an exception, raise it instead of returning.
+ if isinstance(return_value, Exception):
+ raise return_value
+ return return_value
+
+ def AssertFinished(self):
+ if self._index < len(self._recipe) - 1:
+ raise NoRetryException("Called %s too few times: %d vs. %d"
+ % (self._name, self._index, len(self._recipe)))
+
+
+class ScriptTest(unittest.TestCase):
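+ # The test case itself acts as the side-effect handler that the scripts
+ # receive: Command, ReadLine, ReadURL, Sleep and GetDate below stand in
+ # for the real implementations.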
+ def MakeEmptyTempFile(self):
+ handle, name = tempfile.mkstemp()
+ os.close(handle)
+ self._tmp_files.append(name)
+ return name
+
+ def MakeTempVersionFile(self):
+ name = self.MakeEmptyTempFile()
+ with open(name, "w") as f:
+ f.write(" // Some line...\n")
+ f.write("\n")
+ f.write("#define MAJOR_VERSION 3\n")
+ f.write("#define MINOR_VERSION 22\n")
+ f.write("#define BUILD_NUMBER 5\n")
+ f.write("#define PATCH_LEVEL 0\n")
+ f.write(" // Some line...\n")
+ f.write("#define IS_CANDIDATE_VERSION 0\n")
+ return name
+
+ def MakeStep(self, step_class=Step, state=None, options=None):
+ """Convenience wrapper."""
+ options = options or CommonOptions(MakeOptions())
+ return MakeStep(step_class=step_class, number=0, state=state,
+ config=TEST_CONFIG, options=options,
+ side_effect_handler=self)
+
+ def GitMock(self, cmd, args="", pipe=True):
+ print "%s %s" % (cmd, args)
+ return self._git_mock.Call(args)
+
+ def LogMock(self, cmd, args=""):
+ print "Log: %s %s" % (cmd, args)
+
+ MOCKS = {
+ "git": GitMock,
+ # TODO(machenbach): Little hack to reuse the git mock for the one svn call
+ # in merge-to-branch. The command should be made explicit in the test
+ # expectations.
+ "svn": GitMock,
+ "vi": LogMock,
+ }
+
+ def Call(self, fun, *args, **kwargs):
+ print "Calling %s with %s and %s" % (str(fun), str(args), str(kwargs))
+
+ def Command(self, cmd, args="", prefix="", pipe=True):
+ return ScriptTest.MOCKS[cmd](self, cmd, args)
+
+ def ReadLine(self):
+ return self._rl_mock.Call()
+
+ def ReadURL(self, url, params):
+ if params is not None:
+ return self._url_mock.Call(url, params)
+ else:
+ return self._url_mock.Call(url)
+
+ def Sleep(self, seconds):
+ pass
+
+ def GetDate(self):
+ return "1999-07-31"
+
+ def ExpectGit(self, *args):
+ """Convenience wrapper."""
+ self._git_mock.Expect(*args)
+
+ def ExpectReadline(self, *args):
+ """Convenience wrapper."""
+ self._rl_mock.Expect(*args)
+
+ def ExpectReadURL(self, *args):
+ """Convenience wrapper."""
+ self._url_mock.Expect(*args)
+
+ def setUp(self):
+ self._git_mock = SimpleMock("git")
+ self._rl_mock = SimpleMock("readline")
+ self._url_mock = SimpleMock("readurl")
+ self._tmp_files = []
+
+ def tearDown(self):
+ Command("rm", "-rf %s*" % TEST_CONFIG[PERSISTFILE_BASENAME])
+
+ # Clean up temp files; they are not removed automatically.
+ for name in self._tmp_files:
+ if os.path.exists(name):
+ os.remove(name)
+
+ self._git_mock.AssertFinished()
+ self._rl_mock.AssertFinished()
+ self._url_mock.AssertFinished()
+
+ def testPersistRestore(self):
+ self.MakeStep().Persist("test1", "")
+ self.assertEquals("", self.MakeStep().Restore("test1"))
+ self.MakeStep().Persist("test2", "AB123")
+ self.assertEquals("AB123", self.MakeStep().Restore("test2"))
+
+ def testGitOrig(self):
+ self.assertTrue(Command("git", "--version").startswith("git version"))
+
+ def testGitMock(self):
+ self.ExpectGit([["--version", "git version 1.2.3"], ["dummy", ""]])
+ self.assertEquals("git version 1.2.3", self.MakeStep().Git("--version"))
+ self.assertEquals("", self.MakeStep().Git("dummy"))
+
+ def testCommonPrepareDefault(self):
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch"],
+ ["svn fetch", ""],
+ ["branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
+ ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["branch", ""],
+ ])
+ self.ExpectReadline(["Y"])
+ self.MakeStep().CommonPrepare()
+ self.MakeStep().PrepareBranch()
+ self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
+
+ def testCommonPrepareNoConfirm(self):
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch"],
+ ["svn fetch", ""],
+ ["branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
+ ])
+ self.ExpectReadline(["n"])
+ self.MakeStep().CommonPrepare()
+ self.assertRaises(Exception, self.MakeStep().PrepareBranch)
+ self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
+
+ def testCommonPrepareDeleteBranchFailure(self):
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch"],
+ ["svn fetch", ""],
+ ["branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
+ ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], None],
+ ])
+ self.ExpectReadline(["Y"])
+ self.MakeStep().CommonPrepare()
+ self.assertRaises(Exception, self.MakeStep().PrepareBranch)
+ self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
+
+ def testInitialEnvironmentChecks(self):
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ os.environ["EDITOR"] = "vi"
+ self.MakeStep().InitialEnvironmentChecks()
+
+ def testReadAndPersistVersion(self):
+ TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ step = self.MakeStep()
+ step.ReadAndPersistVersion()
+ self.assertEquals("3", self.MakeStep().Restore("major"))
+ self.assertEquals("22", self.MakeStep().Restore("minor"))
+ self.assertEquals("5", self.MakeStep().Restore("build"))
+ self.assertEquals("0", self.MakeStep().Restore("patch"))
+ self.assertEquals("3", step._state["major"])
+ self.assertEquals("22", step._state["minor"])
+ self.assertEquals("5", step._state["build"])
+ self.assertEquals("0", step._state["patch"])
+
+ def testRegex(self):
+ self.assertEqual("(issue 321)",
+ re.sub(r"BUG=v8:(.*)$", r"(issue \1)", "BUG=v8:321"))
+ self.assertEqual("(Chromium issue 321)",
+ re.sub(r"BUG=(.*)$", r"(Chromium issue \1)", "BUG=321"))
+
+ cl = " too little\n\ttab\ttab\n too much\n trailing "
+ cl = MSub(r"\t", r" ", cl)
+ cl = MSub(r"^ {1,7}([^ ])", r" \1", cl)
+ cl = MSub(r"^ {9,80}([^ ])", r" \1", cl)
+ cl = MSub(r" +$", r"", cl)
+ self.assertEqual(" too little\n"
+ " tab tab\n"
+ " too much\n"
+ " trailing", cl)
+
+ self.assertEqual("//\n#define BUILD_NUMBER 3\n",
+ MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
+ r"\g<space>3",
+ "//\n#define BUILD_NUMBER 321\n"))
+
+ def testPrepareChangeLog(self):
+ TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+
+ self.ExpectGit([
+ ["log 1234..HEAD --format=%H", "rev1\nrev2\nrev3\nrev4"],
+ ["log -1 rev1 --format=\"%s\"", "Title text 1"],
+ ["log -1 rev1 --format=\"%B\"", "Title\n\nBUG=\nLOG=y\n"],
+ ["log -1 rev1 --format=\"%an\"", "author1@chromium.org"],
+ ["log -1 rev2 --format=\"%s\"", "Title text 2."],
+ ["log -1 rev2 --format=\"%B\"", "Title\n\nBUG=123\nLOG= \n"],
+ ["log -1 rev2 --format=\"%an\"", "author2@chromium.org"],
+ ["log -1 rev3 --format=\"%s\"", "Title text 3"],
+ ["log -1 rev3 --format=\"%B\"", "Title\n\nBUG=321\nLOG=true\n"],
+ ["log -1 rev3 --format=\"%an\"", "author3@chromium.org"],
+ ["log -1 rev4 --format=\"%s\"", "Title text 4"],
+ ["log -1 rev4 --format=\"%B\"",
+ ("Title\n\nBUG=456\nLOG=Y\n\n"
+ "Review URL: https://codereview.chromium.org/9876543210\n")],
+ ["log -1 rev4 --format=\"%an\"", "author4@chromium.org"],
+ ])
+
+ # The cl for rev4 on rietveld has an updated LOG flag.
+ self.ExpectReadURL([
+ ["https://codereview.chromium.org/9876543210/description",
+ "Title\n\nBUG=456\nLOG=N\n\n"],
+ ])
+
+ self.MakeStep().Persist("last_push", "1234")
+ self.MakeStep(PrepareChangeLog).Run()
+
+ actual_cl = FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+
+ expected_cl = """1999-07-31: Version 3.22.5
+
+ Title text 1.
+
+ Title text 3 (Chromium issue 321).
+
+ Performance and stability improvements on all platforms.
+#
+# The change log above is auto-generated. Please review if all relevant
+# commit messages from the list below are included.
+# All lines starting with # will be stripped.
+#
+# Title text 1.
+# (author1@chromium.org)
+#
+# Title text 2 (Chromium issue 123).
+# (author2@chromium.org)
+#
+# Title text 3 (Chromium issue 321).
+# (author3@chromium.org)
+#
+# Title text 4 (Chromium issue 456).
+# (author4@chromium.org)
+#
+#"""
+
+ self.assertEquals(expected_cl, actual_cl)
+ self.assertEquals("3", self.MakeStep().Restore("major"))
+ self.assertEquals("22", self.MakeStep().Restore("minor"))
+ self.assertEquals("5", self.MakeStep().Restore("build"))
+ self.assertEquals("0", self.MakeStep().Restore("patch"))
+
+ def testEditChangeLog(self):
+ TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+ TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
+ TextToFile(" Original CL", TEST_CONFIG[CHANGELOG_FILE])
+ TextToFile(" New \n\tLines \n", TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+ os.environ["EDITOR"] = "vi"
+
+ self.ExpectReadline([
+ "", # Open editor.
+ ])
+
+ self.MakeStep(EditChangeLog).Run()
+
+ self.assertEquals("New\n Lines\n\n\n Original CL",
+ FileToText(TEST_CONFIG[CHANGELOG_FILE]))
+
+ def testIncrementVersion(self):
+ TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ self.MakeStep().Persist("build", "5")
+
+ self.ExpectReadline([
+ "Y", # Increment build number.
+ ])
+
+ self.MakeStep(IncrementVersion).Run()
+
+ self.assertEquals("3", self.MakeStep().Restore("new_major"))
+ self.assertEquals("22", self.MakeStep().Restore("new_minor"))
+ self.assertEquals("6", self.MakeStep().Restore("new_build"))
+ self.assertEquals("0", self.MakeStep().Restore("new_patch"))
+
+ def testLastChangeLogEntries(self):
+ TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
+ l = """
+ Fixed something.
+ (issue 1234)\n"""
+ for _ in xrange(10): l = l + l
+
+ cl_chunk = """2013-11-12: Version 3.23.2\n%s
+ Performance and stability improvements on all platforms.\n\n\n""" % l
+
+ cl_chunk_full = cl_chunk + cl_chunk + cl_chunk
+ TextToFile(cl_chunk_full, TEST_CONFIG[CHANGELOG_FILE])
+
+ cl = GetLastChangeLogEntries(TEST_CONFIG[CHANGELOG_FILE])
+ self.assertEquals(cl_chunk, cl)
+
+ def _TestSquashCommits(self, change_log, expected_msg):
+ TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+ with open(TEST_CONFIG[CHANGELOG_ENTRY_FILE], "w") as f:
+ f.write(change_log)
+
+ self.ExpectGit([
+ ["diff svn/trunk hash1", "patch content"],
+ ["svn find-rev hash1", "123455\n"],
+ ])
+
+ self.MakeStep().Persist("prepare_commit_hash", "hash1")
+ self.MakeStep().Persist("date", "1999-11-11")
+
+ self.MakeStep(SquashCommits).Run()
+ self.assertEquals(FileToText(TEST_CONFIG[COMMITMSG_FILE]), expected_msg)
+
+ patch = FileToText(TEST_CONFIG[PATCH_FILE])
+ self.assertTrue(re.search(r"patch content", patch))
+
+ def testSquashCommitsUnformatted(self):
+ change_log = """1999-11-11: Version 3.22.5
+
+ Log text 1.
+ Chromium issue 12345
+
+ Performance and stability improvements on all platforms.\n"""
+ commit_msg = """Version 3.22.5 (based on bleeding_edge revision r123455)
+
+Log text 1. Chromium issue 12345
+
+Performance and stability improvements on all platforms."""
+ self._TestSquashCommits(change_log, commit_msg)
+
+ def testSquashCommitsFormatted(self):
+ change_log = """1999-11-11: Version 3.22.5
+
+ Long commit message that fills more than 80 characters (Chromium issue
+ 12345).
+
+ Performance and stability improvements on all platforms.\n"""
+ commit_msg = """Version 3.22.5 (based on bleeding_edge revision r123455)
+
+Long commit message that fills more than 80 characters (Chromium issue 12345).
+
+Performance and stability improvements on all platforms."""
+ self._TestSquashCommits(change_log, commit_msg)
+
+ def testSquashCommitsQuotationMarks(self):
+ change_log = """Line with "quotation marks".\n"""
+ commit_msg = """Line with "quotation marks"."""
+ self._TestSquashCommits(change_log, commit_msg)
+
+ def _PushToTrunk(self, force=False, manual=False):
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+ TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
+ if not os.path.exists(TEST_CONFIG[CHROMIUM]):
+ os.makedirs(TEST_CONFIG[CHROMIUM])
+ TextToFile("1999-04-05: Version 3.22.4", TEST_CONFIG[CHANGELOG_FILE])
+ TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
+ TEST_CONFIG[DEPS_FILE])
+ os.environ["EDITOR"] = "vi"
+
+ def CheckPreparePush():
+ cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+ self.assertTrue(re.search(r"Version 3.22.5", cl))
+ self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
+ self.assertFalse(re.search(r" \(author1@chromium\.org\)", cl))
+
+ # Make sure all comments got stripped.
+ self.assertFalse(re.search(r"^#", cl, flags=re.M))
+
+ version = FileToText(TEST_CONFIG[VERSION_FILE])
+ self.assertTrue(re.search(r"#define BUILD_NUMBER\s+6", version))
+
+ def CheckSVNCommit():
+ commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+ self.assertEquals(
+"""Version 3.22.5 (based on bleeding_edge revision r123455)
+
+Log text 1 (issue 321).
+
+Performance and stability improvements on all platforms.""", commit)
+ version = FileToText(TEST_CONFIG[VERSION_FILE])
+ self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
+ self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
+ self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
+ self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+
+ force_flag = " -f" if not manual else ""
+ review_suffix = "\n\nTBR=reviewer@chromium.org" if not manual else ""
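+ # The full git conversation expected for a push-to-trunk run, in call
+ # order.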
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch\n"],
+ ["svn fetch", ""],
+ ["branch", " branch1\n* branch2\n"],
+ ["checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["branch", " branch1\n* branch2\n"],
+ ["branch", " branch1\n* branch2\n"],
+ ["checkout -b %s svn/bleeding_edge" % TEST_CONFIG[BRANCHNAME], ""],
+ ["log -1 --format=%H ChangeLog", "1234\n"],
+ ["log -1 1234", "Last push ouput\n"],
+ ["log 1234..HEAD --format=%H", "rev1\n"],
+ ["log -1 rev1 --format=\"%s\"", "Log text 1.\n"],
+ ["log -1 rev1 --format=\"%B\"", "Text\nLOG=YES\nBUG=v8:321\nText\n"],
+ ["log -1 rev1 --format=\"%an\"", "author1@chromium.org\n"],
+ [("commit -a -m \"Prepare push to trunk. "
+ "Now working on version 3.22.6.%s\"" % review_suffix),
+ " 2 files changed\n",
+ CheckPreparePush],
+ [("cl upload --email \"author@chromium.org\" "
+ "-r \"reviewer@chromium.org\" --send-mail%s" % force_flag),
+ "done\n"],
+ ["cl presubmit", "Presubmit successfull\n"],
+ ["cl dcommit -f --bypass-hooks", "Closing issue\n"],
+ ["svn fetch", "fetch result\n"],
+ ["checkout svn/bleeding_edge", ""],
+ [("log -1 --format=%H --grep=\"Prepare push to trunk. "
+ "Now working on version 3.22.6.\""),
+ "hash1\n"],
+ ["diff svn/trunk hash1", "patch content\n"],
+ ["svn find-rev hash1", "123455\n"],
+ ["checkout -b %s svn/trunk" % TEST_CONFIG[TRUNKBRANCH], ""],
+ ["apply --index --reject \"%s\"" % TEST_CONFIG[PATCH_FILE], ""],
+ ["add \"%s\"" % TEST_CONFIG[VERSION_FILE], ""],
+ ["commit -F \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], "", CheckSVNCommit],
+ ["svn dcommit 2>&1", "Some output\nCommitted r123456\nSome output\n"],
+ ["svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""],
+ ["status -s -uno", ""],
+ ["checkout master", ""],
+ ["pull", ""],
+ ["checkout -b v8-roll-123456", ""],
+ [("commit -am \"Update V8 to version 3.22.5 "
+ "(based on bleeding_edge revision r123455).\n\n"
+ "TBR=reviewer@chromium.org\""),
+ ""],
+ ["cl upload --email \"author@chromium.org\" --send-mail%s" % force_flag,
+ ""],
+ ["checkout -f some_branch", ""],
+ ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["branch -D %s" % TEST_CONFIG[BRANCHNAME], ""],
+ ["branch -D %s" % TEST_CONFIG[TRUNKBRANCH], ""],
+ ])
+
+ # Expected keyboard input in manual mode:
+ if manual:
+ self.ExpectReadline([
+ "Y", # Confirm last push.
+ "", # Open editor.
+ "Y", # Increment build number.
+ "reviewer@chromium.org", # V8 reviewer.
+ "LGTX", # Enter LGTM for V8 CL (wrong).
+ "LGTM", # Enter LGTM for V8 CL.
+ "Y", # Sanity check.
+ "reviewer@chromium.org", # Chromium reviewer.
+ ])
+
+ # Expected keyboard input in semi-automatic mode:
+ if not manual and not force:
+ self.ExpectReadline([
+ "LGTM", # Enter LGTM for V8 CL.
+ ])
+
+ # No keyboard input in forced mode:
+ if force:
+ self.ExpectReadline([])
+
+ options = MakeOptions(f=force, m=manual, a="author@chromium.org",
+ r="reviewer@chromium.org" if not manual else None,
+ c = TEST_CONFIG[CHROMIUM])
+ RunPushToTrunk(TEST_CONFIG, PushToTrunkOptions(options), self)
+
+ deps = FileToText(TEST_CONFIG[DEPS_FILE])
+ self.assertTrue(re.search("\"v8_revision\": \"123456\"", deps))
+
+ cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+ self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
+ self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
+ self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
+
+ # Note: The version file is on build number 5 again at the end of this test
+ # since the git command that merges to the bleeding edge branch is mocked
+ # out.
+
+ def testPushToTrunkManual(self):
+ self._PushToTrunk(manual=True)
+
+ def testPushToTrunkSemiAutomatic(self):
+ self._PushToTrunk()
+
+ def testPushToTrunkForced(self):
+ self._PushToTrunk(force=True)
+
+ def testCheckLastPushRecently(self):
+ self.ExpectGit([
+ ["svn log -1 --oneline", "r101 | Text"],
+ ["svn log -1 --oneline ChangeLog", "r99 | Prepare push to trunk..."],
+ ])
+
+ state = {}
+ self.MakeStep(FetchLatestRevision, state=state).Run()
+ self.assertRaises(Exception, self.MakeStep(CheckLastPush, state=state).Run)
+
+ def testAutoRoll(self):
+ status_password = self.MakeEmptyTempFile()
+ TextToFile("PW", status_password)
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
+
+ self.ExpectReadURL([
+ ["https://v8-status.appspot.com/current?format=json",
+ "{\"message\": \"Tree is throttled\"}"],
+ ["https://v8-status.appspot.com/lkgr", Exception("Network problem")],
+ ["https://v8-status.appspot.com/lkgr", "100"],
+ ["https://v8-status.appspot.com/status",
+ ("username=v8-auto-roll%40chromium.org&"
+ "message=Tree+is+closed+%28preparing+to+push%29&password=PW"),
+ ""],
+ ["https://v8-status.appspot.com/status",
+ ("username=v8-auto-roll%40chromium.org&"
+ "message=Tree+is+throttled&password=PW"), ""],
+ ])
+
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch\n"],
+ ["svn fetch", ""],
+ ["svn log -1 --oneline", "r100 | Text"],
+ ["svn log -1 --oneline ChangeLog", "r65 | Prepare push to trunk..."],
+ ])
+
+ auto_roll.RunAutoRoll(TEST_CONFIG, AutoRollOptions(
+ MakeOptions(status_password=status_password)), self)
+
+ self.assertEquals("100", self.MakeStep().Restore("lkgr"))
+ self.assertEquals("100", self.MakeStep().Restore("latest"))
+
+ def testAutoRollStoppedBySettings(self):
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ TEST_CONFIG[SETTINGS_LOCATION] = self.MakeEmptyTempFile()
+ TextToFile("{\"enable_auto_roll\": false}", TEST_CONFIG[SETTINGS_LOCATION])
+
+ self.ExpectReadURL([])
+
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch\n"],
+ ["svn fetch", ""],
+ ])
+
+ def RunAutoRoll():
+ auto_roll.RunAutoRoll(TEST_CONFIG, AutoRollOptions(MakeOptions()), self)
+ self.assertRaises(Exception, RunAutoRoll)
+
+ def testAutoRollStoppedByTreeStatus(self):
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
+
+ self.ExpectReadURL([
+ ["https://v8-status.appspot.com/current?format=json",
+ "{\"message\": \"Tree is throttled (no push)\"}"],
+ ])
+
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch\n"],
+ ["svn fetch", ""],
+ ])
+
+ def RunAutoRoll():
+ auto_roll.RunAutoRoll(TEST_CONFIG, AutoRollOptions(MakeOptions()), self)
+ self.assertRaises(Exception, RunAutoRoll)
+
+ def testMergeToBranch(self):
+ TEST_CONFIG[ALREADY_MERGING_SENTINEL_FILE] = self.MakeEmptyTempFile()
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ os.environ["EDITOR"] = "vi"
+ extra_patch = self.MakeEmptyTempFile()
+
+ def VerifyPatch(patch):
+ return lambda: self.assertEquals(patch,
+ FileToText(TEST_CONFIG[TEMPORARY_PATCH_FILE]))
+
+ msg = """Merged r12345, r23456, r34567, r45678, r56789 into trunk branch.
+
+Title4
+
+Title2
+
+Title3
+
+Title1
+
+Title5
+
+BUG=123,234,345,456,567,v8:123
+LOG=N
+"""
+
+ def VerifySVNCommit():
+ commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+ self.assertEquals(msg, commit)
+ version = FileToText(TEST_CONFIG[VERSION_FILE])
+ self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
+ self.assertTrue(re.search(r"#define PATCH_LEVEL\s+1", version))
+ self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch\n"],
+ ["svn fetch", ""],
+ ["branch", " branch1\n* branch2\n"],
+ ["checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["branch", " branch1\n* branch2\n"],
+ ["checkout -b %s svn/trunk" % TEST_CONFIG[BRANCHNAME], ""],
+ ["log svn/bleeding_edge --reverse --format=%H --grep=\"Port r12345\"",
+ "hash1\nhash2"],
+ ["svn find-rev hash1 svn/bleeding_edge", "45678"],
+ ["log -1 --format=%s hash1", "Title1"],
+ ["svn find-rev hash2 svn/bleeding_edge", "23456"],
+ ["log -1 --format=%s hash2", "Title2"],
+ ["log svn/bleeding_edge --reverse --format=%H --grep=\"Port r23456\"",
+ ""],
+ ["log svn/bleeding_edge --reverse --format=%H --grep=\"Port r34567\"",
+ "hash3"],
+ ["svn find-rev hash3 svn/bleeding_edge", "56789"],
+ ["log -1 --format=%s hash3", "Title3"],
+ ["svn find-rev \"r12345\" svn/bleeding_edge", "hash4"],
+ ["svn find-rev \"r23456\" svn/bleeding_edge", "hash2"],
+ ["svn find-rev \"r34567\" svn/bleeding_edge", "hash3"],
+ ["svn find-rev \"r45678\" svn/bleeding_edge", "hash1"],
+ ["svn find-rev \"r56789\" svn/bleeding_edge", "hash5"],
+ ["log -1 --format=%s hash4", "Title4"],
+ ["log -1 --format=%s hash2", "Title2"],
+ ["log -1 --format=%s hash3", "Title3"],
+ ["log -1 --format=%s hash1", "Title1"],
+ ["log -1 --format=%s hash5", "Title5"],
+ ["log -1 hash4", "Title4\nBUG=123\nBUG=234"],
+ ["log -1 hash2", "Title2\n BUG = v8:123,345"],
+ ["log -1 hash3", "Title3\nLOG=n\nBUG=567, 456"],
+ ["log -1 hash1", "Title1"],
+ ["log -1 hash5", "Title5"],
+ ["log -1 -p hash4", "patch4"],
+ ["apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", VerifyPatch("patch4")],
+ ["log -1 -p hash2", "patch2"],
+ ["apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", VerifyPatch("patch2")],
+ ["log -1 -p hash3", "patch3"],
+ ["apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", VerifyPatch("patch3")],
+ ["log -1 -p hash1", "patch1"],
+ ["apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", VerifyPatch("patch1")],
+ ["log -1 -p hash5", "patch5"],
+ ["apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", VerifyPatch("patch5")],
+ ["apply --index --reject \"%s\"" % extra_patch, ""],
+ ["commit -a -F \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], ""],
+ ["cl upload -r \"reviewer@chromium.org\" --send-mail", ""],
+ ["checkout %s" % TEST_CONFIG[BRANCHNAME], ""],
+ ["cl presubmit", "Presubmit successfull\n"],
+ ["cl dcommit -f --bypass-hooks", "Closing issue\n", VerifySVNCommit],
+ ["svn fetch", ""],
+ ["log -1 --format=%%H --grep=\"%s\" svn/trunk" % msg, "hash6"],
+ ["svn find-rev hash6", "1324"],
+ [("copy -r 1324 https://v8.googlecode.com/svn/trunk "
+ "https://v8.googlecode.com/svn/tags/3.22.5.1 -m "
+ "\"Tagging version 3.22.5.1\""), ""],
+ ["checkout -f some_branch", ""],
+ ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["branch -D %s" % TEST_CONFIG[BRANCHNAME], ""],
+ ])
+
+ self.ExpectReadline([
+ "Y", # Automatically add corresponding ports (34567, 56789)?
+ "Y", # Automatically increment patch level?
+ "reviewer@chromium.org", # V8 reviewer.
+ "LGTM", # Enter LGTM for V8 CL.
+ ])
+
+ options = MakeOptions(p=extra_patch, f=True)
+ # r12345 and r34567 are patches. r23456 (included) and r45678 are the MIPS
+ # ports of r12345. r56789 is the MIPS port of r34567.
+ args = ["trunk", "12345", "23456", "34567"]
+ self.assertTrue(merge_to_branch.ProcessOptions(options, args))
+ RunMergeToBranch(TEST_CONFIG, MergeToBranchOptions(options, args), self)
+
+
+class SystemTest(unittest.TestCase):
+ def testReload(self):
+ step = MakeStep(step_class=PrepareChangeLog, number=0, state={}, config={},
+ options=CommonOptions(MakeOptions()),
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER)
+ body = step.Reload(
+"""------------------------------------------------------------------------
+r17997 | machenbach@chromium.org | 2013-11-22 11:04:04 +0100 (...) | 6 lines
+
+Prepare push to trunk. Now working on version 3.23.11.
+
+R=danno@chromium.org
+
+Review URL: https://codereview.chromium.org/83173002
+
+------------------------------------------------------------------------""")
+ self.assertEquals(
+"""Prepare push to trunk. Now working on version 3.23.11.
+
+R=danno@chromium.org
+
+Committed: https://code.google.com/p/v8/source/detail?r=17997""", body)
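
The tests above replay scripted git interactions: each ExpectGit entry is a [command arguments, canned output, optional verification callback] triple that is consumed in order. A minimal sketch of that expectation-replay pattern, with a hypothetical FakeGitHandler standing in for the test's side-effect handler:

class FakeGitHandler(object):
  def __init__(self, expectations):
    # Each expectation: [git arguments, canned stdout, optional callback].
    self._expectations = list(expectations)

  def Command(self, args):
    assert self._expectations, "Unexpected git call: %s" % args
    expected = self._expectations.pop(0)
    assert expected[0] == args, "Expected %r, got %r" % (expected[0], args)
    if len(expected) > 2:
      expected[2]()  # Verification hook, e.g. CheckSVNCommit above.
    return expected[1]

handler = FakeGitHandler([
    ["status -s -uno", ""],
    ["svn fetch", "fetch result\n"],
])
print(handler.Command("status -s -uno"))   # -> ""
print(handler.Command("svn fetch"))        # -> "fetch result\n"
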
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index 292cf002f9..489f447bf2 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -154,6 +154,9 @@ def BuildOptions():
result.add_option("--arch-and-mode",
help="Architecture and mode in the format 'arch.mode'",
default=None)
+ result.add_option("--asan",
+ help="Regard test expectations for ASAN",
+ default=False, action="store_true")
result.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
@@ -363,12 +366,15 @@ def Execute(arch, mode, args, options, suites, workspace):
# Find available test suites and read test cases from them.
variables = {
- "mode": mode,
"arch": arch,
- "system": utils.GuessOS(),
- "isolates": options.isolates,
+ "asan": options.asan,
"deopt_fuzzer": True,
+ "gc_stress": False,
+ "isolates": options.isolates,
+ "mode": mode,
"no_i18n": False,
+ "simulator": utils.UseSimulator(arch),
+ "system": utils.GuessOS(),
}
all_tests = []
num_tests = 0
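
The run-deopt-fuzzer change above widens the variables dictionary that status-file rules are evaluated against (adding asan, gc_stress, simulator and friends). A rough sketch of how such a dictionary can drive a rule condition; the EvalCondition helper and the condition string are illustrative only, not the test runner's actual API:

def EvalCondition(condition, variables):
  # Illustration only: evaluate a status-file style condition against the
  # variable namespace assembled by the test runner.
  return eval(condition, {}, dict(variables))

variables = {
    "arch": "ia32",
    "asan": False,
    "deopt_fuzzer": True,
    "gc_stress": False,
    "isolates": False,
    "mode": "release",
    "no_i18n": False,
    "simulator": False,
    "system": "linux",
}
print(EvalCondition("deopt_fuzzer and not asan", variables))  # True
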
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 2fdbeb9d65..de45934a21 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -28,10 +28,12 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import itertools
import multiprocessing
import optparse
import os
from os.path import join
+import platform
import shlex
import subprocess
import sys
@@ -53,9 +55,13 @@ TIMEOUT_SCALEFACTOR = {"debug" : 4,
"release" : 1 }
# Use this to run several variants of the tests.
-VARIANT_FLAGS = [[],
- ["--stress-opt", "--always-opt"],
- ["--nocrankshaft"]]
+VARIANT_FLAGS = {
+ "default": [],
+ "stress": ["--stress-opt", "--always-opt"],
+ "nocrankshaft": ["--nocrankshaft"]}
+
+VARIANTS = ["default", "stress", "nocrankshaft"]
+
MODE_FLAGS = {
"debug" : ["--nobreak-on-abort", "--nodead-code-elimination",
"--nofold-constants", "--enable-slow-asserts",
@@ -63,6 +69,11 @@ MODE_FLAGS = {
"release" : ["--nobreak-on-abort", "--nodead-code-elimination",
"--nofold-constants"]}
+GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
+ "--concurrent-recompilation-queue-length=64",
+ "--concurrent-recompilation-delay=500",
+ "--concurrent-recompilation"]
+
SUPPORTED_ARCHS = ["android_arm",
"android_ia32",
"arm",
@@ -70,14 +81,16 @@ SUPPORTED_ARCHS = ["android_arm",
"mipsel",
"nacl_ia32",
"nacl_x64",
- "x64"]
+ "x64",
+ "a64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
"android_ia32",
"arm",
"mipsel",
"nacl_ia32",
- "nacl_x64"]
+ "nacl_x64",
+ "a64"]
def BuildOptions():
@@ -89,6 +102,9 @@ def BuildOptions():
result.add_option("--arch-and-mode",
help="Architecture and mode in the format 'arch.mode'",
default=None)
+ result.add_option("--asan",
+ help="Regard test expectations for ASAN",
+ default=False, action="store_true")
result.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
@@ -97,6 +113,15 @@ def BuildOptions():
result.add_option("--flaky-tests",
help="Regard tests marked as flaky (run|skip|dontcare)",
default="dontcare")
+ result.add_option("--slow-tests",
+ help="Regard slow tests (run|skip|dontcare)",
+ default="dontcare")
+ result.add_option("--pass-fail-tests",
+ help="Regard pass|fail tests (run|skip|dontcare)",
+ default="dontcare")
+ result.add_option("--gc-stress",
+ help="Switch on GC stress mode",
+ default=False, action="store_true")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
@@ -128,12 +153,16 @@ def BuildOptions():
result.add_option("--no-variants", "--novariants",
help="Don't run any testing variants",
default=False, dest="no_variants", action="store_true")
+ result.add_option("--variants",
+ help="Comma-separated list of testing variants")
result.add_option("--outdir", help="Base directory with compile output",
default="out")
result.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
+ result.add_option("--quickcheck", default=False, action="store_true",
+ help=("Quick check mode (skip slow/flaky tests)"))
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("--shard-count",
@@ -145,6 +174,10 @@ def BuildOptions():
result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
result.add_option("--shell-dir", help="Directory containing executables",
default="")
+ result.add_option("--dont-skip-slow-simulator-tests",
+ help="Don't skip more slow tests when using a simulator.",
+ default=False, action="store_true",
+ dest="dont_skip_simulator_slow_tests")
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
@@ -167,15 +200,17 @@ def BuildOptions():
def ProcessOptions(options):
global VARIANT_FLAGS
+ global VARIANTS
# Architecture and mode related stuff.
if options.arch_and_mode:
- tokens = options.arch_and_mode.split(".")
- options.arch = tokens[0]
- options.mode = tokens[1]
+ options.arch_and_mode = [arch_and_mode.split(".")
+ for arch_and_mode in options.arch_and_mode.split(",")]
+ options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
+ options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
options.mode = options.mode.split(",")
for mode in options.mode:
- if not mode.lower() in ["debug", "release"]:
+ if not mode.lower() in ["debug", "release", "optdebug"]:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
@@ -186,6 +221,11 @@ def ProcessOptions(options):
print "Unknown architecture %s" % arch
return False
+ # Store the final configuration in the arch_and_mode list. Don't overwrite
+ # a predefined arch_and_mode, since it is more expressive than arch and mode.
+ if not options.arch_and_mode:
+ options.arch_and_mode = itertools.product(options.arch, options.mode)
+
# Special processing of other options, sorted alphabetically.
if options.buildbot:
@@ -198,6 +238,10 @@ def ProcessOptions(options):
options.no_network = True
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = shlex.split(options.extra_flags)
+
+ if options.gc_stress:
+ options.extra_flags += GC_STRESS_FLAGS
+
if options.j == 0:
options.j = multiprocessing.cpu_count()
@@ -205,26 +249,47 @@ def ProcessOptions(options):
"""Returns true if zero or one of multiple arguments are true."""
return reduce(lambda x, y: x + y, args) <= 1
- if not excl(options.no_stress, options.stress_only, options.no_variants):
- print "Use only one of --no-stress, --stress-only or --no-variants."
+ if not excl(options.no_stress, options.stress_only, options.no_variants,
+ bool(options.variants), options.quickcheck):
+ print("Use only one of --no-stress, --stress-only, --no-variants, "
+ "--variants, or --quickcheck.")
return False
if options.no_stress:
- VARIANT_FLAGS = [[], ["--nocrankshaft"]]
+ VARIANTS = ["default", "nocrankshaft"]
if options.no_variants:
- VARIANT_FLAGS = [[]]
+ VARIANTS = ["default"]
+ if options.stress_only:
+ VARIANTS = ["stress"]
+ if options.variants:
+ VARIANTS = options.variants.split(",")
+ if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
+ print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
+ return False
+ if options.quickcheck:
+ VARIANTS = ["default", "stress"]
+ options.flaky_tests = "skip"
+ options.slow_tests = "skip"
+ options.pass_fail_tests = "skip"
+
if not options.shell_dir:
if options.shell:
print "Warning: --shell is deprecated, use --shell-dir instead."
options.shell_dir = os.path.dirname(options.shell)
- if options.stress_only:
- VARIANT_FLAGS = [["--stress-opt", "--always-opt"]]
if options.valgrind:
run_valgrind = os.path.join("tools", "run-valgrind.py")
# This is OK for distributed running, so we don't need to set no_network.
options.command_prefix = (["python", "-u", run_valgrind] +
options.command_prefix)
- if not options.flaky_tests in ["run", "skip", "dontcare"]:
- print "Unknown flaky test mode %s" % options.flaky_tests
+ def CheckTestMode(name, option):
+ if not option in ["run", "skip", "dontcare"]:
+ print "Unknown %s mode %s" % (name, option)
+ return False
+ return True
+ if not CheckTestMode("flaky test", options.flaky_tests):
+ return False
+ if not CheckTestMode("slow test", options.slow_tests):
+ return False
+ if not CheckTestMode("pass|fail test", options.pass_fail_tests):
return False
if not options.no_i18n:
DEFAULT_TESTS.append("intl")
@@ -265,14 +330,14 @@ def Main():
suite_paths = utils.GetSuitePaths(join(workspace, "test"))
if len(args) == 0:
- suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
+ suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
else:
args_suites = set()
for arg in args:
suite = arg.split(os.path.sep)[0]
if not suite in args_suites:
args_suites.add(suite)
- suite_paths = [ s for s in suite_paths if s in args_suites ]
+ suite_paths = [ s for s in args_suites if s in suite_paths ]
suites = []
for root in suite_paths:
@@ -285,10 +350,12 @@ def Main():
for s in suites:
s.DownloadData()
- for mode in options.mode:
- for arch in options.arch:
+ for (arch, mode) in options.arch_and_mode:
+ try:
code = Execute(arch, mode, args, options, suites, workspace)
- exit_code = exit_code or code
+ except KeyboardInterrupt:
+ return 2
+ exit_code = exit_code or code
return exit_code
@@ -305,6 +372,9 @@ def Execute(arch, mode, args, options, suites, workspace):
"%s.%s" % (arch, mode))
shell_dir = os.path.relpath(shell_dir)
+ if mode == "optdebug":
+ mode = "debug" # "optdebug" is just an alias.
+
# Populate context object.
mode_flags = MODE_FLAGS[mode]
timeout = options.timeout
@@ -323,14 +393,21 @@ def Execute(arch, mode, args, options, suites, workspace):
options.extra_flags,
options.no_i18n)
+ # TODO(all): Combine "simulator" and "simulator_run".
+ simulator_run = not options.dont_skip_simulator_slow_tests and \
+ arch in ['a64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
- "mode": mode,
"arch": arch,
- "system": utils.GuessOS(),
- "isolates": options.isolates,
+ "asan": options.asan,
"deopt_fuzzer": False,
+ "gc_stress": options.gc_stress,
+ "isolates": options.isolates,
+ "mode": mode,
"no_i18n": options.no_i18n,
+ "simulator_run": simulator_run,
+ "simulator": utils.UseSimulator(arch),
+ "system": utils.GuessOS(),
}
all_tests = []
num_tests = 0
@@ -341,13 +418,15 @@ def Execute(arch, mode, args, options, suites, workspace):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
- s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests)
+ s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
+ options.slow_tests, options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
+ variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
s.tests = [ t.CopyAddingFlags(v)
for t in s.tests
- for v in s.VariantFlags(t, VARIANT_FLAGS) ]
+ for v in s.VariantFlags(t, variant_flags) ]
s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
num_tests += len(s.tests)
for t in s.tests:
@@ -402,7 +481,7 @@ def Execute(arch, mode, args, options, suites, workspace):
return exit_code
overall_duration = time.time() - start_time
except KeyboardInterrupt:
- return 1
+ raise
if options.time:
verbose.PrintTestDurations(suites, overall_duration)
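
The largest run-tests.py change lets --arch-and-mode carry several comma-separated arch.mode pairs and otherwise builds the pairs as the cross product of the requested architectures and modes (hence the new itertools import). A standalone sketch of that pairing logic, with hypothetical example values:

import itertools

def PairArchAndMode(arch_and_mode, arch, mode):
  if arch_and_mode:
    # "--arch-and-mode=ia32.release,x64.debug" -> [["ia32", "release"], ...]
    return [pair.split(".") for pair in arch_and_mode.split(",")]
  # Otherwise combine every requested arch with every requested mode.
  return list(itertools.product(arch.split(","), mode.split(",")))

print(PairArchAndMode("ia32.release,x64.debug", None, None))
# [['ia32', 'release'], ['x64', 'debug']]
print(PairArchAndMode(None, "ia32,x64", "release"))
# [('ia32', 'release'), ('x64', 'release')]
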
diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py
index 01f170dc87..4f3dc51e02 100644
--- a/deps/v8/tools/testrunner/local/commands.py
+++ b/deps/v8/tools/testrunner/local/commands.py
@@ -64,34 +64,34 @@ def Win32SetErrorMode(mode):
def RunProcess(verbose, timeout, args, **rest):
- if verbose: print "#", " ".join(args)
- popen_args = args
- prev_error_mode = SEM_INVALID_VALUE
- if utils.IsWindows():
- popen_args = subprocess.list2cmdline(args)
- # Try to change the error mode to avoid dialogs on fatal errors. Don't
- # touch any existing error mode flags by merging the existing error mode.
- # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
- error_mode = SEM_NOGPFAULTERRORBOX
- prev_error_mode = Win32SetErrorMode(error_mode)
- Win32SetErrorMode(error_mode | prev_error_mode)
- process = subprocess.Popen(
- shell=utils.IsWindows(),
- args=popen_args,
- **rest
- )
- if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
- Win32SetErrorMode(prev_error_mode)
- # Compute the end time - if the process crosses this limit we
- # consider it timed out.
- if timeout is None: end_time = None
- else: end_time = time.time() + timeout
- timed_out = False
- # Repeatedly check the exit code from the process in a
- # loop and keep track of whether or not it times out.
- exit_code = None
- sleep_time = INITIAL_SLEEP_TIME
try:
+ if verbose: print "#", " ".join(args)
+ popen_args = args
+ prev_error_mode = SEM_INVALID_VALUE
+ if utils.IsWindows():
+ popen_args = subprocess.list2cmdline(args)
+ # Try to change the error mode to avoid dialogs on fatal errors. Don't
+ # touch any existing error mode flags by merging the existing error mode.
+ # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
+ error_mode = SEM_NOGPFAULTERRORBOX
+ prev_error_mode = Win32SetErrorMode(error_mode)
+ Win32SetErrorMode(error_mode | prev_error_mode)
+ process = subprocess.Popen(
+ shell=utils.IsWindows(),
+ args=popen_args,
+ **rest
+ )
+ if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
+ Win32SetErrorMode(prev_error_mode)
+ # Compute the end time - if the process crosses this limit we
+ # consider it timed out.
+ if timeout is None: end_time = None
+ else: end_time = time.time() + timeout
+ timed_out = False
+ # Repeatedly check the exit code from the process in a
+ # loop and keep track of whether or not it times out.
+ exit_code = None
+ sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
@@ -131,10 +131,10 @@ def CheckedUnlink(name):
def Execute(args, verbose=False, timeout=None):
- args = [ c for c in args if c != "" ]
- (fd_out, outname) = tempfile.mkstemp()
- (fd_err, errname) = tempfile.mkstemp()
try:
+ args = [ c for c in args if c != "" ]
+ (fd_out, outname) = tempfile.mkstemp()
+ (fd_err, errname) = tempfile.mkstemp()
(exit_code, timed_out) = RunProcess(
verbose,
timeout,
@@ -142,12 +142,15 @@ def Execute(args, verbose=False, timeout=None):
stdout=fd_out,
stderr=fd_err
)
+ except KeyboardInterrupt:
+ raise
except:
raise
- os.close(fd_out)
- os.close(fd_err)
- out = file(outname).read()
- errors = file(errname).read()
- CheckedUnlink(outname)
- CheckedUnlink(errname)
+ finally:
+ os.close(fd_out)
+ os.close(fd_err)
+ out = file(outname).read()
+ errors = file(errname).read()
+ CheckedUnlink(outname)
+ CheckedUnlink(errname)
return output.Output(exit_code, timed_out, out, errors)
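
The commands.py hunks mainly move the temporary-file bookkeeping into try/finally blocks so that an interrupt (for example the KeyboardInterrupt now re-raised by the runner) cannot leak file descriptors or temp files. The same pattern in isolation, using a hypothetical run_quietly helper:

import os
import subprocess
import tempfile

def run_quietly(args):
  (fd_out, outname) = tempfile.mkstemp()
  try:
    exit_code = subprocess.call(args, stdout=fd_out)
  finally:
    # Runs even if the call above is interrupted.
    os.close(fd_out)
    with open(outname) as f:
      out = f.read()
    os.unlink(outname)
  return exit_code, out

print(run_quietly(["echo", "hello"]))  # (0, 'hello\n')
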
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
index 437adb1789..d2748febd9 100644
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ b/deps/v8/tools/testrunner/local/junit_output.py
@@ -46,4 +46,3 @@ class JUnitTestOutput:
def FinishAndWrite(self, file):
xml.ElementTree(self.root).write(file, "UTF-8")
-
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index a663be23eb..03116ee768 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -89,6 +89,7 @@ class SimpleProgressIndicator(ProgressIndicator):
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
if failed.output.HasCrashed():
+ print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
if failed.output.HasTimedOut():
print "--- TIMEOUT ---"
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index da0c797d0a..d63fdbf6e6 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -52,14 +52,18 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
- "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos", "windows",
- "linux"]:
+for var in ["debug", "release", "android_arm", "android_ia32", "arm", "a64",
+ "ia32", "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos",
+ "windows", "linux"]:
VARIABLES[var] = var
def DoSkip(outcomes):
- return SKIP in outcomes or SLOW in outcomes
+ return SKIP in outcomes
+
+
+def IsSlow(outcomes):
+ return SLOW in outcomes
def OnlyStandardVariant(outcomes):
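
The statusfile.py change decouples SLOW from skipping: DoSkip now only honours SKIP, and the new IsSlow predicate lets the runner's --slow-tests option decide what to do with slow tests. The two predicates in isolation (the constant values here are assumptions for illustration):

SKIP = "SKIP"
SLOW = "SLOW"

def DoSkip(outcomes):
  return SKIP in outcomes

def IsSlow(outcomes):
  return SLOW in outcomes

outcomes = set(["PASS", SLOW])
print(DoSkip(outcomes), IsSlow(outcomes))  # False True
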
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 8517ce9f49..ff51196a56 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -93,11 +93,24 @@ class TestSuite(object):
def _FilterFlaky(flaky, mode):
return (mode == "run" and not flaky) or (mode == "skip" and flaky)
- def FilterTestCasesByStatus(self, warn_unused_rules, flaky_tests="dontcare"):
+ @staticmethod
+ def _FilterSlow(slow, mode):
+ return (mode == "run" and not slow) or (mode == "skip" and slow)
+
+ @staticmethod
+ def _FilterPassFail(pass_fail, mode):
+ return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
+
+ def FilterTestCasesByStatus(self, warn_unused_rules,
+ flaky_tests="dontcare",
+ slow_tests="dontcare",
+ pass_fail_tests="dontcare"):
filtered = []
used_rules = set()
for t in self.tests:
flaky = False
+ slow = False
+ pass_fail = False
testname = self.CommonTestName(t)
if testname in self.rules:
used_rules.add(testname)
@@ -107,6 +120,8 @@ class TestSuite(object):
if statusfile.DoSkip(t.outcomes):
continue # Don't add skipped tests to |filtered|.
flaky = statusfile.IsFlaky(t.outcomes)
+ slow = statusfile.IsSlow(t.outcomes)
+ pass_fail = statusfile.IsPassOrFail(t.outcomes)
skip = False
for rule in self.wildcards:
assert rule[-1] == '*'
@@ -117,7 +132,11 @@ class TestSuite(object):
skip = True
break # "for rule in self.wildcards"
flaky = flaky or statusfile.IsFlaky(t.outcomes)
- if skip or self._FilterFlaky(flaky, flaky_tests):
+ slow = slow or statusfile.IsSlow(t.outcomes)
+ pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
+ if (skip or self._FilterFlaky(flaky, flaky_tests)
+ or self._FilterSlow(slow, slow_tests)
+ or self._FilterPassFail(pass_fail, pass_fail_tests)):
continue # "for t in self.tests"
filtered.append(t)
self.tests = filtered
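
FilterTestCasesByStatus now applies the same three-way mode to flaky, slow and pass|fail tests: "run" keeps only tests with the attribute, "skip" drops them, and "dontcare" keeps everything. A standalone sketch of the shared predicate (ShouldDrop is a hypothetical name for the logic in _FilterFlaky/_FilterSlow/_FilterPassFail):

def ShouldDrop(has_attribute, mode):
  # Returns True when the test should be filtered out for the given mode.
  return ((mode == "run" and not has_attribute) or
          (mode == "skip" and has_attribute))

for mode in ("run", "skip", "dontcare"):
  print("%s %s %s" % (mode, ShouldDrop(True, mode), ShouldDrop(False, mode)))
# run False True
# skip True False
# dontcare False False
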
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index 61ee7dac67..efd609270d 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -78,6 +78,13 @@ def GuessOS():
return None
+def UseSimulator(arch):
+ machine = platform.machine()
+ return (machine and
+ (arch == "mipsel" or arch == "arm" or arch == "a64") and
+ not arch.startswith(machine))
+
+
# This will default to building the 32 bit VM even on machines that are
# capable of running the 64 bit VM.
def DefaultArch():
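
The new UseSimulator helper reports whether the target architecture would run under the V8 simulator on the current host. The real helper queries platform.machine(); the version below takes the machine string as a parameter purely so the cases are easy to see:

def UseSimulator(arch, machine):
  return bool(machine and
              arch in ("mipsel", "arm", "a64") and
              not arch.startswith(machine))

print(UseSimulator("arm", "x86_64"))  # True: arm target on an x86-64 host
print(UseSimulator("arm", "arm"))     # False: target matches the host machine
print(UseSimulator("x64", "x86_64"))  # False: x64 never runs in the simulator
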
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index ff5254172f..187e647033 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -950,4 +950,3 @@ ArgumentsProcessor.prototype.printUsageAndExit = function() {
}
quit(2);
};
-
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 06141c2f96..140269a5ff 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -62,63 +62,72 @@ INSTANCE_TYPES = {
135: "FOREIGN_TYPE",
136: "BYTE_ARRAY_TYPE",
137: "FREE_SPACE_TYPE",
- 138: "EXTERNAL_BYTE_ARRAY_TYPE",
- 139: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
- 140: "EXTERNAL_SHORT_ARRAY_TYPE",
- 141: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
- 142: "EXTERNAL_INT_ARRAY_TYPE",
- 143: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
- 144: "EXTERNAL_FLOAT_ARRAY_TYPE",
- 145: "EXTERNAL_DOUBLE_ARRAY_TYPE",
- 146: "EXTERNAL_PIXEL_ARRAY_TYPE",
- 149: "FILLER_TYPE",
- 150: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 151: "DECLARED_ACCESSOR_INFO_TYPE",
- 152: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 153: "ACCESSOR_PAIR_TYPE",
- 154: "ACCESS_CHECK_INFO_TYPE",
- 155: "INTERCEPTOR_INFO_TYPE",
- 156: "CALL_HANDLER_INFO_TYPE",
- 157: "FUNCTION_TEMPLATE_INFO_TYPE",
- 158: "OBJECT_TEMPLATE_INFO_TYPE",
- 159: "SIGNATURE_INFO_TYPE",
- 160: "TYPE_SWITCH_INFO_TYPE",
- 162: "ALLOCATION_MEMENTO_TYPE",
- 161: "ALLOCATION_SITE_TYPE",
- 163: "SCRIPT_TYPE",
- 164: "CODE_CACHE_TYPE",
- 165: "POLYMORPHIC_CODE_CACHE_TYPE",
- 166: "TYPE_FEEDBACK_INFO_TYPE",
- 167: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 168: "BOX_TYPE",
- 171: "FIXED_ARRAY_TYPE",
- 147: "FIXED_DOUBLE_ARRAY_TYPE",
- 148: "CONSTANT_POOL_ARRAY_TYPE",
- 172: "SHARED_FUNCTION_INFO_TYPE",
- 173: "JS_MESSAGE_OBJECT_TYPE",
- 176: "JS_VALUE_TYPE",
- 177: "JS_DATE_TYPE",
- 178: "JS_OBJECT_TYPE",
- 179: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 180: "JS_GENERATOR_OBJECT_TYPE",
- 181: "JS_MODULE_TYPE",
- 182: "JS_GLOBAL_OBJECT_TYPE",
- 183: "JS_BUILTINS_OBJECT_TYPE",
- 184: "JS_GLOBAL_PROXY_TYPE",
- 185: "JS_ARRAY_TYPE",
- 186: "JS_ARRAY_BUFFER_TYPE",
- 187: "JS_TYPED_ARRAY_TYPE",
- 188: "JS_DATA_VIEW_TYPE",
- 175: "JS_PROXY_TYPE",
- 189: "JS_SET_TYPE",
- 190: "JS_MAP_TYPE",
- 191: "JS_WEAK_MAP_TYPE",
- 192: "JS_WEAK_SET_TYPE",
- 193: "JS_REGEXP_TYPE",
- 194: "JS_FUNCTION_TYPE",
- 174: "JS_FUNCTION_PROXY_TYPE",
- 169: "DEBUG_INFO_TYPE",
- 170: "BREAK_POINT_INFO_TYPE",
+ 138: "EXTERNAL_INT8_ARRAY_TYPE",
+ 139: "EXTERNAL_UINT8_ARRAY_TYPE",
+ 140: "EXTERNAL_INT16_ARRAY_TYPE",
+ 141: "EXTERNAL_UINT16_ARRAY_TYPE",
+ 142: "EXTERNAL_INT32_ARRAY_TYPE",
+ 143: "EXTERNAL_UINT32_ARRAY_TYPE",
+ 144: "EXTERNAL_FLOAT32_ARRAY_TYPE",
+ 145: "EXTERNAL_FLOAT64_ARRAY_TYPE",
+ 146: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
+ 147: "FIXED_INT8_ARRAY_TYPE",
+ 148: "FIXED_UINT8_ARRAY_TYPE",
+ 149: "FIXED_INT16_ARRAY_TYPE",
+ 150: "FIXED_UINT16_ARRAY_TYPE",
+ 151: "FIXED_INT32_ARRAY_TYPE",
+ 152: "FIXED_UINT32_ARRAY_TYPE",
+ 153: "FIXED_FLOAT32_ARRAY_TYPE",
+ 154: "FIXED_FLOAT64_ARRAY_TYPE",
+ 155: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+ 157: "FILLER_TYPE",
+ 158: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 159: "DECLARED_ACCESSOR_INFO_TYPE",
+ 160: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 161: "ACCESSOR_PAIR_TYPE",
+ 162: "ACCESS_CHECK_INFO_TYPE",
+ 163: "INTERCEPTOR_INFO_TYPE",
+ 164: "CALL_HANDLER_INFO_TYPE",
+ 165: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 166: "OBJECT_TEMPLATE_INFO_TYPE",
+ 167: "SIGNATURE_INFO_TYPE",
+ 168: "TYPE_SWITCH_INFO_TYPE",
+ 170: "ALLOCATION_MEMENTO_TYPE",
+ 169: "ALLOCATION_SITE_TYPE",
+ 171: "SCRIPT_TYPE",
+ 172: "CODE_CACHE_TYPE",
+ 173: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 174: "TYPE_FEEDBACK_INFO_TYPE",
+ 175: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 176: "BOX_TYPE",
+ 179: "FIXED_ARRAY_TYPE",
+ 156: "FIXED_DOUBLE_ARRAY_TYPE",
+ 180: "CONSTANT_POOL_ARRAY_TYPE",
+ 181: "SHARED_FUNCTION_INFO_TYPE",
+ 182: "JS_MESSAGE_OBJECT_TYPE",
+ 185: "JS_VALUE_TYPE",
+ 186: "JS_DATE_TYPE",
+ 187: "JS_OBJECT_TYPE",
+ 188: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 189: "JS_GENERATOR_OBJECT_TYPE",
+ 190: "JS_MODULE_TYPE",
+ 191: "JS_GLOBAL_OBJECT_TYPE",
+ 192: "JS_BUILTINS_OBJECT_TYPE",
+ 193: "JS_GLOBAL_PROXY_TYPE",
+ 194: "JS_ARRAY_TYPE",
+ 195: "JS_ARRAY_BUFFER_TYPE",
+ 196: "JS_TYPED_ARRAY_TYPE",
+ 197: "JS_DATA_VIEW_TYPE",
+ 184: "JS_PROXY_TYPE",
+ 198: "JS_SET_TYPE",
+ 199: "JS_MAP_TYPE",
+ 200: "JS_WEAK_MAP_TYPE",
+ 201: "JS_WEAK_SET_TYPE",
+ 202: "JS_REGEXP_TYPE",
+ 203: "JS_FUNCTION_TYPE",
+ 183: "JS_FUNCTION_PROXY_TYPE",
+ 177: "DEBUG_INFO_TYPE",
+ 178: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
@@ -127,21 +136,21 @@ KNOWN_MAPS = {
0x080a9: (129, "MetaMap"),
0x080d1: (131, "OddballMap"),
0x080f9: (4, "AsciiInternalizedStringMap"),
- 0x08121: (171, "FixedArrayMap"),
+ 0x08121: (179, "FixedArrayMap"),
0x08149: (134, "HeapNumberMap"),
- 0x08171: (137, "FreeSpaceMap"),
- 0x08199: (149, "OnePointerFillerMap"),
- 0x081c1: (149, "TwoPointerFillerMap"),
- 0x081e9: (132, "CellMap"),
- 0x08211: (133, "GlobalPropertyCellMap"),
- 0x08239: (172, "SharedFunctionInfoMap"),
- 0x08261: (171, "NativeContextMap"),
- 0x08289: (130, "CodeMap"),
- 0x082b1: (171, "ScopeInfoMap"),
- 0x082d9: (171, "FixedCOWArrayMap"),
- 0x08301: (147, "FixedDoubleArrayMap"),
- 0x08329: (148, "ConstantPoolArrayMap"),
- 0x08351: (171, "HashTableMap"),
+ 0x08171: (130, "CodeMap"),
+ 0x08199: (180, "ConstantPoolArrayMap"),
+ 0x081c1: (137, "FreeSpaceMap"),
+ 0x081e9: (157, "OnePointerFillerMap"),
+ 0x08211: (157, "TwoPointerFillerMap"),
+ 0x08239: (132, "CellMap"),
+ 0x08261: (133, "GlobalPropertyCellMap"),
+ 0x08289: (181, "SharedFunctionInfoMap"),
+ 0x082b1: (179, "NativeContextMap"),
+ 0x082d9: (179, "ScopeInfoMap"),
+ 0x08301: (179, "FixedCOWArrayMap"),
+ 0x08329: (156, "FixedDoubleArrayMap"),
+ 0x08351: (179, "HashTableMap"),
0x08379: (128, "SymbolMap"),
0x083a1: (64, "StringMap"),
0x083c9: (68, "AsciiStringMap"),
@@ -166,92 +175,104 @@ KNOWN_MAPS = {
0x086c1: (86, "ShortExternalAsciiStringMap"),
0x086e9: (64, "UndetectableStringMap"),
0x08711: (68, "UndetectableAsciiStringMap"),
- 0x08739: (138, "ExternalByteArrayMap"),
- 0x08761: (139, "ExternalUnsignedByteArrayMap"),
- 0x08789: (140, "ExternalShortArrayMap"),
- 0x087b1: (141, "ExternalUnsignedShortArrayMap"),
- 0x087d9: (142, "ExternalIntArrayMap"),
- 0x08801: (143, "ExternalUnsignedIntArrayMap"),
- 0x08829: (144, "ExternalFloatArrayMap"),
- 0x08851: (145, "ExternalDoubleArrayMap"),
- 0x08879: (146, "ExternalPixelArrayMap"),
- 0x088a1: (171, "NonStrictArgumentsElementsMap"),
- 0x088c9: (171, "FunctionContextMap"),
- 0x088f1: (171, "CatchContextMap"),
- 0x08919: (171, "WithContextMap"),
- 0x08941: (171, "BlockContextMap"),
- 0x08969: (171, "ModuleContextMap"),
- 0x08991: (171, "GlobalContextMap"),
- 0x089b9: (173, "JSMessageObjectMap"),
- 0x089e1: (135, "ForeignMap"),
- 0x08a09: (178, "NeanderMap"),
- 0x08a31: (162, "AllocationMementoMap"),
- 0x08a59: (161, "AllocationSiteMap"),
- 0x08a81: (165, "PolymorphicCodeCacheMap"),
- 0x08aa9: (163, "ScriptMap"),
- 0x08af9: (178, "ExternalMap"),
- 0x08b21: (168, "BoxMap"),
- 0x08b49: (150, "DeclaredAccessorDescriptorMap"),
- 0x08b71: (151, "DeclaredAccessorInfoMap"),
- 0x08b99: (152, "ExecutableAccessorInfoMap"),
- 0x08bc1: (153, "AccessorPairMap"),
- 0x08be9: (154, "AccessCheckInfoMap"),
- 0x08c11: (155, "InterceptorInfoMap"),
- 0x08c39: (156, "CallHandlerInfoMap"),
- 0x08c61: (157, "FunctionTemplateInfoMap"),
- 0x08c89: (158, "ObjectTemplateInfoMap"),
- 0x08cb1: (159, "SignatureInfoMap"),
- 0x08cd9: (160, "TypeSwitchInfoMap"),
- 0x08d01: (164, "CodeCacheMap"),
- 0x08d29: (166, "TypeFeedbackInfoMap"),
- 0x08d51: (167, "AliasedArgumentsEntryMap"),
- 0x08d79: (169, "DebugInfoMap"),
- 0x08da1: (170, "BreakPointInfoMap"),
+ 0x08739: (138, "ExternalInt8ArrayMap"),
+ 0x08761: (139, "ExternalUint8ArrayMap"),
+ 0x08789: (140, "ExternalInt16ArrayMap"),
+ 0x087b1: (141, "ExternalUint16ArrayMap"),
+ 0x087d9: (142, "ExternalInt32ArrayMap"),
+ 0x08801: (143, "ExternalUint32ArrayMap"),
+ 0x08829: (144, "ExternalFloat32ArrayMap"),
+ 0x08851: (145, "ExternalFloat64ArrayMap"),
+ 0x08879: (146, "ExternalUint8ClampedArrayMap"),
+ 0x088a1: (148, "FixedUint8ArrayMap"),
+ 0x088c9: (147, "FixedInt8ArrayMap"),
+ 0x088f1: (150, "FixedUint16ArrayMap"),
+ 0x08919: (149, "FixedInt16ArrayMap"),
+ 0x08941: (152, "FixedUint32ArrayMap"),
+ 0x08969: (151, "FixedInt32ArrayMap"),
+ 0x08991: (153, "FixedFloat32ArrayMap"),
+ 0x089b9: (154, "FixedFloat64ArrayMap"),
+ 0x089e1: (155, "FixedUint8ClampedArrayMap"),
+ 0x08a09: (179, "NonStrictArgumentsElementsMap"),
+ 0x08a31: (179, "FunctionContextMap"),
+ 0x08a59: (179, "CatchContextMap"),
+ 0x08a81: (179, "WithContextMap"),
+ 0x08aa9: (179, "BlockContextMap"),
+ 0x08ad1: (179, "ModuleContextMap"),
+ 0x08af9: (179, "GlobalContextMap"),
+ 0x08b21: (182, "JSMessageObjectMap"),
+ 0x08b49: (135, "ForeignMap"),
+ 0x08b71: (187, "NeanderMap"),
+ 0x08b99: (170, "AllocationMementoMap"),
+ 0x08bc1: (169, "AllocationSiteMap"),
+ 0x08be9: (173, "PolymorphicCodeCacheMap"),
+ 0x08c11: (171, "ScriptMap"),
+ 0x08c61: (187, "ExternalMap"),
+ 0x08cb1: (176, "BoxMap"),
+ 0x08cd9: (158, "DeclaredAccessorDescriptorMap"),
+ 0x08d01: (159, "DeclaredAccessorInfoMap"),
+ 0x08d29: (160, "ExecutableAccessorInfoMap"),
+ 0x08d51: (161, "AccessorPairMap"),
+ 0x08d79: (162, "AccessCheckInfoMap"),
+ 0x08da1: (163, "InterceptorInfoMap"),
+ 0x08dc9: (164, "CallHandlerInfoMap"),
+ 0x08df1: (165, "FunctionTemplateInfoMap"),
+ 0x08e19: (166, "ObjectTemplateInfoMap"),
+ 0x08e41: (167, "SignatureInfoMap"),
+ 0x08e69: (168, "TypeSwitchInfoMap"),
+ 0x08e91: (172, "CodeCacheMap"),
+ 0x08eb9: (174, "TypeFeedbackInfoMap"),
+ 0x08ee1: (175, "AliasedArgumentsEntryMap"),
+ 0x08f09: (177, "DebugInfoMap"),
+ 0x08f31: (178, "BreakPointInfoMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
("OLD_POINTER_SPACE", 0x08081): "NullValue",
("OLD_POINTER_SPACE", 0x08091): "UndefinedValue",
- ("OLD_POINTER_SPACE", 0x080a1): "TheHoleValue",
- ("OLD_POINTER_SPACE", 0x080b1): "TrueValue",
- ("OLD_POINTER_SPACE", 0x080c1): "FalseValue",
- ("OLD_POINTER_SPACE", 0x080d1): "UninitializedValue",
- ("OLD_POINTER_SPACE", 0x080e1): "NoInterceptorResultSentinel",
- ("OLD_POINTER_SPACE", 0x080f1): "ArgumentsMarker",
- ("OLD_POINTER_SPACE", 0x08101): "NumberStringCache",
- ("OLD_POINTER_SPACE", 0x08909): "SingleCharacterStringCache",
- ("OLD_POINTER_SPACE", 0x08d11): "StringSplitCache",
- ("OLD_POINTER_SPACE", 0x09119): "RegExpMultipleCache",
- ("OLD_POINTER_SPACE", 0x09521): "TerminationException",
- ("OLD_POINTER_SPACE", 0x09531): "MessageListeners",
- ("OLD_POINTER_SPACE", 0x0954d): "CodeStubs",
- ("OLD_POINTER_SPACE", 0x0a9d9): "NonMonomorphicCache",
- ("OLD_POINTER_SPACE", 0x0afed): "PolymorphicCodeCache",
- ("OLD_POINTER_SPACE", 0x0aff5): "NativesSourceCache",
- ("OLD_POINTER_SPACE", 0x0b03d): "EmptyScript",
- ("OLD_POINTER_SPACE", 0x0b075): "IntrinsicFunctionNames",
- ("OLD_POINTER_SPACE", 0x0e091): "ObservationState",
- ("OLD_POINTER_SPACE", 0x0e09d): "FrozenSymbol",
- ("OLD_POINTER_SPACE", 0x0e0a9): "ElementsTransitionSymbol",
- ("OLD_POINTER_SPACE", 0x0e0b5): "EmptySlowElementDictionary",
- ("OLD_POINTER_SPACE", 0x0e251): "ObservedSymbol",
- ("OLD_POINTER_SPACE", 0x29861): "StringTable",
+ ("OLD_POINTER_SPACE", 0x08111): "TheHoleValue",
+ ("OLD_POINTER_SPACE", 0x08121): "TerminationException",
+ ("OLD_POINTER_SPACE", 0x08131): "TrueValue",
+ ("OLD_POINTER_SPACE", 0x08141): "FalseValue",
+ ("OLD_POINTER_SPACE", 0x081c1): "UninitializedValue",
+ ("OLD_POINTER_SPACE", 0x081d1): "NoInterceptorResultSentinel",
+ ("OLD_POINTER_SPACE", 0x081e1): "ArgumentsMarker",
+ ("OLD_POINTER_SPACE", 0x081f1): "NumberStringCache",
+ ("OLD_POINTER_SPACE", 0x089f9): "SingleCharacterStringCache",
+ ("OLD_POINTER_SPACE", 0x08e01): "StringSplitCache",
+ ("OLD_POINTER_SPACE", 0x09209): "RegExpMultipleCache",
+ ("OLD_POINTER_SPACE", 0x09611): "MessageListeners",
+ ("OLD_POINTER_SPACE", 0x0962d): "CodeStubs",
+ ("OLD_POINTER_SPACE", 0x10789): "NonMonomorphicCache",
+ ("OLD_POINTER_SPACE", 0x10d9d): "PolymorphicCodeCache",
+ ("OLD_POINTER_SPACE", 0x10da5): "NativesSourceCache",
+ ("OLD_POINTER_SPACE", 0x10df1): "EmptyScript",
+ ("OLD_POINTER_SPACE", 0x10e29): "IntrinsicFunctionNames",
+ ("OLD_POINTER_SPACE", 0x13e45): "ObservationState",
+ ("OLD_POINTER_SPACE", 0x13e51): "FrozenSymbol",
+ ("OLD_POINTER_SPACE", 0x13e61): "ElementsTransitionSymbol",
+ ("OLD_POINTER_SPACE", 0x13e71): "EmptySlowElementDictionary",
+ ("OLD_POINTER_SPACE", 0x1400d): "ObservedSymbol",
+ ("OLD_POINTER_SPACE", 0x1401d): "AllocationSitesScratchpad",
+ ("OLD_POINTER_SPACE", 0x14425): "MicrotaskState",
+ ("OLD_POINTER_SPACE", 0x32c25): "StringTable",
("OLD_DATA_SPACE", 0x08099): "EmptyDescriptorArray",
("OLD_DATA_SPACE", 0x080a1): "EmptyFixedArray",
("OLD_DATA_SPACE", 0x080a9): "NanValue",
- ("OLD_DATA_SPACE", 0x08141): "EmptyByteArray",
- ("OLD_DATA_SPACE", 0x08269): "EmptyExternalByteArray",
- ("OLD_DATA_SPACE", 0x08275): "EmptyExternalUnsignedByteArray",
- ("OLD_DATA_SPACE", 0x08281): "EmptyExternalShortArray",
- ("OLD_DATA_SPACE", 0x0828d): "EmptyExternalUnsignedShortArray",
- ("OLD_DATA_SPACE", 0x08299): "EmptyExternalIntArray",
- ("OLD_DATA_SPACE", 0x082a5): "EmptyExternalUnsignedIntArray",
- ("OLD_DATA_SPACE", 0x082b1): "EmptyExternalFloatArray",
- ("OLD_DATA_SPACE", 0x082bd): "EmptyExternalDoubleArray",
- ("OLD_DATA_SPACE", 0x082c9): "EmptyExternalPixelArray",
- ("OLD_DATA_SPACE", 0x082d5): "InfinityValue",
- ("OLD_DATA_SPACE", 0x082e1): "MinusZeroValue",
- ("CODE_SPACE", 0x111a1): "JsConstructEntryCode",
- ("CODE_SPACE", 0x18bc1): "JsEntryCode",
+ ("OLD_DATA_SPACE", 0x080e5): "EmptyConstantPoolArray",
+ ("OLD_DATA_SPACE", 0x08235): "EmptyByteArray",
+ ("OLD_DATA_SPACE", 0x08349): "EmptyExternalInt8Array",
+ ("OLD_DATA_SPACE", 0x08355): "EmptyExternalUint8Array",
+ ("OLD_DATA_SPACE", 0x08361): "EmptyExternalInt16Array",
+ ("OLD_DATA_SPACE", 0x0836d): "EmptyExternalUint16Array",
+ ("OLD_DATA_SPACE", 0x08379): "EmptyExternalInt32Array",
+ ("OLD_DATA_SPACE", 0x08385): "EmptyExternalUint32Array",
+ ("OLD_DATA_SPACE", 0x08391): "EmptyExternalFloat32Array",
+ ("OLD_DATA_SPACE", 0x0839d): "EmptyExternalFloat64Array",
+ ("OLD_DATA_SPACE", 0x083a9): "EmptyExternalUint8ClampedArray",
+ ("OLD_DATA_SPACE", 0x083b5): "InfinityValue",
+ ("OLD_DATA_SPACE", 0x083c1): "MinusZeroValue",
+ ("CODE_SPACE", 0x13c81): "JsConstructEntryCode",
+ ("CODE_SPACE", 0x215a1): "JsEntryCode",
}
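
The regenerated v8heapconst.py tables map instance-type codes and well-known map addresses to readable names so that heap-inspection tools can label objects. A tiny lookup sketch using entries copied from the hunk above (describe_map itself is hypothetical, not part of the generated file):

INSTANCE_TYPES = {135: "FOREIGN_TYPE", 179: "FIXED_ARRAY_TYPE"}
KNOWN_MAPS = {0x08121: (179, "FixedArrayMap"), 0x08b49: (135, "ForeignMap")}

def describe_map(address):
  if address in KNOWN_MAPS:
    instance_type, name = KNOWN_MAPS[address]
    return "%s (%s)" % (name, INSTANCE_TYPES[instance_type])
  return "unknown map at 0x%05x" % address

print(describe_map(0x08121))  # FixedArrayMap (FIXED_ARRAY_TYPE)
print(describe_map(0x08b49))  # ForeignMap (FOREIGN_TYPE)
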